00000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0xd, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0)
[ 2796.147923][T20855] bond618: entered promiscuous mode
[ 2796.163411][T20855] 8021q: adding VLAN 0 to HW filter on device bond618
17:02:41 executing program 4: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x28a, 0x0, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0x10}, {0x4}}}, @IFLA_MASTER={0x8}]}, 0x3c}}, 0x0)
17:02:41 executing program 3: r0 = socket$netlink(0x10, 0x3, 0x0) socket(0x0, 0x0, 0x0) sendmsg$nl_route(0xffffffffffffffff, 0x0, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000100)=@newlink={0x48, 0x10, 0xffffff1f, 0x0, 0x0, {}, [@IFLA_LINKINFO={0x28, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x18, 0x2, 0x0, 0x1, [@IFLA_BR_MULTI_BOOLOPT={0xc, 0x2e, {0x0, 0x1}}, @IFLA_BR_MCAST_STATS_ENABLED={0x5}]}}}]}, 0x48}, 0x1, 0x0, 0x0, 0xc80e0000}, 0x0)
[ 2796.215083][T20862] netlink: 12 bytes leftover after parsing attributes in process `syz-executor.0'.
[ 2796.258389][T20862] workqueue: Failed to create a rescuer kthread for wq "bond489": -EINTR
[ 2796.311858][T20871] validate_nla: 11 callbacks suppressed
[ 2796.311884][T20871] netlink: 'syz-executor.1': attribute type 1 has an invalid length.
17:02:41 executing program 0: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000140)='memory.events\x00', 0x7a05, 0x1700) r2 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) write$cgroup_int(r1, &(0x7f0000000200), 0xf000) sendfile(r1, r2, 0x0, 0xf03b0000) (async) sendfile(r1, r2, 0x0, 0xf03b0000) r3 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000140)='memory.events\x00', 0x7a05, 0x1700) openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) (async) r4 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) write$cgroup_int(r3, &(0x7f0000000200), 0xf000) sendfile(r3, r4, 0x0, 0xf03b0000) r5 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) sendfile(r5, r4, &(0x7f00000002c0)=0x335773c3, 0x8) (async) sendfile(r5, r4, &(0x7f00000002c0)=0x335773c3, 0x8) getsockname$tipc(r4, &(0x7f0000000340)=@name, &(0x7f0000000440)=0x10) getsockopt$TIPC_SRC_DROPPABLE(r4, 0x10f, 0x80, &(0x7f0000000480), &(0x7f00000004c0)=0x4) (async) getsockopt$TIPC_SRC_DROPPABLE(r4, 0x10f, 0x80, &(0x7f0000000480), &(0x7f00000004c0)=0x4) sendfile(r1, r1, &(0x7f0000000180), 0x0) (async) sendfile(r1, r1, &(0x7f0000000180), 0x0) getsockname$packet(r1, &(0x7f0000000280)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @link_local}, &(0x7f0000000300)=0x14) r6 = socket(0x10, 0x803, 0x0) sendmsg$NBD_CMD_DISCONNECT(r6, &(0x7f00000000c0)={0x0, 0x0, &(0x7f0000000180)={0x0, 0x33}}, 0x0) (async) sendmsg$NBD_CMD_DISCONNECT(r6, &(0x7f00000000c0)={0x0, 0x0, &(0x7f0000000180)={0x0, 0x33}}, 0x0) getsockname$packet(r6, &(0x7f0000000100)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000200)=0x59) (async) getsockname$packet(r6, &(0x7f0000000100)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000200)=0x59) sendmsg$nl_route(r0, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f0000000380)=ANY=[@ANYBLOB="3c00000010000d0700155abff63400000000ff0f", @ANYRES32=r7, @ANYBLOB="00000000e60000001c0012000c000100626f6e64"], 0x3c}}, 0x0) (async) sendmsg$nl_route(r0, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f0000000380)=ANY=[@ANYBLOB="3c00000010000d0700155abff63400000000ff0f", @ANYRES32=r7, @ANYBLOB="00000000e60000001c0012000c000100626f6e64"], 0x3c}}, 0x0) r8 = socket$netlink(0x10, 0x3, 0x0) r9 = socket$packet(0x11, 0x3, 0x300) socket$nl_route(0x10, 0x3, 0x0) (async) r10 = socket$nl_route(0x10, 0x3, 0x0) getsockname$packet(r6, &(0x7f00000002c0)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000240)=0x14) bpf$BPF_RAW_TRACEPOINT_OPEN(0x11, &(0x7f00000000c0)={&(0x7f0000000080)='rcu_utilization\x00'}, 0x10) r12 = getgid() sendmmsg$unix(0xffffffffffffffff, &(0x7f0000004480)=[{{&(0x7f0000000500)=@file={0x1, './file0\x00'}, 0x6e, 
&(0x7f00000015c0)=[{&(0x7f00000005c0)="683cffbb2147596fb0e4095bf750d62c61be46d1cdc5a3b63b40f3cc0a8f9b5caacec4ce2a9473653df28e68f5450b0cc6a70fb736e3236d9ff7d616c0a688287f9360e23806dc52c5d273913454788856001fd853c9d747e6149fa83784c986904b0a40de2138ff7cf9c910bdf5f9442b7d9b8b128c222291fade37f64b77e6c453b6e12de245c19649da9603e372b27dd0bc497aa48b82910810eee096fb2bfa783e5738ce160dc4b3142ff74a2fc543d3dccdffc1f27a52f94bd81e0133c6527843b1ec431eb16d8ec9cf881e2a25351705e2b17659f70a5a072475fe0bf536ed05e23c8bb38e5b28da5fc2d73b99003179d6e5698041506a9ec360d0df49b4606f96d6b8d4aa56fbba2ffd5012cb985b7abe217f966c6e01fc4790f65bf18ec175c519e9335ad7349e6610886e524b43389a839e0bcade9f1c98e9f56c2bedf532e5496c9b9c8ec5ae942bef59d6f1d22b85fb0d7a44aad010fbd551e3a5da2dc9c774790e09cea8456520d79da6ea62ffc21175fbefb8f0f55b17e6d1e60af24ff72d51ba8f1b6578601a15c035a28e711bae93c97fc0949f7a06da477c4444c8a914b50478deee0224a4e8c75775f66aea9ab01a5522a89f765073a07f950bcb52bf4b98fcf83be2cbe35baf39b950a6d60a8ba43758dbe7fda45f337d1f0fc231e6d9b26c2db7dba813b0466a3cc16555d6a070b895da380ca4b8a585485f31aaef36bb7608bd962aee4dbb50a9232c1e1f562b31e895a37fa37c9a226041fdbc0e31445698057d1a0228eb75a303df57fd8c435365080d92ca81c6581bfbe97d492d19bf8cd5b1f4021822f5adfe51803c8d6c57907d2ec09c7663c212f18d4dcd547d2a28b42b084cac7d446421ff968d1830494ac1744ff5bb344775ae312a5684da39e24759eaa023f3978d19a986da51bf5986f27c3807a33d40c80270aa228ae58f513426d4cf7f629d7d641c1c73ecfdea8c7e963aa96d6e58a28036480cd2b0974eebb85d6b49b7ccfd3197dedfe26e2af6d772d8165a41738d7031d4b9504c3a803372ebe01711b14edc593cd6715c43753da6bb3e0fdc41e27a199d23ecae356ca9092f57d47ee859252c316ce1b10e705495b42b21140dc8ecd518613e0898c0b1652c9ecda2d03a6624ae3368294eb19f29f3180e1e2263a9e61be3b11a9f7c3f67cf6a061bc68994d1cfe6ca6e692a2b9812c5e261d6551c9428391038c3524bac2c10ae42fb1e8dafaecc64c1ec2c24fcbcffa89d0c06dae7187fc072077de82884d8640d808bae49b4c8624fb9d9fee042e00d4fe08cc0fef2f376ed4e1f3486a72861792f12f4886cb79dce905f6230fb25b3253ccb462b909582f5cb92d2e4b711471458baa24247be07c269884446d0433d30c1be4d28c530997189906f378deed790e401ff8ce06782cf0dcf20ad79c5279356f516866ef2daea3a3277614aa3066651374d64b34e3d42d0dd0b205aa2f85414f42578ec7d04dda949521f7910da5bce5e6fc2dfa306756074a7af0a8bd14bc2e0fc980a88e5d556068d473da35f795ae4c9ffbcbb536aaf79b44c0c880f8b6425bb3ec005a2407727c055876081320bfa813ab47e64422058f6cb50a9abd1a33c1b4abee010bb59f1a93efee4eb1e410fe631e4a4c313d80f8b7af2069ebc287b742b92e1e221db8f8bf3e25a0202fcdfde403a805fce7ecb4df4221e8b238c20a04156c034c7f7448b6a7ca8a3f88a5ecadcb8a8c5cb3f8c0721da72458f8a07eccc99cc9941a4b0e12ee7f336d4beda68d30316eff2f77c0ca73e302c1758e672a9ae6a4c112142f5f47df827c511ba1133fa5aefc476b9c9b10bb8125e09d26e967086fed590bb96688ca45ac451458c0357cfeb2fd480bc2b36d3524ee83f90437b30f041f593e6938f219d82999f3a396e7be4294a93ad48854c80a05cd4fd6622a96988d65983e3a3e007f8e58a52ecaba1b7b0b1cbbd7481a8748ffbf8cf9a9cf286fd02d2be22691e4b7c0b0961c97e26095f6cf91a9c570d020d81ebf6011495bf629018775ae4f9226546bd4c43115c504c4b06289db8c5bcd9581973067a4bb82321ca787a04d16086411ad04f0eba8120f41b40476a2545affe6445e35c781bcd4927b58cfa3e80c51a3850a095fb1b531aa290230ef44217617ed95bc67a043e122c02b3d09faa5b2a4d6fde614b39dc708372f11a301e649057e5a2c87c4fbb57489e80c352cc6b19ac14b6547849675dd50266abc9d41b839e6e5133f04075bf43b84a190d64364fe7cc71abfe00fe1016415f59b7b5f378b026a82c48651f5cc2f5f8ec8bb3d2d8a939b1b460eb3966e17add869b351bed0a023f69b07f60fa04eb36855c0343ee6abbaf3da368e772aa81c8ad3b690e5c08a16b5164fb6065fa415740b6865df14aeab3bd68ff6fdb446483e3f384878a01fc0d49b49dc7730c5caffd3af8c7d3da542885de8dd87c1795c923afa2
04c35d658e301e75db119d593f2c554387bfe49629881f8cf4dcd4a2a89399fba9408f63e382771df6e2a595737e11f73ef640bf14db2649820a5f2025d15cc3377ccb8ec59f95133021fceeef99be28a8c35b349e4b8e488f485e6a78f308e156720578ad82eebaed183f03e959e29e93f6eb72726823e338f1cbebf77b4525bc1dc2917adbf70f2240060abe58807cc6ae89e320021c0cdcd8257301b0ffeabda4463fa938b3357cf5fc490681e15f8660790d590e55df53e1b72f210ad7aaec827fae37e02ee45e7a583d10e94abd03ad974210e7358d45f41343c7a4c157e231efb10d5c26f1d6c85f280764ebec6911ddeea7af01644f933181dd5772c9f3bfa374f11b0cdc8faa3a39fee76f2bae0d2426e3b43a7ee2d42c3ab14c2f63c473b6e6aabf47811d0c542f87a1b5fe41668dd37540d2c469fd0a434e383bea50586a47395f93496a4d82d925f732e305514d1a9d7f21b6074c62445814df6d270c083e9017665b91b7fdea65b774cefb8fc2a50ea99e41d490a86212de9cf90011d04738ed6c62f564f47fbb9726a1500a8ca1f2c615d480a5b3f825ee76e50d1e932cab9d52d7642577a536537d6a0369b9fc9422811b9351f862f5d056fde325cfcb414a4fca9e979901482f6e0dfad44ef05935e02cf574bcb896c89e3e36b3d43f6e584d54e907e90b2f5358932f8b6aa0666d486057b7423cafa39906d974131c57572e625f537e6706395bb5b85e7a53b19483f44e392b02e73a517d88b7af907847b09ae4dc15837b68d28c550f5d69a312bfb65d6170d7137a263d8943639a1c29de9a377559b940893102be2aac92cc18ec6cc581e24bfdd59936a762b6dc4abbf5fcc7a5a6a170ad57672d0ba232c6afa45a386659a57dfeb31d0f8bf2a52786c694237a261ce123d6dbf040d1e4225e36ef6c55841a51185a5d1e1b99514088da46f67c94a391e3171fa8685232773ae5e11fc290ef983f35d4dc08e1677e82dc6f33d255e2bcd76fe9f3ce974133652e87bc6a82907d8e552b2bbc65c9cc5144026cfc7f7a10174565ecbf96f4428a652a2d0e7bebbc2e3d153875cb14d81aa3c456530149bd9003206752826d2bad37824316829f71dde34738370cb133df78e16ea7773cec32f7542b1ff9c078aed4f68ef71fdc283cbfb5bd44c16bc6ca04957fbc89783bfe66904e66310a4a1e8e2d2d69f63611b25224c2629e9daed9f47c16ffbbe1336c014844cc8999908439d7d1f4e057eea0101cdae3f24df10e31c1678c95e04733fdf4849d5e68e5fa4a2f922ebd75541c0e27335a870a43b6a7a03def2faa1183fb5faf6b3636d1623ee29258c1ecb771513e2bb1a7e249dd9576981dadf5a70c0e31639eaef0111cc3b100dc3567c158d16ffbc416cf2ea0c723ebb1e93e27a330a535b75d5890e126c5a820f4a46987fd528380eb42919462c94fa4b4b23e3999417ef2725f375a3537164eb5f619148de645361ec3b77a78d619dd18cbcee0ebde1b872765e6140c6e552b600e048a702e08005f6819c503befe78024f3080d0ef4028e528d80e2b5b4dc2f8381bd5044004628389627880ed80acd0fc706d680b833db5503131cd43b64201a21b84ea8afff45c01e1411f1eb4e7775bbfc12c662c384d9560b1c48e22d763a85b6d718638e7387a92c39482a4ece8fefe861fcc51dfed542330236c221f3b2c7faabaa2c1664781f2c65e068838f9319a0fd87f49cee36e92ed38cedcf2c32bf45a2a1dbc5644d576d7b1e72b26f8178b9834c1afcca8382e6590347970cb3a5682004ec1ce099ee16b281c423cc3868880a6a6bbd5b2637d7676818ce0944df2740d7549605af1e113c9690a6b3172ce88fd81b5dc785229e50135409c6d955a230b169b8151f4b72b16786ad5a5712549ef5ded9ef4a1f27eef31cd7963be7e6d9508650b3dbe427cd381ae4e0bb6c91ef56c9286cfaf3699fbc303034b42da53b4a1428feee930aa133cea2d7eeed413d916a9f8b4d4b0b537ad15d4c49f977f76eaaaeda313606447434aac509fa6c2fdbfed69f2829920f1c2eb9b22f9b151abe12ef07d9b2d18ac396bdd016e9981f3ff6e4a00d8062c9e30fa688fe8d2ab2cb2633c1cd57cc3ae9f5a72d20a5ef14fafba1abe2ad0c0064415e89c4d349abf671c37d7ebefd506cd23a81adcbe51e1b0baf704db00204dba7403297bf7dabed25762b8fb4013ed0f9f39d63b5900a57114f8506b0ea5c4ca4540d3f2cb9ff8f845cbaca7f2f109ece62ada95388ac940cef8dae1d6ddf1bf985e790ef8239c72ddd9e1863654483fd5812ca2f817ae14fbd76b741877b4e9d0644425ca56053aa3b14a8729af6f15a11bcf1a22c532d0296af5d5fd3c1869674a3b83784541e1160960cd9ab37435251824f41f686eb7138f157c228bbf940543e56c74ab02012271f254fb190e726ebb979266195c2e1ee440aecdfb5b627c6d801df3840bf2f3cb4ce11a254a531772c34fa9fe1158ebb
0d17d864152d077be1a2e77cf788797f29550ed6542ab30fde69ad353661047098bfb1b124229ed5ed59a8778f4a3160179f6a25f27d5f26e35c48ad05908a13ad14ae979e1ca016264ca95841e61c758cec742bf8df0f13e575f8aff61a8c5ed04da0c8325825cb164301739d88a0ed797bb4dfefa34fc5be4028f9fc95748903c5ffdadd6d51f6ace1e16068df87455c84d88e2e1be5502d713aec8d8ba171b03736d2be4afb10eb1293e2ddccd527a5ead0114b4823e0dbb61cd68a22f800baa534c8ae541af89d6a261b570d3c2504d57f9f60d6ee211faae8b6a189588f12029edf6dd1275bafe72e61bbb9ebd0dcd7059e0374690fb5b5431cc9bb5eaa8a0fd9751d25f27db125bce1fe9bc458ec431e1cd849d7dc75ec1f0758f65f089752e71de272e4653f797ec7e359501c4a4b913c84cae06efcd6f1b7a7763ee16da7f22418f223e8256076fb782bc44bd104d0e97183c3cd46cc3bbe434d87c85829e874904a3d98d8b8bf949667ddf9cd9f3b01351492b778e2382f9e9928213b397dc9ab86e3725d7dcdc4d7120ba008c7d72eabc760ef5a5281fbc77a1f0c5c0d6f34dac26fe28f4a2b3a6c01d85c3c2f6647618cad98ba4e9b3076daac62c5c348af3d34de1b71cb4856adb80096292e1a895c9bbc3db2bcfba10c96cab2ec466cfade0ded32736c50590a5bb46d2794ad85c0b7e2e45d5203818ebe409b3e3caac71499e91cc7ed49a73aeb2be4b763ab00b87f8774cd032c33dd6fc4b5b0be7bda1618ae06234d36df377b1d7e3", 0x1000}], 0x1, &(0x7f0000001840)=[@rights={{0x38, 0x1, 0x1, [r1, 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, r6, 0xffffffffffffffff, r0, 0xffffffffffffffff, r5]}}, @cred={{0x1c, 0x1, 0x2, {0x0, 0xee01}}}, @cred={{0x1c, 0x1, 0x2, {0x0, 0x0, 0xee01}}}, @cred={{0x1c, 0x1, 0x2, {0xffffffffffffffff, 0xee01}}}], 0x98, 0x8800}}, {{&(0x7f0000001900)=@abs={0x0, 0x0, 0x4e22}, 0x6e, &(0x7f0000001c80)=[{&(0x7f0000001980)="c31c63c6fc17a7878c2d014a4cf10d80e98d00f25ab4347ce98c8f8c7d14e4f83f353740d41486a0c0b57706301e443504b1e793aebfbbe4a27cc55d2cd6135e894a5f572a3bbb0fc6ba54f09d9e34abff8e5246bfe24edcc422bda90355eecb", 0x60}, {&(0x7f0000001a00)="4e66b989334efe22bc40d9aeb902c7886d49bc2b2a7fc762ca47104b9c95e6287da4af7e8c56769cf234cde84c52312d3589864adfd46f97983243fcb03d68b2dac329cc9a1522ebe28ad2a4d7750f7d341ac93ea90499c162d544bf74dfa7dbd8f1d5c19fad6c29e41e8d6375c03424d33d3ffbbd06fc8c1874edaa8ed0570760d7fd1ea984ccf1d44da8cf6777bb4cc130c8285769a2d4", 0x98}, {&(0x7f0000001ac0)="3520935f7fd4f7", 0x7}, {&(0x7f0000001b00)="874ef925d785fdad7afbeeb2478f2c032be1f67888cd148c485eac59f93260c3664aa97f1274984042f8e73242b4db502db0d7300cb82828803aafec769f1d8e8cec7e008fb176dc7d41fce6caee780c9d0d882e0e52e1e61a6f930913f3569e0b94ae67beea8f31af7800e087080ca4a0eef746d676fa329b3348ee845d3552e7494edf5ee03f682e19ac25c52d4c838940c0df421f08e56a048caa6159d36ee9163fa9208af7a5af6741139e5523621e26f398a901320dab162d40648b4902", 0xc0}, {&(0x7f0000001bc0)="fa383f80b8ca2b1743563556a777cf8b8f2824b3c18603600e896da7a14cdb2971675f409b28fe9a973ce9bcd0c2e16f06f41351d442053a9f8da7c02164e61c0bac483943c5886101e4c966139fe5879e78d2c7c6b0278a07fd7e24a9659be73631c7c9e56503397412f4e2a06b2933c288f33861a31d9b1dddaec3644f46eea5ed5340b9549d40103e14192b4431e77b15491239f9e107f8fd7276053a9f58d5201bd3ca741b61eebd476f3f2a", 0xae}], 0x5, &(0x7f0000001f00)=[@cred={{0x1c, 0x1, 0x2, {0x0, 0x0, 0xee01}}}, @rights={{0x24, 0x1, 0x1, [0xffffffffffffffff, r10, 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff]}}, @cred={{0x1c}}, @rights={{0x14, 0x1, 0x1, [r10]}}], 0x80, 0x4001}}, {{&(0x7f0000001f80)=@abs={0x1, 0x0, 0x4e23}, 0x6e, &(0x7f0000003280)=[{&(0x7f0000002000)="84f1f5eb928cc68d47bf2d7b1265c6f184052ee7c0bdcb0d7b", 0x19}, 
{&(0x7f0000002040)="9ef40a5b3aa83119aaabf889afec8e718d01a6f3eb076a784c71a723b7cec93b732201bc67efe90ba485af5f7e0b64ea5afe750fe58c341989468fd851d3e0dafa8bf74b449a6b9093c1f712ff5816453abfbb2ffeb10291d8c1893a6782c9ff1756d9cc9be16cbcc36e024a722bdfcee7878a54fca31d286f2551f47407eb70347ac7cb287df14b7a3341e3640e975ebbfaf3225a8106cf6dbd5cbaae20b6f07f1e4526a8e2a1ab23399bc4ba5ffe995d97098189278e6048cc1bbbb90b7c631f3f20c19f9aff0a4cf2e71e6d7fa025c55d5558d04fb96bb6cab8ddc52b", 0xde}, {&(0x7f0000002140)="0b749a1a1881daab290088abb87f13fcc536dfec2ae8d6e26a3551fb68a99599ac5433ae2a1486490fe68ae06cf3d6d8a1c422db7659d41cde2577317de4fab4f9e5967b48004913d3990ba01b944c60f73bce26fce9bb2ceca7c7ce612da1ef197a96bb038d3ea0bbc41532e0f0742d064f0335f4bba6e5778a897878f4e09b9a6c3eca5d81150b49070de39c885ecfeb4bf4bb1b76cd4926c71e5525df62a44df4b78553265807e4919e2710f4e01d17bc3c0cb566bbd6ea2fb2df78a574de149cc8685887b841ce7f45fd92e7beae79bb8aaa4a98a5c00a1b911b67c63cacd723929aa9f9f988d9337fa203ed7f47b1a331fed1a372e6be", 0xf9}, {&(0x7f0000002240)="4afa2004bc792e21892b6e26e840ad493a8f6205c083b555a5a6ae924827e653e6444aac", 0x24}, {&(0x7f0000002280)="067b138b9ec5e63c297757ec4575b06c5183d7bf38fb02b1ae0c46e45ed35777803a9e7bba0e24ab3dc3a7fab6fd914441a1e3ba59cdd83b84c1383a404d10bfbf1ce87b47f379ee87fcdc4006b60bff9ceaea4dfb6bc93fae3bbf8f68376189ac670d6551dd00397506e981e1418e584634d6cd87b36cbedbadd7fb81d0a7b2e4a5271b52f986462aba0f3961195dd944b96daf9f97bce37108ebee6b156200d67746f97ccdfc2e0af6bddf3499439b796aea5f7aca09e6c7c791fe148845566b16065b14c2558dbcfa35adc3362c1f36b7d2681ed9cea7868d7322fa0e7dcbaff01d1da627a2df36bba8b61af7dfd4af2758982c83a3eecdff2b0f814be72b97f751c9e82e0de2455280a1e1b610c3b094dc5c57b9819f47a5db772c66365006508d24b2201a6f314f320b42b32b27c63aa5544672ebead77a29a0062b98ac5cc37c8fe4595584f303d434ff762dfde9a8f2cae67d447457309176ec5be9ba29b070335ce2f6a9aca11298312d5a3dff053b2a44765432b65a019e7ac6d709a0477deaa021bf0dda70f937f2bc9fcdafcd605407fac154a8585cff53dbaf18dcd1f70d6a63adeec4627c19f4bb1898f5921e2c70065774a7fed6597aa1d80df8fa80f6a981d7ec824f2561ea607d0172e2e35082dce1abb29b5cdafab7edb1735c10d94cb078f6b87b6e9039b64a898115abf307d6d98258dc3087cab7bf472c36e09a67abc9a181b4bdd9883cd59176309d11d403c0284a0b8e2c790449aa6ca98d64368b3923a235252228cf973422a9c373e32d473eba2b6f36c8bbe24cb1f1a9cc3d8720352467dd2e6189e0ee5102720ed70c3fa86b374e81c339ff3378fd33918fccb96decaf1dd7eb9dc73a3d0170a92f5f94a5b6f286706b89a5db779d81f538682292f55d55b79c0c0b19aa8401d7b7795c43cf12aa3985037c5ee99749a74780e94850859862b55fda4c02c71ef37934a25b2983f1e5505644c5ff02f76f097140cd7b9180b89a08f91e42cfc807aa1640b399594aa731911261a69afd364a08220c65c2d5bef35891326be00baf710994eafea36adaa59cb72101c80f28c10405e47916bb64210e5239b117965a647091906812b3d262cd9241a87d8cb1d429917c2280b95163a5247f613e11ac3e0432c0b116572031a69b47253fc76a9b860d7ef962e18a2da79091740a89d550e076ed344f765e7948b577e5a3cc5a3de4ee85fbebd92f38d3fcafc8545a8b4b106b0113c726493d21eecd8bcb2522c949cfc5f4e8fec92496b06b151df2c7682452f90c1ffe5be234b867dd2713e6300dfe5047a447aaafb3016506dc622f258bf44078e2fb85be9e1904fc73fb7bd670de4efd3c1b2313ab9344ca3a2d3ce65ea3f6287c2ffb7f72129a66c0b9682969231a2392913d63d05fc22e06ee04300b5dcb64c2c12899a3706e0362ee75121ef7584c7abc5f95be34acb31ff4802f1878ace3506f3505f91c4b33320d3fdb3b5d14e08e739d32dfcf04238cce0e8db6d444187f5ab409515797db71185d5db6faea3b1fa690d77b2f1c5dd133cf407508665b29eaac42ecefbcc754a4f0811982f7a2f229ff9a4fd92ca882b5a05efd0d389189185233817e695856b9901684340e3639ea4f18d55610774f3e0950118889f0641ec78506e3625cbfea2f4f88597760ea4e5fe5c3220a6a4f35ebf270b5698895250be3afe77
9633a6df1fbdef9e1ef95bae62285d957056c1c2ada6a15944183ac86acb8860be14b3066a509f199547d33cc088a8bbd1e689770e285acf32ccecc1741df2f485978ae35841a53ae50fd24768873ef7335225668aab57921dc3e600db5da8334afab8ffef170e6f33d64080a3eff185e1afb39b7561beef66e2a35a52f4541759e21c21917b0c23a06ee3d4d0c9a83c278e6208966f1a1f98ea6fa4c2c8e9d31e56cfaf4d34559817c168e0bba9425a1d45eae086113aaea0a56c48ccd2d9181aa343489c9ef7a30b84adb7bf85700dfa86a03f7954945e5248d591bf5b41c7b503eb9d98f5d9081ecd2ee4c6f1597576ce6953b10c5768ec7d748c8a79b3b07ae36aa73d0e0f5b15461c45edee05eafcdbdc6b80267e00ca815dff708105365222b8fe674aa2f0a7d404dd11d5c239c29d413998af703feb952d91600c1f269e4324ec87ee0d6f49774787bca42453624bba7d3c0795acc914c82e315a178ece6319d2cd88804765df86174b9491e4e6fdce8765c95660e6f6d5f877c50997e1fd464622179351289006899768e07707c8c25f324757b27fcc817d7f17c088725665c33a75e4aee744b05900b613a5150d9ed290d4bf13cd1ec4c9405d6e059d3e29e8a43be278a787df4f76c9c32b50d7843c1409e3392af13fee4cd50efdc51c0c860ae8f348105cb1e309625d1794d840df94e98c3ebabd67f31385ed25fac10d7165592bc1981f0d6e31780c8e38a63d70970dd19873a6cb4f8df5d49b705864ddd3d367617469264f8cd5f21cab46035dff7fc33206e9c07d2d700bf805b14036222e46f64e807b1bfda22cb5db8191e4038334a5e89bd7f5b2bb2bba97192ada3b2ba5a4a624b33a468eff87e58035b7796ee1d45fa0a7e0d2c4055ff99a0428d7f4d2fbb8ca3396811080db8e7b64547e8cd6d62e05517a979a5d660e82ac2aa0d7824cc24213e9f4be0ef3ea97bcf06e82080262dd459ba6a8e95e9fce0d8df99f33f2fa1daf68a931e35fa550d99f4a532b596b5ae23233cad11d0d6d0915a7102d93a80ed600b7e511367c272492e524f89c327ff174bbe3356b6e64778e13fa81a40013ae239c55effa6bd5b183829ff7dc66f467da310a8027217beeda1f8ea0dd2d115e8ad4fe03df121097ee226821c9aa49fad5aace9db71096e3ac43d877d20f35ce876e76f582b66f04f14279872363b31cf595a152a7993638dee212ee59fc04e1b5e3b89618966374ab00a92c7035f42433bb4bc541aaae8e9d022c5f89a8547db9c6a50dc7ed4ad39cd84824ce80e65ba39290471f1ea357ec328bec104d658d1e2ecbfdcfb1c8ffe82236845b518a6045440c3ccd66f46a8a88eec96dd1e8f48120488dc04c6e45635740002af9d006173db2d9d54ceba70bb6edb443e961d4799b1431d8e5dfbc4b6d0ed29d50297c4518787d838cd1ea5f18d9c3efd0e2c9022acf984a790d370862d31e5ec2d4dcee710a5b8d3885ce55cf65189f644633de93f425e10ea5a802d3f2c7dbde5d0fab6fd3ebc8eb2e9692c725b032166c3cf447f7d6328f51768885c007329f6728e21d5b9cf043b3d07a4a7e4b1f3271214e6d922f8b5dd6b6dfdcc3bb51f0c7546fbbb85416710b41487e681a90d95c43a109ad686097bdfc7e0cc55e666378d19b39c29d13892d7105bb1f4e634efc024945cf5612a0b0975b7f7721d4bc156b3fc94a8684d8dc933b13c7eb2a8004b4a6d43394125d9cbbe3e96ab3291e89d9c7552598876a40871d583a4bc8e53d669737be8e5840b13015888db3e2494adc7c03f88a405b820a5d22ef4b94df741370f3277c681b49d258e110dc36bd167e7fab142e2818768b20a7a6c6086c624d8620546a00de022508b8fabe73ea467877997511334e17cb5f22f1867fdab46db790710a67f01fb18bc7cfc1e2a217e0e8e78d533496f6ef8a73077417463d9ac5380d45f8d60537631a424ab213aa4b6258fdc950f74ca0c66e8da0d8635f42396dbf87763708e973b09031c4eac431fe6fa75479077b5a304684a75d9c31f030023f5543829e2ec37f4242a2265be197faa876ff48e98f797e5dc11de9addb4ef86f632ce84d87a3d44038112ea4386ff299854eb0dc39be0048f46b8d8c43907c62fd85c24ded6f658cf8ccaf30d109277eddad0b98ad729174ae0157d6590b47f1b84c095b7a2c93b74a9d1f4fb9677d3ff4e36c75c45b37aa8ebe1fa1a7aec828e46a866dd9d270b7c1bfd872897b38fa39a6fc850ddbf67d3ea7d5ceb0138bdb4df7130a63285df59ee69956f986300df49749da6eca658bbf92d57b2a1bb87fc7dc31ce0f425d816636fc232c0bcf938a8e8e946b06c822d1af085778df668eded7f6af4f74b926597f2572faa20d59887294ddabb0a970d2582127c879e645d9d3da7e03e0005db736c4b5241d8baf2f990763087931ca61851357aa5f3a2a23c4c5ffb96c4d29afb1d53e0d86f75c85d4283996061beed1117d7245421d5b0a2cfca
d4a36835eaf67d45f177d63197676b34343a87eb3f037b8b18a26d87d817e5d92f41ce9c3596a26b3162245ce768128ac868df48ed1caa1d3e69d9b114242801869adb5fad39d47f9790cc450a05ebb1d8b320cbc70a6ed4f09b139c7427a683c9a9669aa5f87e2b23ac40c161ba9891d578617a99a7e5abcc7fcadbc760fd11c334feb31c26cb953eac52fe6a7e042ccfea6f91399c29939b94c6039a46880a39d13f186a6cbf580cf124477c22cffc1fe72764fa63423832a8b1317b17882566963806b6374700210aff7497a70dac9f876f784750d745f7a5c1ad197f5ae4ec622bb7c8c852f5eb9f35cf37b2b1166c617e9edf2130127677b468ed2bcdd11ad56a64da3cbd5af60b4e971d401a846ed90cb2abc4ba9d49f29acb86cf904e206c2fac1561693aa4974cf4f6cda57ed1c626aaae13307f2020e81b8eb6edea0406ace3ca0007eea43e9272050032eb9d669f6ba6661298b99f56fb1e672ac987cf0bef5792fd7ce58212007644e0315af3c0b17aba76e4bbd82c5b46215050349fb4d5236b9be1414453ca29fd285a5676afa69ffe21e5102a2fe14513e5e2c9f6f6ebad3723512b4e23699e0a0ed8e8f9381302e8c9ab5945aed927ed52752ea5c7805263f9b2d096a9917befb52e120ceff3ffe38ff9a2dd92fcf15bc4eda0c55f85dbb6130906b49ff9550268a09458e459d2b84dca83e10723bdb61ca60db7f4e335d27860bd974c4cef1679a88d246c77895d4f39e1956fab355ae8fac26fdee826180f0cde22ac8cc51645c1d5b389d5c46797e3e09739ef31432da1149599c1a5cc6711588f96b078d139864a65c59af4e1d82c42c5134214d4cb078f059c8a6124ab030f8fd0ed63899b5e030fba4af3d31d151e520aeb185c1e0f72288ff6471d26a746e03414c458ac19b814e8a2f2ce88da4a1bf668ef52feaef431d65de0737db876dc703ae730c4bd95eac1bef51c0b13ed3eea9891448ed8814e93de0c8113ed5799060f94c07f28ee8f1f6028380fc5329b7170f27800786310c00b1aca79cda5f4dbfc7af68893d7105799210549ae9ecdb83a40dd0ca271d3714fc6911059bba82983458cb33c8d341bee46780bab086fc9c864783cdf3ec52b22afd05e7044e00907524206878131e82538fd5d07b29fcfc60c1ffb3123006b519e175d8282166770a1e2599902923479e2d6dede880cf883dc46a2545cb5caa0d7b23cce37905c31832e593ae09bb7484e1cc2f5f6aa5b57bdf4d7bbcb32bccfac5c0ec2ad6b4701b8bedaefd699d33e0a789c46c72ffbc3cab143b02065f8b2a2f9bc2c18dd8dfb68213d9c15949d7dc3f20b3658ccd88ea111fa4c8ca1da1850f398f196ff3f61dd7175e9cb42ff0e998266cfc584f54d30e410b3c8a26543cbc41a500a85659b6d1ad256ef13b7600a3bb089f8d4d6200f60d85189cdfb04d0445774fcc13756454eb6f59baae774378f3c6a9e698250719801e1e154b56e523f2bdc41b10044b719a74eec2eb92a063ec9004dd6ddfb63162fa74de1b6c3dc4b0764bc529925c874bf", 0x1000}], 0x5, &(0x7f0000003440)=[@rights={{0x18, 0x1, 0x1, [r3, r10]}}, @cred={{0x1c, 0x1, 0x2, {0x0, 0xffffffffffffffff, 0xffffffffffffffff}}}, @rights={{0x24, 0x1, 0x1, [0xffffffffffffffff, r4, r9, r9, 0xffffffffffffffff]}}, @rights={{0x1c, 0x1, 0x1, [0xffffffffffffffff, 0xffffffffffffffff, r5]}}, @rights={{0x24, 0x1, 0x1, [r4, 0xffffffffffffffff, r2, r5, 0xffffffffffffffff]}}, @rights={{0x34, 0x1, 0x1, [r3, 0xffffffffffffffff, r1, 0xffffffffffffffff, 0xffffffffffffffff, r1, 0xffffffffffffffff, r1, 0xffffffffffffffff]}}, @rights={{0x20, 0x1, 0x1, [0xffffffffffffffff, 0xffffffffffffffff, r1, 0xffffffffffffffff]}}, @cred={{0x1c, 0x1, 0x2, {0xffffffffffffffff, 0x0, 0xee00}}}], 0x120, 0x4048051}}, {{0x0, 0x0, &(0x7f00000039c0)=[{&(0x7f0000003580)="a1ecd5e53c0083ce40c41015e099c32ff4194d5ce0dbdedb87953a1235884f5c6df78d8defe408da94bd7f83bf955f80700f0f97a4b8528dbc63dc748992c08a7ba35ae02fecae946cac1c59c49ba34cae96f1943519e51851a90b04512e40237160ccae119fd5ee2867d7376c971ad8d87493ce65dd125648cf042241c205e5c81a3d58075c5eeb04cbb520d518f50e9831d37967409026eb00d6774271efec2df5a239f40f8835696b10392d308a8335e317a9a6e1eefb504b36af1623481580344a813090c470e97d9c1c88990d242d9f8d2331a78b6937ea3055df175e6ecd74fe567e92f3fef758f6149231a541", 0xf0}, 
{&(0x7f0000003680)="6b14d4e76866e7cf53e32eb49b7700b1e1f591d762cd65e00e7d130a9e8be0bd96f2502b9ece86064f2ed59d13debaba231b4693ed7c1c96d6e3fe0b494f099291657cecafec504535bb4e917a836a07e9631b504928cddd93cc0b2c32389e42423382d54662b97a4033feb404bb15dfd817b2f692f21f8f9ff2c4e4daa4eaee00cc1f36ee7ff6f51a324740012ecb5b6c96da28966f5b7053747ad1a4d5ab391c47b2451a380bb12811d2c1b2f1b9", 0xaf}, {&(0x7f0000003740)="e49c3588942f8f0e5d65f54e3be58f253664ea3581674e5a856f3401c29f26eb6b945bec1ea4664b6815c27c31204e07327ff21e72c7f551c6fb32be820982c10bc40194d6394c0efe67d088b0c031d95723d98b2b7b4c8f58ac1b7cf877fd7091913bd55949f7691116a993e12cc52e498ebe484824663dfcac49fa1791f7ef0d3268fe3099b9186affd62f5073847d371d57c4d6df8df6071e7e258ba0fed52e95b3c989faa4c8adbe580fc90320f08e15e50eeb9f1d3235fcce1e", 0xbc}, {&(0x7f0000003800)="42b750160b72cf5ad9f62ea81e0871bfb3f68e7d282d31c527422052d23178dee5d4f9caabfe7ab4af91c8b0113e63ff25e967e4f94bcff9e634054d47308b6909f547b2c7546575c4b41f6f48e24dd936a2fceede939880be4f80d7551ae9f72ffb258c520db9d3e5755d48a895900504fdc1dc2af041e528504ededcba8a", 0x7f}, {&(0x7f0000003880)="1e84bdd4929d0d47d5936392fb71c25131b0026a4e7cbefeeb95c52aeab540628d8d449b64a87c19dbe2fe93a21dde5e8ce2870c085f1bb22796dddd635d56f2b9773be564acff4b20dae9873ac29f7b776a522f8678d176d8cd4b2da7f116703514b0baea", 0x65}, {&(0x7f0000003900)="487d39d4f6b4d3bc97fc9b99dcded37d493658c1abf9b2c8f0e34beffa1d303cd388aedca477d06d551f9d12c2fd30770a0c63d82c8d0546e40b99be0415c4328c6f5c64c3de5ad184e51bf1343503410c4c1b1828c61a753290b2d7de9c0413673eec8a72bfecde3ca96b4294997478b9da8527ba658c2c34ef0d6e338b44a32a25c2441bb61765e9a4834727eb1b26778c13030127df2d9430389a", 0x9c}], 0x6, &(0x7f0000003a40)=[@rights={{0x14, 0x1, 0x1, [r6]}}, @cred={{0x1c}}, @rights={{0x14, 0x1, 0x1, [r1]}}], 0x50, 0x4}}, {{&(0x7f0000003ac0)=@file={0x1, './file0\x00'}, 0x6e, &(0x7f0000003c80)=[{&(0x7f0000003b40)="78691de75330c7ea904ece90f959ed5d7a09ecd0173feacbf490f132970f7820229084f95f01609ee9b90ee9f69a55bfebab977903dd710936b515e7778393c797f530c77d03d2bbe7367c3ca299109520e10ad2864a4b73f7be4a8b120affe6a13aac515fa8735892fbaa5a316e4fc1a927374bff8d892a99e0c1e87a0e1c13ab", 0x81}, {&(0x7f0000003c00)="080ee4005a26", 0x6}, {&(0x7f0000003c40)}], 0x3, &(0x7f00000040c0)=[@rights={{0x28, 0x1, 0x1, [0xffffffffffffffff, r4, r10, 0xffffffffffffffff, 0xffffffffffffffff, r10]}}, @rights={{0x34, 0x1, 0x1, [r1, 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff]}}, @cred={{0x1c, 0x1, 0x2, {0x0, 0xee01}}}, @cred={{0x1c}}], 0xa0, 0x40081}}, {{&(0x7f0000004180)=@file={0x1, './file0\x00'}, 0x6e, &(0x7f0000004300)=[{&(0x7f0000004200)="4a20a2939b4b442d449e7fe821f35478aa386f64733aa342ff9b6bf12ffa51908a11a72ea94a2fd73cd76496af07d7a9dd33334facd376028431c2aa44fe", 0x3e}, {&(0x7f0000004240)="5c6a284b37e6547bc5948c5f00ca0cb83ee3370e388fbaba63163bb5f2fc81bbf76d5c6df56b06edbfac6e3db95455412e478afdb6075a817711b1e6", 0x3c}, {&(0x7f0000004280)="60c0edf1ad3739923ef15fd1f3ba7a04841db2d70eb1a39633e74a92a91ecb77939e0ceb2e9b836749cfbc85492d67b5fb9739d43d6e7297d5d63e6f6dc8878abf7d87dfe8f3d4", 0x47}], 0x3, &(0x7f0000004400)=[@rights={{0x14, 0x1, 0x1, [0xffffffffffffffff]}}, @rights={{0x38, 0x1, 0x1, [r0, r4, 0xffffffffffffffff, r6, r5, 0xffffffffffffffff, r8, r9, r9, 0xffffffffffffffff]}}, @cred={{0x1c, 0x1, 0x2, {0xffffffffffffffff, 0xee01, r12}}}], 0x70}}], 0x6, 0x0) sendmsg$nl_route(r10, &(0x7f0000000000)={0x0, 0x0, 
&(0x7f0000000140)={&(0x7f00000003c0)=ANY=[@ANYBLOB="50000000100001040000ff0f0000000000000000", @ANYRES32=0x0, @ANYBLOB="00000000000000002800128009000100766c616e00000000180002800c0002001c0000001b000000060001000100000008000500", @ANYRES32=r11], 0x50}}, 0x0) getsockname$packet(r9, &(0x7f0000000100)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000200)) (async) getsockname$packet(r9, &(0x7f0000000100)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000200)) sendmsg$nl_route(r8, &(0x7f0000000080)={0x0, 0x0, &(0x7f0000000040)={&(0x7f00000000c0)=ANY=[@ANYBLOB="28000000100025080000000000f15cd9de000000", @ANYRES32=r13, @ANYBLOB="000000000000000008000a0010"], 0x28}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000140)={0x0, 0x0, &(0x7f00000001c0)={&(0x7f0000000580)=@dellink={0x20, 0x11, 0x21, 0x0, 0x0, {0x2, 0x0, 0x0, r7}}, 0x20}}, 0x0)
[ 2796.475558][T20871] bond742: entered promiscuous mode
[ 2796.482126][T20871] 8021q: adding VLAN 0 to HW filter on device bond742
17:02:41 executing program 1: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x28a, 0xa, {0x0, 0x0, 0x0, 0x0, 0x4000000}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8}]}, 0x3c}}, 0x0)
[ 2796.518187][T20875] netlink: 'syz-executor.2': attribute type 1 has an invalid length.
[ 2796.590620][T20875] bond971: entered promiscuous mode
[ 2796.605998][T20875] 8021q: adding VLAN 0 to HW filter on device bond971
[ 2796.747472][T20876] bond971: (slave bridge1037): making interface the new active one
[ 2796.769388][T20876] bridge1037: entered promiscuous mode
[ 2796.785730][T20876] bond971: (slave bridge1037): Enslaving as an active interface with an up link
17:02:41 executing program 2: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0xe6020000, 0x0, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0)
[ 2796.834809][T20881] netlink: 'syz-executor.5': attribute type 1 has an invalid length.
[ 2796.893583][T20881] bond1039: entered promiscuous mode
[ 2796.901330][T20881] 8021q: adding VLAN 0 to HW filter on device bond1039
[ 2796.979415][T20882] bond1039: (slave bridge1066): making interface the new active one
[ 2796.996595][T20882] bridge1066: entered promiscuous mode
17:02:42 executing program 5: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x1a, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0)
[ 2797.023726][T20882] bond1039: (slave bridge1066): Enslaving as an active interface with an up link
[ 2797.046445][T20889] netlink: 'syz-executor.4': attribute type 1 has an invalid length.
[ 2797.151996][T20889] bond619: entered promiscuous mode
[ 2797.157591][T20889] 8021q: adding VLAN 0 to HW filter on device bond619
17:02:42 executing program 3: r0 = socket$netlink(0x10, 0x3, 0x0) socket(0x0, 0x0, 0x0) sendmsg$nl_route(0xffffffffffffffff, 0x0, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000100)=@newlink={0x48, 0x10, 0xffffff1f, 0x0, 0x0, {}, [@IFLA_LINKINFO={0x28, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x18, 0x2, 0x0, 0x1, [@IFLA_BR_MULTI_BOOLOPT={0xc, 0x2e, {0x0, 0x1}}, @IFLA_BR_MCAST_STATS_ENABLED={0x5}]}}}]}, 0x48}, 0x1, 0x0, 0x0, 0xcb270000}, 0x0)
17:02:42 executing program 4: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x28a, 0x0, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0x12}, {0x4}}}, @IFLA_MASTER={0x8}]}, 0x3c}}, 0x0)
[ 2797.277504][T20896] netlink: 12 bytes leftover after parsing attributes in process `syz-executor.0'.
[ 2797.396798][T20895] netlink: 12 bytes leftover after parsing attributes in process `syz-executor.0'.
[ 2797.427126][T20901] netlink: 'syz-executor.1': attribute type 1 has an invalid length.
17:02:42 executing program 0: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000140)='memory.events\x00', 0x7a05, 0x1700) r2 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) write$cgroup_int(r1, &(0x7f0000000200), 0xf000) sendfile(r1, r2, 0x0, 0xf03b0000) r3 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000140)='memory.events\x00', 0x7a05, 0x1700) r4 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) write$cgroup_int(r3, &(0x7f0000000200), 0xf000) sendfile(r3, r4, 0x0, 0xf03b0000) r5 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) sendfile(r5, r4, &(0x7f00000002c0)=0x335773c3, 0x8) getsockname$tipc(r4, &(0x7f0000000340)=@name, &(0x7f0000000440)=0x10) getsockopt$TIPC_SRC_DROPPABLE(r4, 0x10f, 0x80, &(0x7f0000000480), &(0x7f00000004c0)=0x4) sendfile(r1, r1, &(0x7f0000000180), 0x0) getsockname$packet(r1, &(0x7f0000000280)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @link_local}, &(0x7f0000000300)=0x14) r6 = socket(0x10, 0x803, 0x0) sendmsg$NBD_CMD_DISCONNECT(r6, &(0x7f00000000c0)={0x0, 0x0, &(0x7f0000000180)={0x0, 0x33}}, 0x0) getsockname$packet(r6, &(0x7f0000000100)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000200)=0x59) sendmsg$nl_route(r0, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f0000000380)=ANY=[@ANYBLOB="3c00000010000d0700155abff63400000000ff0f", @ANYRES32=r7, @ANYBLOB="00000000e60000001c0012000c000100626f6e64"], 0x3c}}, 0x0) r8 = socket$netlink(0x10, 0x3, 0x0) r9 = socket$packet(0x11, 0x3, 0x300) r10 = socket$nl_route(0x10, 0x3, 0x0) getsockname$packet(r6, &(0x7f00000002c0)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000240)=0x14) bpf$BPF_RAW_TRACEPOINT_OPEN(0x11, &(0x7f00000000c0)={&(0x7f0000000080)='rcu_utilization\x00'}, 0x10) r12 = getgid() sendmmsg$unix(0xffffffffffffffff, &(0x7f0000004480)=[{{&(0x7f0000000500)=@file={0x1, './file0\x00'}, 0x6e, 
&(0x7f00000015c0)=[{&(0x7f00000005c0)="683cffbb2147596fb0e4095bf750d62c61be46d1cdc5a3b63b40f3cc0a8f9b5caacec4ce2a9473653df28e68f5450b0cc6a70fb736e3236d9ff7d616c0a688287f9360e23806dc52c5d273913454788856001fd853c9d747e6149fa83784c986904b0a40de2138ff7cf9c910bdf5f9442b7d9b8b128c222291fade37f64b77e6c453b6e12de245c19649da9603e372b27dd0bc497aa48b82910810eee096fb2bfa783e5738ce160dc4b3142ff74a2fc543d3dccdffc1f27a52f94bd81e0133c6527843b1ec431eb16d8ec9cf881e2a25351705e2b17659f70a5a072475fe0bf536ed05e23c8bb38e5b28da5fc2d73b99003179d6e5698041506a9ec360d0df49b4606f96d6b8d4aa56fbba2ffd5012cb985b7abe217f966c6e01fc4790f65bf18ec175c519e9335ad7349e6610886e524b43389a839e0bcade9f1c98e9f56c2bedf532e5496c9b9c8ec5ae942bef59d6f1d22b85fb0d7a44aad010fbd551e3a5da2dc9c774790e09cea8456520d79da6ea62ffc21175fbefb8f0f55b17e6d1e60af24ff72d51ba8f1b6578601a15c035a28e711bae93c97fc0949f7a06da477c4444c8a914b50478deee0224a4e8c75775f66aea9ab01a5522a89f765073a07f950bcb52bf4b98fcf83be2cbe35baf39b950a6d60a8ba43758dbe7fda45f337d1f0fc231e6d9b26c2db7dba813b0466a3cc16555d6a070b895da380ca4b8a585485f31aaef36bb7608bd962aee4dbb50a9232c1e1f562b31e895a37fa37c9a226041fdbc0e31445698057d1a0228eb75a303df57fd8c435365080d92ca81c6581bfbe97d492d19bf8cd5b1f4021822f5adfe51803c8d6c57907d2ec09c7663c212f18d4dcd547d2a28b42b084cac7d446421ff968d1830494ac1744ff5bb344775ae312a5684da39e24759eaa023f3978d19a986da51bf5986f27c3807a33d40c80270aa228ae58f513426d4cf7f629d7d641c1c73ecfdea8c7e963aa96d6e58a28036480cd2b0974eebb85d6b49b7ccfd3197dedfe26e2af6d772d8165a41738d7031d4b9504c3a803372ebe01711b14edc593cd6715c43753da6bb3e0fdc41e27a199d23ecae356ca9092f57d47ee859252c316ce1b10e705495b42b21140dc8ecd518613e0898c0b1652c9ecda2d03a6624ae3368294eb19f29f3180e1e2263a9e61be3b11a9f7c3f67cf6a061bc68994d1cfe6ca6e692a2b9812c5e261d6551c9428391038c3524bac2c10ae42fb1e8dafaecc64c1ec2c24fcbcffa89d0c06dae7187fc072077de82884d8640d808bae49b4c8624fb9d9fee042e00d4fe08cc0fef2f376ed4e1f3486a72861792f12f4886cb79dce905f6230fb25b3253ccb462b909582f5cb92d2e4b711471458baa24247be07c269884446d0433d30c1be4d28c530997189906f378deed790e401ff8ce06782cf0dcf20ad79c5279356f516866ef2daea3a3277614aa3066651374d64b34e3d42d0dd0b205aa2f85414f42578ec7d04dda949521f7910da5bce5e6fc2dfa306756074a7af0a8bd14bc2e0fc980a88e5d556068d473da35f795ae4c9ffbcbb536aaf79b44c0c880f8b6425bb3ec005a2407727c055876081320bfa813ab47e64422058f6cb50a9abd1a33c1b4abee010bb59f1a93efee4eb1e410fe631e4a4c313d80f8b7af2069ebc287b742b92e1e221db8f8bf3e25a0202fcdfde403a805fce7ecb4df4221e8b238c20a04156c034c7f7448b6a7ca8a3f88a5ecadcb8a8c5cb3f8c0721da72458f8a07eccc99cc9941a4b0e12ee7f336d4beda68d30316eff2f77c0ca73e302c1758e672a9ae6a4c112142f5f47df827c511ba1133fa5aefc476b9c9b10bb8125e09d26e967086fed590bb96688ca45ac451458c0357cfeb2fd480bc2b36d3524ee83f90437b30f041f593e6938f219d82999f3a396e7be4294a93ad48854c80a05cd4fd6622a96988d65983e3a3e007f8e58a52ecaba1b7b0b1cbbd7481a8748ffbf8cf9a9cf286fd02d2be22691e4b7c0b0961c97e26095f6cf91a9c570d020d81ebf6011495bf629018775ae4f9226546bd4c43115c504c4b06289db8c5bcd9581973067a4bb82321ca787a04d16086411ad04f0eba8120f41b40476a2545affe6445e35c781bcd4927b58cfa3e80c51a3850a095fb1b531aa290230ef44217617ed95bc67a043e122c02b3d09faa5b2a4d6fde614b39dc708372f11a301e649057e5a2c87c4fbb57489e80c352cc6b19ac14b6547849675dd50266abc9d41b839e6e5133f04075bf43b84a190d64364fe7cc71abfe00fe1016415f59b7b5f378b026a82c48651f5cc2f5f8ec8bb3d2d8a939b1b460eb3966e17add869b351bed0a023f69b07f60fa04eb36855c0343ee6abbaf3da368e772aa81c8ad3b690e5c08a16b5164fb6065fa415740b6865df14aeab3bd68ff6fdb446483e3f384878a01fc0d49b49dc7730c5caffd3af8c7d3da542885de8dd87c1795c923afa2
04c35d658e301e75db119d593f2c554387bfe49629881f8cf4dcd4a2a89399fba9408f63e382771df6e2a595737e11f73ef640bf14db2649820a5f2025d15cc3377ccb8ec59f95133021fceeef99be28a8c35b349e4b8e488f485e6a78f308e156720578ad82eebaed183f03e959e29e93f6eb72726823e338f1cbebf77b4525bc1dc2917adbf70f2240060abe58807cc6ae89e320021c0cdcd8257301b0ffeabda4463fa938b3357cf5fc490681e15f8660790d590e55df53e1b72f210ad7aaec827fae37e02ee45e7a583d10e94abd03ad974210e7358d45f41343c7a4c157e231efb10d5c26f1d6c85f280764ebec6911ddeea7af01644f933181dd5772c9f3bfa374f11b0cdc8faa3a39fee76f2bae0d2426e3b43a7ee2d42c3ab14c2f63c473b6e6aabf47811d0c542f87a1b5fe41668dd37540d2c469fd0a434e383bea50586a47395f93496a4d82d925f732e305514d1a9d7f21b6074c62445814df6d270c083e9017665b91b7fdea65b774cefb8fc2a50ea99e41d490a86212de9cf90011d04738ed6c62f564f47fbb9726a1500a8ca1f2c615d480a5b3f825ee76e50d1e932cab9d52d7642577a536537d6a0369b9fc9422811b9351f862f5d056fde325cfcb414a4fca9e979901482f6e0dfad44ef05935e02cf574bcb896c89e3e36b3d43f6e584d54e907e90b2f5358932f8b6aa0666d486057b7423cafa39906d974131c57572e625f537e6706395bb5b85e7a53b19483f44e392b02e73a517d88b7af907847b09ae4dc15837b68d28c550f5d69a312bfb65d6170d7137a263d8943639a1c29de9a377559b940893102be2aac92cc18ec6cc581e24bfdd59936a762b6dc4abbf5fcc7a5a6a170ad57672d0ba232c6afa45a386659a57dfeb31d0f8bf2a52786c694237a261ce123d6dbf040d1e4225e36ef6c55841a51185a5d1e1b99514088da46f67c94a391e3171fa8685232773ae5e11fc290ef983f35d4dc08e1677e82dc6f33d255e2bcd76fe9f3ce974133652e87bc6a82907d8e552b2bbc65c9cc5144026cfc7f7a10174565ecbf96f4428a652a2d0e7bebbc2e3d153875cb14d81aa3c456530149bd9003206752826d2bad37824316829f71dde34738370cb133df78e16ea7773cec32f7542b1ff9c078aed4f68ef71fdc283cbfb5bd44c16bc6ca04957fbc89783bfe66904e66310a4a1e8e2d2d69f63611b25224c2629e9daed9f47c16ffbbe1336c014844cc8999908439d7d1f4e057eea0101cdae3f24df10e31c1678c95e04733fdf4849d5e68e5fa4a2f922ebd75541c0e27335a870a43b6a7a03def2faa1183fb5faf6b3636d1623ee29258c1ecb771513e2bb1a7e249dd9576981dadf5a70c0e31639eaef0111cc3b100dc3567c158d16ffbc416cf2ea0c723ebb1e93e27a330a535b75d5890e126c5a820f4a46987fd528380eb42919462c94fa4b4b23e3999417ef2725f375a3537164eb5f619148de645361ec3b77a78d619dd18cbcee0ebde1b872765e6140c6e552b600e048a702e08005f6819c503befe78024f3080d0ef4028e528d80e2b5b4dc2f8381bd5044004628389627880ed80acd0fc706d680b833db5503131cd43b64201a21b84ea8afff45c01e1411f1eb4e7775bbfc12c662c384d9560b1c48e22d763a85b6d718638e7387a92c39482a4ece8fefe861fcc51dfed542330236c221f3b2c7faabaa2c1664781f2c65e068838f9319a0fd87f49cee36e92ed38cedcf2c32bf45a2a1dbc5644d576d7b1e72b26f8178b9834c1afcca8382e6590347970cb3a5682004ec1ce099ee16b281c423cc3868880a6a6bbd5b2637d7676818ce0944df2740d7549605af1e113c9690a6b3172ce88fd81b5dc785229e50135409c6d955a230b169b8151f4b72b16786ad5a5712549ef5ded9ef4a1f27eef31cd7963be7e6d9508650b3dbe427cd381ae4e0bb6c91ef56c9286cfaf3699fbc303034b42da53b4a1428feee930aa133cea2d7eeed413d916a9f8b4d4b0b537ad15d4c49f977f76eaaaeda313606447434aac509fa6c2fdbfed69f2829920f1c2eb9b22f9b151abe12ef07d9b2d18ac396bdd016e9981f3ff6e4a00d8062c9e30fa688fe8d2ab2cb2633c1cd57cc3ae9f5a72d20a5ef14fafba1abe2ad0c0064415e89c4d349abf671c37d7ebefd506cd23a81adcbe51e1b0baf704db00204dba7403297bf7dabed25762b8fb4013ed0f9f39d63b5900a57114f8506b0ea5c4ca4540d3f2cb9ff8f845cbaca7f2f109ece62ada95388ac940cef8dae1d6ddf1bf985e790ef8239c72ddd9e1863654483fd5812ca2f817ae14fbd76b741877b4e9d0644425ca56053aa3b14a8729af6f15a11bcf1a22c532d0296af5d5fd3c1869674a3b83784541e1160960cd9ab37435251824f41f686eb7138f157c228bbf940543e56c74ab02012271f254fb190e726ebb979266195c2e1ee440aecdfb5b627c6d801df3840bf2f3cb4ce11a254a531772c34fa9fe1158ebb
0d17d864152d077be1a2e77cf788797f29550ed6542ab30fde69ad353661047098bfb1b124229ed5ed59a8778f4a3160179f6a25f27d5f26e35c48ad05908a13ad14ae979e1ca016264ca95841e61c758cec742bf8df0f13e575f8aff61a8c5ed04da0c8325825cb164301739d88a0ed797bb4dfefa34fc5be4028f9fc95748903c5ffdadd6d51f6ace1e16068df87455c84d88e2e1be5502d713aec8d8ba171b03736d2be4afb10eb1293e2ddccd527a5ead0114b4823e0dbb61cd68a22f800baa534c8ae541af89d6a261b570d3c2504d57f9f60d6ee211faae8b6a189588f12029edf6dd1275bafe72e61bbb9ebd0dcd7059e0374690fb5b5431cc9bb5eaa8a0fd9751d25f27db125bce1fe9bc458ec431e1cd849d7dc75ec1f0758f65f089752e71de272e4653f797ec7e359501c4a4b913c84cae06efcd6f1b7a7763ee16da7f22418f223e8256076fb782bc44bd104d0e97183c3cd46cc3bbe434d87c85829e874904a3d98d8b8bf949667ddf9cd9f3b01351492b778e2382f9e9928213b397dc9ab86e3725d7dcdc4d7120ba008c7d72eabc760ef5a5281fbc77a1f0c5c0d6f34dac26fe28f4a2b3a6c01d85c3c2f6647618cad98ba4e9b3076daac62c5c348af3d34de1b71cb4856adb80096292e1a895c9bbc3db2bcfba10c96cab2ec466cfade0ded32736c50590a5bb46d2794ad85c0b7e2e45d5203818ebe409b3e3caac71499e91cc7ed49a73aeb2be4b763ab00b87f8774cd032c33dd6fc4b5b0be7bda1618ae06234d36df377b1d7e3", 0x1000}], 0x1, &(0x7f0000001840)=[@rights={{0x38, 0x1, 0x1, [r1, 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, r6, 0xffffffffffffffff, r0, 0xffffffffffffffff, r5]}}, @cred={{0x1c, 0x1, 0x2, {0x0, 0xee01}}}, @cred={{0x1c, 0x1, 0x2, {0x0, 0x0, 0xee01}}}, @cred={{0x1c, 0x1, 0x2, {0xffffffffffffffff, 0xee01}}}], 0x98, 0x8800}}, {{&(0x7f0000001900)=@abs={0x0, 0x0, 0x4e22}, 0x6e, &(0x7f0000001c80)=[{&(0x7f0000001980)="c31c63c6fc17a7878c2d014a4cf10d80e98d00f25ab4347ce98c8f8c7d14e4f83f353740d41486a0c0b57706301e443504b1e793aebfbbe4a27cc55d2cd6135e894a5f572a3bbb0fc6ba54f09d9e34abff8e5246bfe24edcc422bda90355eecb", 0x60}, {&(0x7f0000001a00)="4e66b989334efe22bc40d9aeb902c7886d49bc2b2a7fc762ca47104b9c95e6287da4af7e8c56769cf234cde84c52312d3589864adfd46f97983243fcb03d68b2dac329cc9a1522ebe28ad2a4d7750f7d341ac93ea90499c162d544bf74dfa7dbd8f1d5c19fad6c29e41e8d6375c03424d33d3ffbbd06fc8c1874edaa8ed0570760d7fd1ea984ccf1d44da8cf6777bb4cc130c8285769a2d4", 0x98}, {&(0x7f0000001ac0)="3520935f7fd4f7", 0x7}, {&(0x7f0000001b00)="874ef925d785fdad7afbeeb2478f2c032be1f67888cd148c485eac59f93260c3664aa97f1274984042f8e73242b4db502db0d7300cb82828803aafec769f1d8e8cec7e008fb176dc7d41fce6caee780c9d0d882e0e52e1e61a6f930913f3569e0b94ae67beea8f31af7800e087080ca4a0eef746d676fa329b3348ee845d3552e7494edf5ee03f682e19ac25c52d4c838940c0df421f08e56a048caa6159d36ee9163fa9208af7a5af6741139e5523621e26f398a901320dab162d40648b4902", 0xc0}, {&(0x7f0000001bc0)="fa383f80b8ca2b1743563556a777cf8b8f2824b3c18603600e896da7a14cdb2971675f409b28fe9a973ce9bcd0c2e16f06f41351d442053a9f8da7c02164e61c0bac483943c5886101e4c966139fe5879e78d2c7c6b0278a07fd7e24a9659be73631c7c9e56503397412f4e2a06b2933c288f33861a31d9b1dddaec3644f46eea5ed5340b9549d40103e14192b4431e77b15491239f9e107f8fd7276053a9f58d5201bd3ca741b61eebd476f3f2a", 0xae}], 0x5, &(0x7f0000001f00)=[@cred={{0x1c, 0x1, 0x2, {0x0, 0x0, 0xee01}}}, @rights={{0x24, 0x1, 0x1, [0xffffffffffffffff, r10, 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff]}}, @cred={{0x1c}}, @rights={{0x14, 0x1, 0x1, [r10]}}], 0x80, 0x4001}}, {{&(0x7f0000001f80)=@abs={0x1, 0x0, 0x4e23}, 0x6e, &(0x7f0000003280)=[{&(0x7f0000002000)="84f1f5eb928cc68d47bf2d7b1265c6f184052ee7c0bdcb0d7b", 0x19}, 
{&(0x7f0000002040)="9ef40a5b3aa83119aaabf889afec8e718d01a6f3eb076a784c71a723b7cec93b732201bc67efe90ba485af5f7e0b64ea5afe750fe58c341989468fd851d3e0dafa8bf74b449a6b9093c1f712ff5816453abfbb2ffeb10291d8c1893a6782c9ff1756d9cc9be16cbcc36e024a722bdfcee7878a54fca31d286f2551f47407eb70347ac7cb287df14b7a3341e3640e975ebbfaf3225a8106cf6dbd5cbaae20b6f07f1e4526a8e2a1ab23399bc4ba5ffe995d97098189278e6048cc1bbbb90b7c631f3f20c19f9aff0a4cf2e71e6d7fa025c55d5558d04fb96bb6cab8ddc52b", 0xde}, {&(0x7f0000002140)="0b749a1a1881daab290088abb87f13fcc536dfec2ae8d6e26a3551fb68a99599ac5433ae2a1486490fe68ae06cf3d6d8a1c422db7659d41cde2577317de4fab4f9e5967b48004913d3990ba01b944c60f73bce26fce9bb2ceca7c7ce612da1ef197a96bb038d3ea0bbc41532e0f0742d064f0335f4bba6e5778a897878f4e09b9a6c3eca5d81150b49070de39c885ecfeb4bf4bb1b76cd4926c71e5525df62a44df4b78553265807e4919e2710f4e01d17bc3c0cb566bbd6ea2fb2df78a574de149cc8685887b841ce7f45fd92e7beae79bb8aaa4a98a5c00a1b911b67c63cacd723929aa9f9f988d9337fa203ed7f47b1a331fed1a372e6be", 0xf9}, {&(0x7f0000002240)="4afa2004bc792e21892b6e26e840ad493a8f6205c083b555a5a6ae924827e653e6444aac", 0x24}, {&(0x7f0000002280)="067b138b9ec5e63c297757ec4575b06c5183d7bf38fb02b1ae0c46e45ed35777803a9e7bba0e24ab3dc3a7fab6fd914441a1e3ba59cdd83b84c1383a404d10bfbf1ce87b47f379ee87fcdc4006b60bff9ceaea4dfb6bc93fae3bbf8f68376189ac670d6551dd00397506e981e1418e584634d6cd87b36cbedbadd7fb81d0a7b2e4a5271b52f986462aba0f3961195dd944b96daf9f97bce37108ebee6b156200d67746f97ccdfc2e0af6bddf3499439b796aea5f7aca09e6c7c791fe148845566b16065b14c2558dbcfa35adc3362c1f36b7d2681ed9cea7868d7322fa0e7dcbaff01d1da627a2df36bba8b61af7dfd4af2758982c83a3eecdff2b0f814be72b97f751c9e82e0de2455280a1e1b610c3b094dc5c57b9819f47a5db772c66365006508d24b2201a6f314f320b42b32b27c63aa5544672ebead77a29a0062b98ac5cc37c8fe4595584f303d434ff762dfde9a8f2cae67d447457309176ec5be9ba29b070335ce2f6a9aca11298312d5a3dff053b2a44765432b65a019e7ac6d709a0477deaa021bf0dda70f937f2bc9fcdafcd605407fac154a8585cff53dbaf18dcd1f70d6a63adeec4627c19f4bb1898f5921e2c70065774a7fed6597aa1d80df8fa80f6a981d7ec824f2561ea607d0172e2e35082dce1abb29b5cdafab7edb1735c10d94cb078f6b87b6e9039b64a898115abf307d6d98258dc3087cab7bf472c36e09a67abc9a181b4bdd9883cd59176309d11d403c0284a0b8e2c790449aa6ca98d64368b3923a235252228cf973422a9c373e32d473eba2b6f36c8bbe24cb1f1a9cc3d8720352467dd2e6189e0ee5102720ed70c3fa86b374e81c339ff3378fd33918fccb96decaf1dd7eb9dc73a3d0170a92f5f94a5b6f286706b89a5db779d81f538682292f55d55b79c0c0b19aa8401d7b7795c43cf12aa3985037c5ee99749a74780e94850859862b55fda4c02c71ef37934a25b2983f1e5505644c5ff02f76f097140cd7b9180b89a08f91e42cfc807aa1640b399594aa731911261a69afd364a08220c65c2d5bef35891326be00baf710994eafea36adaa59cb72101c80f28c10405e47916bb64210e5239b117965a647091906812b3d262cd9241a87d8cb1d429917c2280b95163a5247f613e11ac3e0432c0b116572031a69b47253fc76a9b860d7ef962e18a2da79091740a89d550e076ed344f765e7948b577e5a3cc5a3de4ee85fbebd92f38d3fcafc8545a8b4b106b0113c726493d21eecd8bcb2522c949cfc5f4e8fec92496b06b151df2c7682452f90c1ffe5be234b867dd2713e6300dfe5047a447aaafb3016506dc622f258bf44078e2fb85be9e1904fc73fb7bd670de4efd3c1b2313ab9344ca3a2d3ce65ea3f6287c2ffb7f72129a66c0b9682969231a2392913d63d05fc22e06ee04300b5dcb64c2c12899a3706e0362ee75121ef7584c7abc5f95be34acb31ff4802f1878ace3506f3505f91c4b33320d3fdb3b5d14e08e739d32dfcf04238cce0e8db6d444187f5ab409515797db71185d5db6faea3b1fa690d77b2f1c5dd133cf407508665b29eaac42ecefbcc754a4f0811982f7a2f229ff9a4fd92ca882b5a05efd0d389189185233817e695856b9901684340e3639ea4f18d55610774f3e0950118889f0641ec78506e3625cbfea2f4f88597760ea4e5fe5c3220a6a4f35ebf270b5698895250be3afe77
9633a6df1fbdef9e1ef95bae62285d957056c1c2ada6a15944183ac86acb8860be14b3066a509f199547d33cc088a8bbd1e689770e285acf32ccecc1741df2f485978ae35841a53ae50fd24768873ef7335225668aab57921dc3e600db5da8334afab8ffef170e6f33d64080a3eff185e1afb39b7561beef66e2a35a52f4541759e21c21917b0c23a06ee3d4d0c9a83c278e6208966f1a1f98ea6fa4c2c8e9d31e56cfaf4d34559817c168e0bba9425a1d45eae086113aaea0a56c48ccd2d9181aa343489c9ef7a30b84adb7bf85700dfa86a03f7954945e5248d591bf5b41c7b503eb9d98f5d9081ecd2ee4c6f1597576ce6953b10c5768ec7d748c8a79b3b07ae36aa73d0e0f5b15461c45edee05eafcdbdc6b80267e00ca815dff708105365222b8fe674aa2f0a7d404dd11d5c239c29d413998af703feb952d91600c1f269e4324ec87ee0d6f49774787bca42453624bba7d3c0795acc914c82e315a178ece6319d2cd88804765df86174b9491e4e6fdce8765c95660e6f6d5f877c50997e1fd464622179351289006899768e07707c8c25f324757b27fcc817d7f17c088725665c33a75e4aee744b05900b613a5150d9ed290d4bf13cd1ec4c9405d6e059d3e29e8a43be278a787df4f76c9c32b50d7843c1409e3392af13fee4cd50efdc51c0c860ae8f348105cb1e309625d1794d840df94e98c3ebabd67f31385ed25fac10d7165592bc1981f0d6e31780c8e38a63d70970dd19873a6cb4f8df5d49b705864ddd3d367617469264f8cd5f21cab46035dff7fc33206e9c07d2d700bf805b14036222e46f64e807b1bfda22cb5db8191e4038334a5e89bd7f5b2bb2bba97192ada3b2ba5a4a624b33a468eff87e58035b7796ee1d45fa0a7e0d2c4055ff99a0428d7f4d2fbb8ca3396811080db8e7b64547e8cd6d62e05517a979a5d660e82ac2aa0d7824cc24213e9f4be0ef3ea97bcf06e82080262dd459ba6a8e95e9fce0d8df99f33f2fa1daf68a931e35fa550d99f4a532b596b5ae23233cad11d0d6d0915a7102d93a80ed600b7e511367c272492e524f89c327ff174bbe3356b6e64778e13fa81a40013ae239c55effa6bd5b183829ff7dc66f467da310a8027217beeda1f8ea0dd2d115e8ad4fe03df121097ee226821c9aa49fad5aace9db71096e3ac43d877d20f35ce876e76f582b66f04f14279872363b31cf595a152a7993638dee212ee59fc04e1b5e3b89618966374ab00a92c7035f42433bb4bc541aaae8e9d022c5f89a8547db9c6a50dc7ed4ad39cd84824ce80e65ba39290471f1ea357ec328bec104d658d1e2ecbfdcfb1c8ffe82236845b518a6045440c3ccd66f46a8a88eec96dd1e8f48120488dc04c6e45635740002af9d006173db2d9d54ceba70bb6edb443e961d4799b1431d8e5dfbc4b6d0ed29d50297c4518787d838cd1ea5f18d9c3efd0e2c9022acf984a790d370862d31e5ec2d4dcee710a5b8d3885ce55cf65189f644633de93f425e10ea5a802d3f2c7dbde5d0fab6fd3ebc8eb2e9692c725b032166c3cf447f7d6328f51768885c007329f6728e21d5b9cf043b3d07a4a7e4b1f3271214e6d922f8b5dd6b6dfdcc3bb51f0c7546fbbb85416710b41487e681a90d95c43a109ad686097bdfc7e0cc55e666378d19b39c29d13892d7105bb1f4e634efc024945cf5612a0b0975b7f7721d4bc156b3fc94a8684d8dc933b13c7eb2a8004b4a6d43394125d9cbbe3e96ab3291e89d9c7552598876a40871d583a4bc8e53d669737be8e5840b13015888db3e2494adc7c03f88a405b820a5d22ef4b94df741370f3277c681b49d258e110dc36bd167e7fab142e2818768b20a7a6c6086c624d8620546a00de022508b8fabe73ea467877997511334e17cb5f22f1867fdab46db790710a67f01fb18bc7cfc1e2a217e0e8e78d533496f6ef8a73077417463d9ac5380d45f8d60537631a424ab213aa4b6258fdc950f74ca0c66e8da0d8635f42396dbf87763708e973b09031c4eac431fe6fa75479077b5a304684a75d9c31f030023f5543829e2ec37f4242a2265be197faa876ff48e98f797e5dc11de9addb4ef86f632ce84d87a3d44038112ea4386ff299854eb0dc39be0048f46b8d8c43907c62fd85c24ded6f658cf8ccaf30d109277eddad0b98ad729174ae0157d6590b47f1b84c095b7a2c93b74a9d1f4fb9677d3ff4e36c75c45b37aa8ebe1fa1a7aec828e46a866dd9d270b7c1bfd872897b38fa39a6fc850ddbf67d3ea7d5ceb0138bdb4df7130a63285df59ee69956f986300df49749da6eca658bbf92d57b2a1bb87fc7dc31ce0f425d816636fc232c0bcf938a8e8e946b06c822d1af085778df668eded7f6af4f74b926597f2572faa20d59887294ddabb0a970d2582127c879e645d9d3da7e03e0005db736c4b5241d8baf2f990763087931ca61851357aa5f3a2a23c4c5ffb96c4d29afb1d53e0d86f75c85d4283996061beed1117d7245421d5b0a2cfca
d4a36835eaf67d45f177d63197676b34343a87eb3f037b8b18a26d87d817e5d92f41ce9c3596a26b3162245ce768128ac868df48ed1caa1d3e69d9b114242801869adb5fad39d47f9790cc450a05ebb1d8b320cbc70a6ed4f09b139c7427a683c9a9669aa5f87e2b23ac40c161ba9891d578617a99a7e5abcc7fcadbc760fd11c334feb31c26cb953eac52fe6a7e042ccfea6f91399c29939b94c6039a46880a39d13f186a6cbf580cf124477c22cffc1fe72764fa63423832a8b1317b17882566963806b6374700210aff7497a70dac9f876f784750d745f7a5c1ad197f5ae4ec622bb7c8c852f5eb9f35cf37b2b1166c617e9edf2130127677b468ed2bcdd11ad56a64da3cbd5af60b4e971d401a846ed90cb2abc4ba9d49f29acb86cf904e206c2fac1561693aa4974cf4f6cda57ed1c626aaae13307f2020e81b8eb6edea0406ace3ca0007eea43e9272050032eb9d669f6ba6661298b99f56fb1e672ac987cf0bef5792fd7ce58212007644e0315af3c0b17aba76e4bbd82c5b46215050349fb4d5236b9be1414453ca29fd285a5676afa69ffe21e5102a2fe14513e5e2c9f6f6ebad3723512b4e23699e0a0ed8e8f9381302e8c9ab5945aed927ed52752ea5c7805263f9b2d096a9917befb52e120ceff3ffe38ff9a2dd92fcf15bc4eda0c55f85dbb6130906b49ff9550268a09458e459d2b84dca83e10723bdb61ca60db7f4e335d27860bd974c4cef1679a88d246c77895d4f39e1956fab355ae8fac26fdee826180f0cde22ac8cc51645c1d5b389d5c46797e3e09739ef31432da1149599c1a5cc6711588f96b078d139864a65c59af4e1d82c42c5134214d4cb078f059c8a6124ab030f8fd0ed63899b5e030fba4af3d31d151e520aeb185c1e0f72288ff6471d26a746e03414c458ac19b814e8a2f2ce88da4a1bf668ef52feaef431d65de0737db876dc703ae730c4bd95eac1bef51c0b13ed3eea9891448ed8814e93de0c8113ed5799060f94c07f28ee8f1f6028380fc5329b7170f27800786310c00b1aca79cda5f4dbfc7af68893d7105799210549ae9ecdb83a40dd0ca271d3714fc6911059bba82983458cb33c8d341bee46780bab086fc9c864783cdf3ec52b22afd05e7044e00907524206878131e82538fd5d07b29fcfc60c1ffb3123006b519e175d8282166770a1e2599902923479e2d6dede880cf883dc46a2545cb5caa0d7b23cce37905c31832e593ae09bb7484e1cc2f5f6aa5b57bdf4d7bbcb32bccfac5c0ec2ad6b4701b8bedaefd699d33e0a789c46c72ffbc3cab143b02065f8b2a2f9bc2c18dd8dfb68213d9c15949d7dc3f20b3658ccd88ea111fa4c8ca1da1850f398f196ff3f61dd7175e9cb42ff0e998266cfc584f54d30e410b3c8a26543cbc41a500a85659b6d1ad256ef13b7600a3bb089f8d4d6200f60d85189cdfb04d0445774fcc13756454eb6f59baae774378f3c6a9e698250719801e1e154b56e523f2bdc41b10044b719a74eec2eb92a063ec9004dd6ddfb63162fa74de1b6c3dc4b0764bc529925c874bf", 0x1000}], 0x5, &(0x7f0000003440)=[@rights={{0x18, 0x1, 0x1, [r3, r10]}}, @cred={{0x1c, 0x1, 0x2, {0x0, 0xffffffffffffffff, 0xffffffffffffffff}}}, @rights={{0x24, 0x1, 0x1, [0xffffffffffffffff, r4, r9, r9, 0xffffffffffffffff]}}, @rights={{0x1c, 0x1, 0x1, [0xffffffffffffffff, 0xffffffffffffffff, r5]}}, @rights={{0x24, 0x1, 0x1, [r4, 0xffffffffffffffff, r2, r5, 0xffffffffffffffff]}}, @rights={{0x34, 0x1, 0x1, [r3, 0xffffffffffffffff, r1, 0xffffffffffffffff, 0xffffffffffffffff, r1, 0xffffffffffffffff, r1, 0xffffffffffffffff]}}, @rights={{0x20, 0x1, 0x1, [0xffffffffffffffff, 0xffffffffffffffff, r1, 0xffffffffffffffff]}}, @cred={{0x1c, 0x1, 0x2, {0xffffffffffffffff, 0x0, 0xee00}}}], 0x120, 0x4048051}}, {{0x0, 0x0, &(0x7f00000039c0)=[{&(0x7f0000003580)="a1ecd5e53c0083ce40c41015e099c32ff4194d5ce0dbdedb87953a1235884f5c6df78d8defe408da94bd7f83bf955f80700f0f97a4b8528dbc63dc748992c08a7ba35ae02fecae946cac1c59c49ba34cae96f1943519e51851a90b04512e40237160ccae119fd5ee2867d7376c971ad8d87493ce65dd125648cf042241c205e5c81a3d58075c5eeb04cbb520d518f50e9831d37967409026eb00d6774271efec2df5a239f40f8835696b10392d308a8335e317a9a6e1eefb504b36af1623481580344a813090c470e97d9c1c88990d242d9f8d2331a78b6937ea3055df175e6ecd74fe567e92f3fef758f6149231a541", 0xf0}, 
{&(0x7f0000003680)="6b14d4e76866e7cf53e32eb49b7700b1e1f591d762cd65e00e7d130a9e8be0bd96f2502b9ece86064f2ed59d13debaba231b4693ed7c1c96d6e3fe0b494f099291657cecafec504535bb4e917a836a07e9631b504928cddd93cc0b2c32389e42423382d54662b97a4033feb404bb15dfd817b2f692f21f8f9ff2c4e4daa4eaee00cc1f36ee7ff6f51a324740012ecb5b6c96da28966f5b7053747ad1a4d5ab391c47b2451a380bb12811d2c1b2f1b9", 0xaf}, {&(0x7f0000003740)="e49c3588942f8f0e5d65f54e3be58f253664ea3581674e5a856f3401c29f26eb6b945bec1ea4664b6815c27c31204e07327ff21e72c7f551c6fb32be820982c10bc40194d6394c0efe67d088b0c031d95723d98b2b7b4c8f58ac1b7cf877fd7091913bd55949f7691116a993e12cc52e498ebe484824663dfcac49fa1791f7ef0d3268fe3099b9186affd62f5073847d371d57c4d6df8df6071e7e258ba0fed52e95b3c989faa4c8adbe580fc90320f08e15e50eeb9f1d3235fcce1e", 0xbc}, {&(0x7f0000003800)="42b750160b72cf5ad9f62ea81e0871bfb3f68e7d282d31c527422052d23178dee5d4f9caabfe7ab4af91c8b0113e63ff25e967e4f94bcff9e634054d47308b6909f547b2c7546575c4b41f6f48e24dd936a2fceede939880be4f80d7551ae9f72ffb258c520db9d3e5755d48a895900504fdc1dc2af041e528504ededcba8a", 0x7f}, {&(0x7f0000003880)="1e84bdd4929d0d47d5936392fb71c25131b0026a4e7cbefeeb95c52aeab540628d8d449b64a87c19dbe2fe93a21dde5e8ce2870c085f1bb22796dddd635d56f2b9773be564acff4b20dae9873ac29f7b776a522f8678d176d8cd4b2da7f116703514b0baea", 0x65}, {&(0x7f0000003900)="487d39d4f6b4d3bc97fc9b99dcded37d493658c1abf9b2c8f0e34beffa1d303cd388aedca477d06d551f9d12c2fd30770a0c63d82c8d0546e40b99be0415c4328c6f5c64c3de5ad184e51bf1343503410c4c1b1828c61a753290b2d7de9c0413673eec8a72bfecde3ca96b4294997478b9da8527ba658c2c34ef0d6e338b44a32a25c2441bb61765e9a4834727eb1b26778c13030127df2d9430389a", 0x9c}], 0x6, &(0x7f0000003a40)=[@rights={{0x14, 0x1, 0x1, [r6]}}, @cred={{0x1c}}, @rights={{0x14, 0x1, 0x1, [r1]}}], 0x50, 0x4}}, {{&(0x7f0000003ac0)=@file={0x1, './file0\x00'}, 0x6e, &(0x7f0000003c80)=[{&(0x7f0000003b40)="78691de75330c7ea904ece90f959ed5d7a09ecd0173feacbf490f132970f7820229084f95f01609ee9b90ee9f69a55bfebab977903dd710936b515e7778393c797f530c77d03d2bbe7367c3ca299109520e10ad2864a4b73f7be4a8b120affe6a13aac515fa8735892fbaa5a316e4fc1a927374bff8d892a99e0c1e87a0e1c13ab", 0x81}, {&(0x7f0000003c00)="080ee4005a26", 0x6}, {&(0x7f0000003c40)}], 0x3, &(0x7f00000040c0)=[@rights={{0x28, 0x1, 0x1, [0xffffffffffffffff, r4, r10, 0xffffffffffffffff, 0xffffffffffffffff, r10]}}, @rights={{0x34, 0x1, 0x1, [r1, 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff]}}, @cred={{0x1c, 0x1, 0x2, {0x0, 0xee01}}}, @cred={{0x1c}}], 0xa0, 0x40081}}, {{&(0x7f0000004180)=@file={0x1, './file0\x00'}, 0x6e, &(0x7f0000004300)=[{&(0x7f0000004200)="4a20a2939b4b442d449e7fe821f35478aa386f64733aa342ff9b6bf12ffa51908a11a72ea94a2fd73cd76496af07d7a9dd33334facd376028431c2aa44fe", 0x3e}, {&(0x7f0000004240)="5c6a284b37e6547bc5948c5f00ca0cb83ee3370e388fbaba63163bb5f2fc81bbf76d5c6df56b06edbfac6e3db95455412e478afdb6075a817711b1e6", 0x3c}, {&(0x7f0000004280)="60c0edf1ad3739923ef15fd1f3ba7a04841db2d70eb1a39633e74a92a91ecb77939e0ceb2e9b836749cfbc85492d67b5fb9739d43d6e7297d5d63e6f6dc8878abf7d87dfe8f3d4", 0x47}], 0x3, &(0x7f0000004400)=[@rights={{0x14, 0x1, 0x1, [0xffffffffffffffff]}}, @rights={{0x38, 0x1, 0x1, [r0, r4, 0xffffffffffffffff, r6, r5, 0xffffffffffffffff, r8, r9, r9, 0xffffffffffffffff]}}, @cred={{0x1c, 0x1, 0x2, {0xffffffffffffffff, 0xee01, r12}}}], 0x70}}], 0x6, 0x0) sendmsg$nl_route(r10, &(0x7f0000000000)={0x0, 0x0, 
&(0x7f0000000140)={&(0x7f00000003c0)=ANY=[@ANYBLOB="50000000100001040000ff0f0000000000000000", @ANYRES32=0x0, @ANYBLOB="00000000000000002800128009000100766c616e00000000180002800c0002001c0000001b000000060001000100000008000500", @ANYRES32=r11], 0x50}}, 0x0) getsockname$packet(r9, &(0x7f0000000100)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000200)) sendmsg$nl_route(r8, &(0x7f0000000080)={0x0, 0x0, &(0x7f0000000040)={&(0x7f00000000c0)=ANY=[@ANYBLOB="28000000100025080000000000f15cd9de000000", @ANYRES32=r13, @ANYBLOB="000000000000000008000a0010"], 0x28}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000140)={0x0, 0x0, &(0x7f00000001c0)={&(0x7f0000000580)=@dellink={0x20, 0x11, 0x21, 0x0, 0x0, {0x2, 0x0, 0x0, r7}}, 0x20}}, 0x0) 17:02:42 executing program 1: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x28a, 0xa, {0x0, 0x0, 0x0, 0x0, 0x6000000}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8}]}, 0x3c}}, 0x0) [ 2797.445361][T20901] workqueue: Failed to create a rescuer kthread for wq "bond743": -EINTR [ 2797.536876][T20907] netlink: 'syz-executor.2': attribute type 1 has an invalid length. [ 2797.631733][T20907] bond972: entered promiscuous mode [ 2797.637651][T20907] 8021q: adding VLAN 0 to HW filter on device bond972 [ 2797.712537][T20908] bond972: (slave bridge1038): making interface the new active one [ 2797.722848][T20908] bridge1038: entered promiscuous mode [ 2797.747129][T20908] bond972: (slave bridge1038): Enslaving as an active interface with an up link 17:02:42 executing program 2: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0xe6ab1600, 0x0, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) [ 2797.768857][T20912] netlink: 'syz-executor.5': attribute type 1 has an invalid length. 
[ 2797.864319][T20912] bond1040: entered promiscuous mode [ 2797.870386][T20912] 8021q: adding VLAN 0 to HW filter on device bond1040 [ 2797.991078][T20913] bond1040: (slave bridge1067): making interface the new active one [ 2798.007720][T20913] bridge1067: entered promiscuous mode [ 2798.025239][T20913] bond1040: (slave bridge1067): Enslaving as an active interface with an up link 17:02:43 executing program 5: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x3a, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) 17:02:43 executing program 3: r0 = socket$netlink(0x10, 0x3, 0x0) socket(0x0, 0x0, 0x0) sendmsg$nl_route(0xffffffffffffffff, 0x0, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000100)=@newlink={0x48, 0x10, 0xffffff1f, 0x0, 0x0, {}, [@IFLA_LINKINFO={0x28, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x18, 0x2, 0x0, 0x1, [@IFLA_BR_MULTI_BOOLOPT={0xc, 0x2e, {0x0, 0x1}}, @IFLA_BR_MCAST_STATS_ENABLED={0x5}]}}}]}, 0x48}, 0x1, 0x0, 0x0, 0xf0ffffff}, 0x0) [ 2798.104442][T20922] netlink: 'syz-executor.4': attribute type 1 has an invalid length. 17:02:43 executing program 4: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x28a, 0x0, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0x63}, {0x4}}}, @IFLA_MASTER={0x8}]}, 0x3c}}, 0x0) [ 2798.191024][T20922] bond620: entered promiscuous mode [ 2798.198511][T20922] 8021q: adding VLAN 0 to HW filter on device bond620 [ 2798.215483][T20925] netlink: 16 bytes leftover after parsing attributes in process `syz-executor.4'. [ 2798.226247][T20929] netlink: 'syz-executor.1': attribute type 1 has an invalid length. 
[ 2798.351801][T20929] bond743: entered promiscuous mode [ 2798.357435][T20929] 8021q: adding VLAN 0 to HW filter on device bond743 17:02:43 executing program 1: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x28a, 0xa, {0x0, 0x0, 0x0, 0x0, 0x8000000}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8}]}, 0x3c}}, 0x0) [ 2798.395684][T20924] netlink: 12 bytes leftover after parsing attributes in process `syz-executor.0'. 17:02:43 executing program 0: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x28a, 0x0, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0x3}, {0x4}}}, @IFLA_MASTER={0x8}]}, 0x3c}}, 0x0) [ 2798.410926][T20924] workqueue: Failed to create a rescuer kthread for wq "bond490": -EINTR [ 2798.507912][T20936] netlink: 'syz-executor.2': attribute type 1 has an invalid length. 
[ 2798.583297][T20936] bond973: entered promiscuous mode [ 2798.589587][T20936] 8021q: adding VLAN 0 to HW filter on device bond973 [ 2798.671621][T20938] bond973: (slave bridge1039): making interface the new active one [ 2798.680514][T20938] bridge1039: entered promiscuous mode [ 2798.694439][T20938] bond973: (slave bridge1039): Enslaving as an active interface with an up link 17:02:43 executing program 2: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0xe7ab1600, 0x0, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) [ 2798.772969][T20940] bond1041: entered promiscuous mode [ 2798.779999][T20940] 8021q: adding VLAN 0 to HW filter on device bond1041 [ 2798.930857][T20944] bond1041: (slave bridge1068): making interface the new active one [ 2798.941932][T20944] bridge1068: entered promiscuous mode [ 2798.954209][T20944] bond1041: (slave bridge1068): Enslaving as an active interface with an up link 17:02:43 executing program 3: r0 = socket$netlink(0x10, 0x3, 0x0) socket(0x0, 0x0, 0x0) sendmsg$nl_route(0xffffffffffffffff, 0x0, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000100)=@newlink={0x48, 0x10, 0xffffff1f, 0x0, 0x0, {}, [@IFLA_LINKINFO={0x28, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x18, 0x2, 0x0, 0x1, [@IFLA_BR_MULTI_BOOLOPT={0xc, 0x2e, {0x0, 0x1}}, @IFLA_BR_MCAST_STATS_ENABLED={0x5}]}}}]}, 0x48}, 0x1, 0x0, 0x0, 0xffffa888}, 0x0) 17:02:44 executing program 5: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x3c, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) [ 2799.120558][T20952] bond621: entered promiscuous mode [ 2799.166954][T20952] 8021q: adding VLAN 0 to HW filter on device bond621 [ 2799.193799][T20953] netlink: 16 bytes leftover after parsing attributes in process `syz-executor.4'. 
17:02:44 executing program 4: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x28a, 0x0, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb, 0x2}, {0x4}}}, @IFLA_MASTER={0x8}]}, 0x3c}}, 0x0) [ 2799.281784][T20957] bond744: entered promiscuous mode [ 2799.287393][T20957] 8021q: adding VLAN 0 to HW filter on device bond744 17:02:44 executing program 1: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x28a, 0xa, {0x0, 0x0, 0x0, 0x0, 0xa000000}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8}]}, 0x3c}}, 0x0) [ 2799.443312][T20961] bond490: entered promiscuous mode [ 2799.458564][T20961] 8021q: adding VLAN 0 to HW filter on device bond490 [ 2799.481880][T20962] netlink: 16 bytes leftover after parsing attributes in process `syz-executor.0'. 
17:02:44 executing program 0: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x28a, 0x0, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0x3}, {0x4}}}, @IFLA_MASTER={0x8}]}, 0x3c}}, 0x0) [ 2799.549507][T20965] bond974: entered promiscuous mode [ 2799.555519][T20965] 8021q: adding VLAN 0 to HW filter on device bond974 17:02:44 executing program 2: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0xe8ab1600, 0x0, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) [ 2799.661512][T20966] bond974: (slave bridge1040): making interface the new active one [ 2799.669921][T20966] bridge1040: entered promiscuous mode [ 2799.684077][T20966] bond974: (slave bridge1040): Enslaving as an active interface with an up link 17:02:44 executing program 3: r0 = socket$netlink(0x10, 0x3, 0x0) socket(0x0, 0x0, 0x0) sendmsg$nl_route(0xffffffffffffffff, 0x0, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000100)=@newlink={0x48, 0x10, 0xffffff1f, 0x0, 0x0, {}, [@IFLA_LINKINFO={0x28, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x18, 0x2, 0x0, 0x1, [@IFLA_BR_MULTI_BOOLOPT={0xc, 0x2e, {0x0, 0x1}}, @IFLA_BR_MCAST_STATS_ENABLED={0x5}]}}}]}, 0x48}, 0x1, 0x0, 0x0, 0xfffff000}, 0x0) [ 2799.810556][T20974] bond1042: entered promiscuous mode [ 2799.816247][T20974] 8021q: adding VLAN 0 to HW filter on device bond1042 [ 2799.888248][T20976] bond1042: (slave bridge1069): making interface the new active one [ 2799.900242][T20976] bridge1069: entered promiscuous mode [ 2799.912581][T20976] bond1042: (slave bridge1069): Enslaving as an active interface with an up link 17:02:44 executing program 5: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) 
sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x48, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) [ 2799.994951][T20980] bond622: entered promiscuous mode [ 2800.001414][T20980] 8021q: adding VLAN 0 to HW filter on device bond622 17:02:45 executing program 4: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x28a, 0x0, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb, 0x3}, {0x4}}}, @IFLA_MASTER={0x8}]}, 0x3c}}, 0x0) [ 2800.094108][T20984] bond745: entered promiscuous mode [ 2800.102487][T20984] 8021q: adding VLAN 0 to HW filter on device bond745 17:02:45 executing program 1: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x28a, 0xa, {0x0, 0x0, 0x0, 0x0, 0xc000000}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8}]}, 0x3c}}, 0x0) [ 2800.191117][T20988] bond491: entered promiscuous mode [ 2800.196843][T20988] 8021q: adding VLAN 0 to HW filter on device bond491 [ 2800.211771][T20989] netlink: 16 bytes leftover after parsing attributes in process `syz-executor.0'. 
17:02:45 executing program 0: openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000001c0)='cgroup.controllers\x00', 0x275a, 0x0) r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000140)='memory.events\x00', 0x7a05, 0x1700) r1 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) write$cgroup_int(r0, &(0x7f0000000200), 0xf000) sendfile(r0, r1, 0x0, 0xf03b0000) r2 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) sendfile(r2, r1, &(0x7f00000002c0)=0x335773c3, 0x8) bpf$MAP_LOOKUP_BATCH(0x18, &(0x7f00000000c0)={&(0x7f0000000240)="6ef86ee493c2901c16f839f3bbd8f9b5ff5e8a0c295437883c80b6752041b09fccdf64de2d7f4f80f86a1a2b6d91288a3d703399c89e20e2c6b1929962d46a9b2154d54544ed25cd09fce15980121e63d5d7195b6690f633942c238e98e84183c7179104b296ad57a966f69637bc80cf1fe7bc98b56ade520f027f586bad2d0028d2108403bf", &(0x7f0000000300)=""/226, &(0x7f0000000400)="e85a0dca47b99378bb74bc7319bf1556f113d64bebb879a39751fc4c92208ccda034cbff455793432e040e9bb15a823fc90b8e54a6824e55a878eb86b515e129712371c2f71059aeec4e211a921fd185e32e5ac495a7bab083c6949f2d6579f57b64f5b2af7c57c13119c6e9ba50da28e66fb22d66410547e5711ff735c0b9319ae67133466435da2a96b78b13b25b3f2d352a2e7dedda0066dcee82", &(0x7f00000004c0)="171e5a16e5f38e9978f806d4a48b24ffde490ec32ac72779354354876b7fe5c5e29ecafce25fa0d7cf5e5dfaf0d64c9af33c0deb856023fce2a67cc178005e0822a24547dc693ac6473503b591973b0188cc0fad244c145bb208f68d8568a6bdc12659b56bf1b52a926e3f4da2243dd7dfa17a64127fd65cb27f1aa39c033a58f6787740895ae243c937b7458e1312cd66d9a249a58724aeb73116de962e", 0x7fffffff, r2}, 0x38) r3 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000080)='cgroup.controllers\x00', 0xb00000000065808, 0x0) r4 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000001c0)='cgroup.controllers\x00', 0x275a, 0x0) write$binfmt_script(r4, &(0x7f0000000480)=ANY=[], 0x208e24b) mmap(&(0x7f0000000000/0xb36000)=nil, 0xb36000, 0x2, 0x28011, r4, 0x0) r5 = syz_genetlink_get_family_id$ethtool(&(0x7f0000000100), 0xffffffffffffffff) sendmsg$ETHTOOL_MSG_COALESCE_SET(0xffffffffffffffff, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000200)={&(0x7f0000000000)={0x34, r5, 0x1, 0x0, 0x0, {}, [@ETHTOOL_A_COALESCE_HEADER={0x18, 0x1, 0x0, 0x1, [@ETHTOOL_A_HEADER_DEV_NAME={0x14, 0x2, 'netdevsim0\x00'}]}, @ETHTOOL_A_COALESCE_RX_USECS_IRQ={0x8}]}, 0x34}}, 0x0) r6 = socket$nl_generic(0x10, 0x3, 0x10) sendfile(r6, r3, 0x0, 0x10000a006) 17:02:45 executing program 3: r0 = socket$netlink(0x10, 0x3, 0x0) socket(0x0, 0x0, 0x0) sendmsg$nl_route(0xffffffffffffffff, 0x0, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000100)=@newlink={0x48, 0x10, 0xffffff1f, 0x0, 0x0, {}, [@IFLA_LINKINFO={0x28, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x18, 0x2, 0x0, 0x1, [@IFLA_BR_MULTI_BOOLOPT={0xc, 0x2e, {0x0, 0x1}}, @IFLA_BR_MCAST_STATS_ENABLED={0x5}]}}}]}, 0x48}, 0x1, 0x0, 0x0, 0xffffff7f}, 0x0) [ 2800.331922][ T27] audit: type=1804 audit(1688403765.294:415): pid=21014 uid=0 auid=4294967295 ses=4294967295 subj=unconfined op=invalid_pcr cause=open_writers comm="syz-executor.0" name="/root/syzkaller-testdir444109964/syzkaller.UHVosO/1916/cgroup.controllers" dev="sda1" ino=1971 res=1 errno=0 [ 2800.376451][ T27] audit: type=1804 audit(1688403765.334:416): pid=21014 uid=0 auid=4294967295 ses=4294967295 subj=unconfined op=invalid_pcr cause=ToMToU comm="syz-executor.0" name="/root/syzkaller-testdir444109964/syzkaller.UHVosO/1916/cgroup.controllers" dev="sda1" ino=1971 res=1 errno=0 [ 2800.385972][T20996] bond975: entered promiscuous mode [ 
2800.435122][T20996] 8021q: adding VLAN 0 to HW filter on device bond975 [ 2800.526116][T20998] bond975: (slave bridge1041): making interface the new active one [ 2800.578901][T20998] bridge1041: entered promiscuous mode [ 2800.605100][T20998] bond975: (slave bridge1041): Enslaving as an active interface with an up link 17:02:45 executing program 2: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0xe9ab1600, 0x0, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) [ 2800.670721][T21001] bond1043: entered promiscuous mode [ 2800.676587][T21001] 8021q: adding VLAN 0 to HW filter on device bond1043 17:02:45 executing program 0: openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000001c0)='cgroup.controllers\x00', 0x275a, 0x0) r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000140)='memory.events\x00', 0x7a05, 0x1700) r1 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) (async) write$cgroup_int(r0, &(0x7f0000000200), 0xf000) sendfile(r0, r1, 0x0, 0xf03b0000) (async) r2 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) sendfile(r2, r1, &(0x7f00000002c0)=0x335773c3, 0x8) bpf$MAP_LOOKUP_BATCH(0x18, &(0x7f00000000c0)={&(0x7f0000000240)="6ef86ee493c2901c16f839f3bbd8f9b5ff5e8a0c295437883c80b6752041b09fccdf64de2d7f4f80f86a1a2b6d91288a3d703399c89e20e2c6b1929962d46a9b2154d54544ed25cd09fce15980121e63d5d7195b6690f633942c238e98e84183c7179104b296ad57a966f69637bc80cf1fe7bc98b56ade520f027f586bad2d0028d2108403bf", &(0x7f0000000300)=""/226, &(0x7f0000000400)="e85a0dca47b99378bb74bc7319bf1556f113d64bebb879a39751fc4c92208ccda034cbff455793432e040e9bb15a823fc90b8e54a6824e55a878eb86b515e129712371c2f71059aeec4e211a921fd185e32e5ac495a7bab083c6949f2d6579f57b64f5b2af7c57c13119c6e9ba50da28e66fb22d66410547e5711ff735c0b9319ae67133466435da2a96b78b13b25b3f2d352a2e7dedda0066dcee82", &(0x7f00000004c0)="171e5a16e5f38e9978f806d4a48b24ffde490ec32ac72779354354876b7fe5c5e29ecafce25fa0d7cf5e5dfaf0d64c9af33c0deb856023fce2a67cc178005e0822a24547dc693ac6473503b591973b0188cc0fad244c145bb208f68d8568a6bdc12659b56bf1b52a926e3f4da2243dd7dfa17a64127fd65cb27f1aa39c033a58f6787740895ae243c937b7458e1312cd66d9a249a58724aeb73116de962e", 0x7fffffff, r2}, 0x38) (async) r3 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000080)='cgroup.controllers\x00', 0xb00000000065808, 0x0) r4 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000001c0)='cgroup.controllers\x00', 0x275a, 0x0) write$binfmt_script(r4, &(0x7f0000000480)=ANY=[], 0x208e24b) (async, rerun: 32) mmap(&(0x7f0000000000/0xb36000)=nil, 0xb36000, 0x2, 0x28011, r4, 0x0) (async, rerun: 32) r5 = syz_genetlink_get_family_id$ethtool(&(0x7f0000000100), 0xffffffffffffffff) sendmsg$ETHTOOL_MSG_COALESCE_SET(0xffffffffffffffff, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000200)={&(0x7f0000000000)={0x34, r5, 0x1, 0x0, 0x0, {}, 
[@ETHTOOL_A_COALESCE_HEADER={0x18, 0x1, 0x0, 0x1, [@ETHTOOL_A_HEADER_DEV_NAME={0x14, 0x2, 'netdevsim0\x00'}]}, @ETHTOOL_A_COALESCE_RX_USECS_IRQ={0x8}]}, 0x34}}, 0x0) (async, rerun: 64) r6 = socket$nl_generic(0x10, 0x3, 0x10) (rerun: 64) sendfile(r6, r3, 0x0, 0x10000a006) 17:02:45 executing program 5: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x4a, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) [ 2800.748511][T21003] bond1043: (slave bridge1070): making interface the new active one [ 2800.757788][T21003] bridge1070: entered promiscuous mode [ 2800.772690][T21003] bond1043: (slave bridge1070): Enslaving as an active interface with an up link [ 2800.879729][T21006] bond623: entered promiscuous mode [ 2800.895810][T21006] 8021q: adding VLAN 0 to HW filter on device bond623 [ 2800.913432][ T27] audit: type=1804 audit(1688403765.874:417): pid=21034 uid=0 auid=4294967295 ses=4294967295 subj=unconfined op=invalid_pcr cause=open_writers comm="syz-executor.0" name="/root/syzkaller-testdir444109964/syzkaller.UHVosO/1917/cgroup.controllers" dev="sda1" ino=1971 res=1 errno=0 17:02:45 executing program 4: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x28a, 0x0, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb, 0x5}, {0x4}}}, @IFLA_MASTER={0x8}]}, 0x3c}}, 0x0) [ 2801.032201][T21010] bond746: entered promiscuous mode [ 2801.038105][T21010] 8021q: adding VLAN 0 to HW filter on device bond746 [ 2801.039692][ T27] audit: type=1804 audit(1688403765.874:418): pid=21032 uid=0 auid=4294967295 ses=4294967295 subj=unconfined op=invalid_pcr cause=ToMToU comm="syz-executor.0" name="/root/syzkaller-testdir444109964/syzkaller.UHVosO/1917/cgroup.controllers" dev="sda1" ino=1971 res=1 errno=0 17:02:46 executing program 1: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, 
&(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x28a, 0xa, {0x0, 0x0, 0x0, 0x0, 0xe000000}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8}]}, 0x3c}}, 0x0) 17:02:46 executing program 0: openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000001c0)='cgroup.controllers\x00', 0x275a, 0x0) r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000140)='memory.events\x00', 0x7a05, 0x1700) r1 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) write$cgroup_int(r0, &(0x7f0000000200), 0xf000) sendfile(r0, r1, 0x0, 0xf03b0000) r2 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) sendfile(r2, r1, &(0x7f00000002c0)=0x335773c3, 0x8) bpf$MAP_LOOKUP_BATCH(0x18, &(0x7f00000000c0)={&(0x7f0000000240)="6ef86ee493c2901c16f839f3bbd8f9b5ff5e8a0c295437883c80b6752041b09fccdf64de2d7f4f80f86a1a2b6d91288a3d703399c89e20e2c6b1929962d46a9b2154d54544ed25cd09fce15980121e63d5d7195b6690f633942c238e98e84183c7179104b296ad57a966f69637bc80cf1fe7bc98b56ade520f027f586bad2d0028d2108403bf", &(0x7f0000000300)=""/226, &(0x7f0000000400)="e85a0dca47b99378bb74bc7319bf1556f113d64bebb879a39751fc4c92208ccda034cbff455793432e040e9bb15a823fc90b8e54a6824e55a878eb86b515e129712371c2f71059aeec4e211a921fd185e32e5ac495a7bab083c6949f2d6579f57b64f5b2af7c57c13119c6e9ba50da28e66fb22d66410547e5711ff735c0b9319ae67133466435da2a96b78b13b25b3f2d352a2e7dedda0066dcee82", &(0x7f00000004c0)="171e5a16e5f38e9978f806d4a48b24ffde490ec32ac72779354354876b7fe5c5e29ecafce25fa0d7cf5e5dfaf0d64c9af33c0deb856023fce2a67cc178005e0822a24547dc693ac6473503b591973b0188cc0fad244c145bb208f68d8568a6bdc12659b56bf1b52a926e3f4da2243dd7dfa17a64127fd65cb27f1aa39c033a58f6787740895ae243c937b7458e1312cd66d9a249a58724aeb73116de962e", 0x7fffffff, r2}, 0x38) r3 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000080)='cgroup.controllers\x00', 0xb00000000065808, 0x0) r4 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000001c0)='cgroup.controllers\x00', 0x275a, 0x0) write$binfmt_script(r4, &(0x7f0000000480)=ANY=[], 0x208e24b) mmap(&(0x7f0000000000/0xb36000)=nil, 0xb36000, 0x2, 0x28011, r4, 0x0) r5 = syz_genetlink_get_family_id$ethtool(&(0x7f0000000100), 0xffffffffffffffff) sendmsg$ETHTOOL_MSG_COALESCE_SET(0xffffffffffffffff, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000200)={&(0x7f0000000000)={0x34, r5, 0x1, 0x0, 0x0, {}, [@ETHTOOL_A_COALESCE_HEADER={0x18, 0x1, 0x0, 0x1, [@ETHTOOL_A_HEADER_DEV_NAME={0x14, 0x2, 'netdevsim0\x00'}]}, @ETHTOOL_A_COALESCE_RX_USECS_IRQ={0x8}]}, 0x34}}, 0x0) r6 = socket$nl_generic(0x10, 0x3, 0x10) sendfile(r6, r3, 0x0, 0x10000a006) openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000001c0)='cgroup.controllers\x00', 0x275a, 0x0) (async) openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000140)='memory.events\x00', 0x7a05, 0x1700) (async) openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) (async) write$cgroup_int(r0, &(0x7f0000000200), 0xf000) (async) sendfile(r0, r1, 0x0, 0xf03b0000) (async) openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) (async) sendfile(r2, r1, &(0x7f00000002c0)=0x335773c3, 0x8) (async) bpf$MAP_LOOKUP_BATCH(0x18, 
&(0x7f00000000c0)={&(0x7f0000000240)="6ef86ee493c2901c16f839f3bbd8f9b5ff5e8a0c295437883c80b6752041b09fccdf64de2d7f4f80f86a1a2b6d91288a3d703399c89e20e2c6b1929962d46a9b2154d54544ed25cd09fce15980121e63d5d7195b6690f633942c238e98e84183c7179104b296ad57a966f69637bc80cf1fe7bc98b56ade520f027f586bad2d0028d2108403bf", &(0x7f0000000300)=""/226, &(0x7f0000000400)="e85a0dca47b99378bb74bc7319bf1556f113d64bebb879a39751fc4c92208ccda034cbff455793432e040e9bb15a823fc90b8e54a6824e55a878eb86b515e129712371c2f71059aeec4e211a921fd185e32e5ac495a7bab083c6949f2d6579f57b64f5b2af7c57c13119c6e9ba50da28e66fb22d66410547e5711ff735c0b9319ae67133466435da2a96b78b13b25b3f2d352a2e7dedda0066dcee82", &(0x7f00000004c0)="171e5a16e5f38e9978f806d4a48b24ffde490ec32ac72779354354876b7fe5c5e29ecafce25fa0d7cf5e5dfaf0d64c9af33c0deb856023fce2a67cc178005e0822a24547dc693ac6473503b591973b0188cc0fad244c145bb208f68d8568a6bdc12659b56bf1b52a926e3f4da2243dd7dfa17a64127fd65cb27f1aa39c033a58f6787740895ae243c937b7458e1312cd66d9a249a58724aeb73116de962e", 0x7fffffff, r2}, 0x38) (async) openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000080)='cgroup.controllers\x00', 0xb00000000065808, 0x0) (async) openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000001c0)='cgroup.controllers\x00', 0x275a, 0x0) (async) write$binfmt_script(r4, &(0x7f0000000480)=ANY=[], 0x208e24b) (async) mmap(&(0x7f0000000000/0xb36000)=nil, 0xb36000, 0x2, 0x28011, r4, 0x0) (async) syz_genetlink_get_family_id$ethtool(&(0x7f0000000100), 0xffffffffffffffff) (async) sendmsg$ETHTOOL_MSG_COALESCE_SET(0xffffffffffffffff, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000200)={&(0x7f0000000000)={0x34, r5, 0x1, 0x0, 0x0, {}, [@ETHTOOL_A_COALESCE_HEADER={0x18, 0x1, 0x0, 0x1, [@ETHTOOL_A_HEADER_DEV_NAME={0x14, 0x2, 'netdevsim0\x00'}]}, @ETHTOOL_A_COALESCE_RX_USECS_IRQ={0x8}]}, 0x34}}, 0x0) (async) socket$nl_generic(0x10, 0x3, 0x10) (async) sendfile(r6, r3, 0x0, 0x10000a006) (async) 17:02:46 executing program 3: r0 = socket$netlink(0x10, 0x3, 0x0) socket(0x0, 0x0, 0x0) sendmsg$nl_route(0xffffffffffffffff, 0x0, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000100)=@newlink={0x48, 0x10, 0xffffff1f, 0x0, 0x0, {}, [@IFLA_LINKINFO={0x28, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x18, 0x2, 0x0, 0x1, [@IFLA_BR_MULTI_BOOLOPT={0xc, 0x2e, {0x0, 0x1}}, @IFLA_BR_MCAST_STATS_ENABLED={0x5}]}}}]}, 0x48}, 0x1, 0x0, 0x0, 0xffffff9e}, 0x0) [ 2801.250934][T21027] bond976: entered promiscuous mode [ 2801.259994][T21027] 8021q: adding VLAN 0 to HW filter on device bond976 [ 2801.286472][ T27] audit: type=1804 audit(1688403766.244:419): pid=21055 uid=0 auid=4294967295 ses=4294967295 subj=unconfined op=invalid_pcr cause=open_writers comm="syz-executor.0" name="/root/syzkaller-testdir444109964/syzkaller.UHVosO/1918/cgroup.controllers" dev="sda1" ino=1971 res=1 errno=0 [ 2801.314401][ T27] audit: type=1804 audit(1688403766.244:420): pid=21055 uid=0 auid=4294967295 ses=4294967295 subj=unconfined op=invalid_pcr cause=ToMToU comm="syz-executor.0" name="/root/syzkaller-testdir444109964/syzkaller.UHVosO/1918/cgroup.controllers" dev="sda1" ino=1971 res=1 errno=0 17:02:46 executing program 0: r0 = socket$packet(0x11, 0x3, 0x300) r1 = socket(0x10, 0x3, 0x0) r2 = socket$nl_route(0x10, 0x3, 0x0) r3 = socket(0x10, 0x803, 0x0) socket$nl_generic(0x10, 0x3, 0x10) sendmsg$BATADV_CMD_GET_MESH(r3, &(0x7f00000004c0)={0x0, 0x0, &(0x7f0000000280)={0x0}, 0x1, 0x0, 0x0, 0x20000075}, 0x0) getsockname$packet(r3, &(0x7f0000000080)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000200)=0x14) r5 = 
openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000140)='memory.events\x00', 0x7a05, 0x1700) r6 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) write$cgroup_int(r5, &(0x7f0000000200), 0xf000) sendfile(r5, r6, 0x0, 0xf03b0000) sendfile(r5, r5, &(0x7f0000000180), 0x0) sendmsg$TIPC_CMD_GET_LINKS(r5, &(0x7f0000000480)={&(0x7f00000002c0)={0x10, 0x0, 0x0, 0x100}, 0xc, &(0x7f0000000340)={&(0x7f0000000300)={0x24, 0x0, 0x1, 0x70bd26, 0x25dfdbfe, {{}, {}, {0x8, 0x11, 0x8}}, ["", "", "", "", "", "", "", ""]}, 0x24}, 0x1, 0x0, 0x0, 0x4008000}, 0x40001) sendmsg$nl_route(r2, &(0x7f0000000000)={0x0, 0x0, &(0x7f0000000140)={&(0x7f0000000a00)=ANY=[@ANYBLOB="090000000000000023eeffffffe0ffe602000040", @ANYRES32=r4, @ANYBLOB="01000000010000001c0012000c0001006272696467651b0e27aa46056da6e25c4e901e86e63236c1d7fab7b162f8ee479fedc831c32d5e9b4292a8153fd531ce0bef6fffffffffb7c3c18d73454752bd912344c9edb5d3ceffcd92237b1bcf7ed31d78ef4b6f3d11e1e72732a820a56aadec7f1dde90621ef3861ae9cf029b5fc7a8052cbeb67ee1fdc50000a1d0edcf1d1fff03919a707c6a9ccc5dc1a5548371893be94318612225d717d6e6b43ed1648b0f7b6b234cc8212d7b43a8b3fd359f4d182bf27fbacde4c3021627d1fa4661e447d73718c2a41a0e03a53499686e5afceb0b1a7fa622c282d4c80303d4fd75bba670d7a1f1aae34d5506e7efa257ce77ff45043301785dc1a333027ee48ef2eb6cfacddfaad1e712b29204c78af6c5d31edbfb06e9f88f13ad25319a0500194c2e3f1124af531f27d22635ee06b04353a5eff2e9be79c3897e14df111cf81f4b61292cef6f3f7c07354302c90fd20d14c5adf7266a072392c99d1d6547a3da8d08bcff5d4db99e01ac560a55d45c43ca3d00"/404], 0x3c}, 0x1, 0x0, 0x0, 0x800}, 0x0) sendmsg$nl_route_sched(r1, &(0x7f00000007c0)={0x0, 0x0, &(0x7f00000000c0)={&(0x7f0000000240)=@newqdisc={0x2c, 0x24, 0xe0b, 0x0, 0x0, {0x0, 0x0, 0x0, r4, {}, {0xffff, 0xffff}}, [@qdisc_kind_options=@q_qfg={0x8}]}, 0x2c}}, 0x0) bind$packet(r0, &(0x7f0000000100)={0x11, 0x0, r4, 0x1, 0x0, 0x6, @multicast}, 0x14) sendto$packet(r0, &(0x7f0000000380)="93a70b0100001006ff7f00000800", 0x36, 0x0, 0x0, 0x0) r7 = socket$alg(0x26, 0x5, 0x0) recvmsg(r7, &(0x7f00000009c0)={&(0x7f0000000180)=@pppol2tpv3={0x18, 0x1, {0x0, 0xffffffffffffffff, {0x2, 0x0, @private}}}, 0x80, &(0x7f0000000880)=[{&(0x7f0000000040)=""/63, 0x3f}, {&(0x7f00000003c0)=""/157, 0x9d}, {&(0x7f0000000500)=""/179, 0xb3}, {&(0x7f00000005c0)=""/223, 0xdf}, {&(0x7f00000006c0)=""/109, 0x6d}, {&(0x7f0000000340)}, {&(0x7f0000000740)=""/116, 0x74}, {&(0x7f0000000800)=""/2, 0x2}, {&(0x7f0000000840)=""/54, 0x36}], 0x9, &(0x7f0000000940)=""/77, 0x4d}, 0x200) [ 2801.445993][T21028] bond976: (slave bridge1042): making interface the new active one [ 2801.454766][T21028] bridge1042: entered promiscuous mode [ 2801.484816][T21028] bond976: (slave bridge1042): Enslaving as an active interface with an up link 17:02:46 executing program 2: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0xea020000, 0x0, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, 
@IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) [ 2801.507510][T21033] validate_nla: 14 callbacks suppressed [ 2801.507534][T21033] netlink: 'syz-executor.5': attribute type 1 has an invalid length. [ 2801.610590][T21033] bond1044: entered promiscuous mode [ 2801.616957][T21033] 8021q: adding VLAN 0 to HW filter on device bond1044 17:02:46 executing program 5: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x4c, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) [ 2801.694785][T21038] bond1044: (slave bridge1071): making interface the new active one [ 2801.703891][T21038] bridge1071: entered promiscuous mode [ 2801.718975][T21038] bond1044: (slave bridge1071): Enslaving as an active interface with an up link [ 2801.731136][T21046] netlink: 'syz-executor.1': attribute type 1 has an invalid length. [ 2801.813872][T21046] bond747: entered promiscuous mode [ 2801.822781][T21046] 8021q: adding VLAN 0 to HW filter on device bond747 [ 2801.835429][T21044] netlink: 'syz-executor.4': attribute type 1 has an invalid length. [ 2801.890570][T21044] bond624: entered promiscuous mode [ 2801.896595][T21044] 8021q: adding VLAN 0 to HW filter on device bond624 17:02:46 executing program 3: r0 = socket$netlink(0x10, 0x3, 0x0) socket(0x0, 0x0, 0x0) sendmsg$nl_route(0xffffffffffffffff, 0x0, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000100)=@newlink={0x48, 0x10, 0xffffff1f, 0x0, 0x0, {}, [@IFLA_LINKINFO={0x28, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x18, 0x2, 0x0, 0x1, [@IFLA_BR_MULTI_BOOLOPT={0xc, 0x2e, {0x0, 0x1}}, @IFLA_BR_MCAST_STATS_ENABLED={0x5}]}}}]}, 0x48}, 0x1, 0x0, 0x0, 0xfffffff0}, 0x0) 17:02:46 executing program 4: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x28a, 0x0, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb, 0x7}, {0x4}}}, @IFLA_MASTER={0x8}]}, 0x3c}}, 0x0) [ 2801.995997][T21069] netlink: 'syz-executor.2': attribute type 1 has an invalid length. 
17:02:47 executing program 1: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x28a, 0xa, {0x0, 0x0, 0x0, 0x0, 0x10000000}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8}]}, 0x3c}}, 0x0) 17:02:47 executing program 0: r0 = socket$packet(0x11, 0x3, 0x300) r1 = socket(0x10, 0x3, 0x0) r2 = socket$nl_route(0x10, 0x3, 0x0) r3 = socket(0x10, 0x803, 0x0) socket$nl_generic(0x10, 0x3, 0x10) sendmsg$BATADV_CMD_GET_MESH(r3, &(0x7f00000004c0)={0x0, 0x0, &(0x7f0000000280)={0x0}, 0x1, 0x0, 0x0, 0x20000075}, 0x0) getsockname$packet(r3, &(0x7f0000000080)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000200)=0x14) r5 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000140)='memory.events\x00', 0x7a05, 0x1700) r6 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) write$cgroup_int(r5, &(0x7f0000000200), 0xf000) sendfile(r5, r6, 0x0, 0xf03b0000) sendfile(r5, r5, &(0x7f0000000180), 0x0) sendmsg$TIPC_CMD_GET_LINKS(r5, &(0x7f0000000480)={&(0x7f00000002c0)={0x10, 0x0, 0x0, 0x100}, 0xc, &(0x7f0000000340)={&(0x7f0000000300)={0x24, 0x0, 0x1, 0x70bd26, 0x25dfdbfe, {{}, {}, {0x8, 0x11, 0x8}}, ["", "", "", "", "", "", "", ""]}, 0x24}, 0x1, 0x0, 0x0, 0x4008000}, 0x40001) sendmsg$nl_route(r2, &(0x7f0000000000)={0x0, 0x0, &(0x7f0000000140)={&(0x7f0000000a00)=ANY=[@ANYBLOB="090000000000000023eeffffffe0ffe602000040", @ANYRES32=r4, @ANYBLOB="01000000010000001c0012000c0001006272696467651b0e27aa46056da6e25c4e901e86e63236c1d7fab7b162f8ee479fedc831c32d5e9b4292a8153fd531ce0bef6fffffffffb7c3c18d73454752bd912344c9edb5d3ceffcd92237b1bcf7ed31d78ef4b6f3d11e1e72732a820a56aadec7f1dde90621ef3861ae9cf029b5fc7a8052cbeb67ee1fdc50000a1d0edcf1d1fff03919a707c6a9ccc5dc1a5548371893be94318612225d717d6e6b43ed1648b0f7b6b234cc8212d7b43a8b3fd359f4d182bf27fbacde4c3021627d1fa4661e447d73718c2a41a0e03a53499686e5afceb0b1a7fa622c282d4c80303d4fd75bba670d7a1f1aae34d5506e7efa257ce77ff45043301785dc1a333027ee48ef2eb6cfacddfaad1e712b29204c78af6c5d31edbfb06e9f88f13ad25319a0500194c2e3f1124af531f27d22635ee06b04353a5eff2e9be79c3897e14df111cf81f4b61292cef6f3f7c07354302c90fd20d14c5adf7266a072392c99d1d6547a3da8d08bcff5d4db99e01ac560a55d45c43ca3d00"/404], 0x3c}, 0x1, 0x0, 0x0, 0x800}, 0x0) sendmsg$nl_route_sched(r1, &(0x7f00000007c0)={0x0, 0x0, &(0x7f00000000c0)={&(0x7f0000000240)=@newqdisc={0x2c, 0x24, 0xe0b, 0x0, 0x0, {0x0, 0x0, 0x0, r4, {}, {0xffff, 0xffff}}, [@qdisc_kind_options=@q_qfg={0x8}]}, 0x2c}}, 0x0) bind$packet(r0, &(0x7f0000000100)={0x11, 0x0, r4, 0x1, 0x0, 0x6, @multicast}, 0x14) sendto$packet(r0, &(0x7f0000000380)="93a70b0100001006ff7f00000800", 0x36, 0x0, 0x0, 0x0) r7 = socket$alg(0x26, 0x5, 0x0) recvmsg(r7, &(0x7f00000009c0)={&(0x7f0000000180)=@pppol2tpv3={0x18, 0x1, {0x0, 0xffffffffffffffff, {0x2, 0x0, @private}}}, 0x80, &(0x7f0000000880)=[{&(0x7f0000000040)=""/63, 0x3f}, {&(0x7f00000003c0)=""/157, 0x9d}, {&(0x7f0000000500)=""/179, 
0xb3}, {&(0x7f00000005c0)=""/223, 0xdf}, {&(0x7f00000006c0)=""/109, 0x6d}, {&(0x7f0000000340)}, {&(0x7f0000000740)=""/116, 0x74}, {&(0x7f0000000800)=""/2, 0x2}, {&(0x7f0000000840)=""/54, 0x36}], 0x9, &(0x7f0000000940)=""/77, 0x4d}, 0x200) socket$packet(0x11, 0x3, 0x300) (async) socket(0x10, 0x3, 0x0) (async) socket$nl_route(0x10, 0x3, 0x0) (async) socket(0x10, 0x803, 0x0) (async) socket$nl_generic(0x10, 0x3, 0x10) (async) sendmsg$BATADV_CMD_GET_MESH(r3, &(0x7f00000004c0)={0x0, 0x0, &(0x7f0000000280)={0x0}, 0x1, 0x0, 0x0, 0x20000075}, 0x0) (async) getsockname$packet(r3, &(0x7f0000000080)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000200)=0x14) (async) openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000140)='memory.events\x00', 0x7a05, 0x1700) (async) openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) (async) write$cgroup_int(r5, &(0x7f0000000200), 0xf000) (async) sendfile(r5, r6, 0x0, 0xf03b0000) (async) sendfile(r5, r5, &(0x7f0000000180), 0x0) (async) sendmsg$TIPC_CMD_GET_LINKS(r5, &(0x7f0000000480)={&(0x7f00000002c0)={0x10, 0x0, 0x0, 0x100}, 0xc, &(0x7f0000000340)={&(0x7f0000000300)={0x24, 0x0, 0x1, 0x70bd26, 0x25dfdbfe, {{}, {}, {0x8, 0x11, 0x8}}, ["", "", "", "", "", "", "", ""]}, 0x24}, 0x1, 0x0, 0x0, 0x4008000}, 0x40001) (async) sendmsg$nl_route(r2, &(0x7f0000000000)={0x0, 0x0, &(0x7f0000000140)={&(0x7f0000000a00)=ANY=[@ANYBLOB="090000000000000023eeffffffe0ffe602000040", @ANYRES32=r4, @ANYBLOB="01000000010000001c0012000c0001006272696467651b0e27aa46056da6e25c4e901e86e63236c1d7fab7b162f8ee479fedc831c32d5e9b4292a8153fd531ce0bef6fffffffffb7c3c18d73454752bd912344c9edb5d3ceffcd92237b1bcf7ed31d78ef4b6f3d11e1e72732a820a56aadec7f1dde90621ef3861ae9cf029b5fc7a8052cbeb67ee1fdc50000a1d0edcf1d1fff03919a707c6a9ccc5dc1a5548371893be94318612225d717d6e6b43ed1648b0f7b6b234cc8212d7b43a8b3fd359f4d182bf27fbacde4c3021627d1fa4661e447d73718c2a41a0e03a53499686e5afceb0b1a7fa622c282d4c80303d4fd75bba670d7a1f1aae34d5506e7efa257ce77ff45043301785dc1a333027ee48ef2eb6cfacddfaad1e712b29204c78af6c5d31edbfb06e9f88f13ad25319a0500194c2e3f1124af531f27d22635ee06b04353a5eff2e9be79c3897e14df111cf81f4b61292cef6f3f7c07354302c90fd20d14c5adf7266a072392c99d1d6547a3da8d08bcff5d4db99e01ac560a55d45c43ca3d00"/404], 0x3c}, 0x1, 0x0, 0x0, 0x800}, 0x0) (async) sendmsg$nl_route_sched(r1, &(0x7f00000007c0)={0x0, 0x0, &(0x7f00000000c0)={&(0x7f0000000240)=@newqdisc={0x2c, 0x24, 0xe0b, 0x0, 0x0, {0x0, 0x0, 0x0, r4, {}, {0xffff, 0xffff}}, [@qdisc_kind_options=@q_qfg={0x8}]}, 0x2c}}, 0x0) (async) bind$packet(r0, &(0x7f0000000100)={0x11, 0x0, r4, 0x1, 0x0, 0x6, @multicast}, 0x14) (async) sendto$packet(r0, &(0x7f0000000380)="93a70b0100001006ff7f00000800", 0x36, 0x0, 0x0, 0x0) (async) socket$alg(0x26, 0x5, 0x0) (async) recvmsg(r7, &(0x7f00000009c0)={&(0x7f0000000180)=@pppol2tpv3={0x18, 0x1, {0x0, 0xffffffffffffffff, {0x2, 0x0, @private}}}, 0x80, &(0x7f0000000880)=[{&(0x7f0000000040)=""/63, 0x3f}, {&(0x7f00000003c0)=""/157, 0x9d}, {&(0x7f0000000500)=""/179, 0xb3}, {&(0x7f00000005c0)=""/223, 0xdf}, {&(0x7f00000006c0)=""/109, 0x6d}, {&(0x7f0000000340)}, {&(0x7f0000000740)=""/116, 0x74}, {&(0x7f0000000800)=""/2, 0x2}, {&(0x7f0000000840)=""/54, 0x36}], 0x9, &(0x7f0000000940)=""/77, 0x4d}, 0x200) (async) [ 2802.138558][T21069] bond977: entered promiscuous mode [ 2802.146024][T21069] 8021q: adding VLAN 0 to HW filter on device bond977 [ 2802.297299][T21070] bond977: (slave bridge1043): making interface the new active one [ 2802.308415][T21070] bridge1043: entered promiscuous mode [ 2802.323935][T21070] 
bond977: (slave bridge1043): Enslaving as an active interface with an up link 17:02:47 executing program 2: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0xea030000, 0x0, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) [ 2802.350734][T21073] netlink: 'syz-executor.5': attribute type 1 has an invalid length. [ 2802.457081][T21073] bond1045: entered promiscuous mode [ 2802.465735][T21073] 8021q: adding VLAN 0 to HW filter on device bond1045 17:02:47 executing program 5: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x62, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) [ 2802.613828][T21076] bond1045: (slave bridge1072): making interface the new active one [ 2802.622641][T21076] bridge1072: entered promiscuous mode [ 2802.643960][T21076] bond1045: (slave bridge1072): Enslaving as an active interface with an up link 17:02:47 executing program 0: r0 = socket$packet(0x11, 0x3, 0x300) (async) r1 = socket(0x10, 0x3, 0x0) (async) r2 = socket$nl_route(0x10, 0x3, 0x0) (async) r3 = socket(0x10, 0x803, 0x0) (async) socket$nl_generic(0x10, 0x3, 0x10) sendmsg$BATADV_CMD_GET_MESH(r3, &(0x7f00000004c0)={0x0, 0x0, &(0x7f0000000280)={0x0}, 0x1, 0x0, 0x0, 0x20000075}, 0x0) (async) getsockname$packet(r3, &(0x7f0000000080)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000200)=0x14) (async) r5 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000140)='memory.events\x00', 0x7a05, 0x1700) r6 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) write$cgroup_int(r5, &(0x7f0000000200), 0xf000) (async) sendfile(r5, r6, 0x0, 0xf03b0000) (async) sendfile(r5, r5, &(0x7f0000000180), 0x0) sendmsg$TIPC_CMD_GET_LINKS(r5, &(0x7f0000000480)={&(0x7f00000002c0)={0x10, 0x0, 0x0, 0x100}, 0xc, &(0x7f0000000340)={&(0x7f0000000300)={0x24, 0x0, 0x1, 0x70bd26, 0x25dfdbfe, {{}, {}, {0x8, 0x11, 0x8}}, ["", "", "", "", "", "", "", ""]}, 0x24}, 0x1, 0x0, 0x0, 0x4008000}, 0x40001) (async) sendmsg$nl_route(r2, &(0x7f0000000000)={0x0, 0x0, &(0x7f0000000140)={&(0x7f0000000a00)=ANY=[@ANYBLOB="090000000000000023eeffffffe0ffe602000040", @ANYRES32=r4, 
@ANYBLOB="01000000010000001c0012000c0001006272696467651b0e27aa46056da6e25c4e901e86e63236c1d7fab7b162f8ee479fedc831c32d5e9b4292a8153fd531ce0bef6fffffffffb7c3c18d73454752bd912344c9edb5d3ceffcd92237b1bcf7ed31d78ef4b6f3d11e1e72732a820a56aadec7f1dde90621ef3861ae9cf029b5fc7a8052cbeb67ee1fdc50000a1d0edcf1d1fff03919a707c6a9ccc5dc1a5548371893be94318612225d717d6e6b43ed1648b0f7b6b234cc8212d7b43a8b3fd359f4d182bf27fbacde4c3021627d1fa4661e447d73718c2a41a0e03a53499686e5afceb0b1a7fa622c282d4c80303d4fd75bba670d7a1f1aae34d5506e7efa257ce77ff45043301785dc1a333027ee48ef2eb6cfacddfaad1e712b29204c78af6c5d31edbfb06e9f88f13ad25319a0500194c2e3f1124af531f27d22635ee06b04353a5eff2e9be79c3897e14df111cf81f4b61292cef6f3f7c07354302c90fd20d14c5adf7266a072392c99d1d6547a3da8d08bcff5d4db99e01ac560a55d45c43ca3d00"/404], 0x3c}, 0x1, 0x0, 0x0, 0x800}, 0x0) (async) sendmsg$nl_route_sched(r1, &(0x7f00000007c0)={0x0, 0x0, &(0x7f00000000c0)={&(0x7f0000000240)=@newqdisc={0x2c, 0x24, 0xe0b, 0x0, 0x0, {0x0, 0x0, 0x0, r4, {}, {0xffff, 0xffff}}, [@qdisc_kind_options=@q_qfg={0x8}]}, 0x2c}}, 0x0) (async) bind$packet(r0, &(0x7f0000000100)={0x11, 0x0, r4, 0x1, 0x0, 0x6, @multicast}, 0x14) (async) sendto$packet(r0, &(0x7f0000000380)="93a70b0100001006ff7f00000800", 0x36, 0x0, 0x0, 0x0) r7 = socket$alg(0x26, 0x5, 0x0) recvmsg(r7, &(0x7f00000009c0)={&(0x7f0000000180)=@pppol2tpv3={0x18, 0x1, {0x0, 0xffffffffffffffff, {0x2, 0x0, @private}}}, 0x80, &(0x7f0000000880)=[{&(0x7f0000000040)=""/63, 0x3f}, {&(0x7f00000003c0)=""/157, 0x9d}, {&(0x7f0000000500)=""/179, 0xb3}, {&(0x7f00000005c0)=""/223, 0xdf}, {&(0x7f00000006c0)=""/109, 0x6d}, {&(0x7f0000000340)}, {&(0x7f0000000740)=""/116, 0x74}, {&(0x7f0000000800)=""/2, 0x2}, {&(0x7f0000000840)=""/54, 0x36}], 0x9, &(0x7f0000000940)=""/77, 0x4d}, 0x200) [ 2802.711782][T21084] netlink: 'syz-executor.4': attribute type 1 has an invalid length. 
17:02:47 executing program 3: r0 = socket$netlink(0x10, 0x3, 0x0) socket(0x0, 0x0, 0x0) sendmsg$nl_route(0xffffffffffffffff, 0x0, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000100)=@newlink={0x48, 0x10, 0xffffff1f, 0x0, 0x0, {}, [@IFLA_LINKINFO={0x28, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x18, 0x2, 0x0, 0x1, [@IFLA_BR_MULTI_BOOLOPT={0xc, 0x2e, {0x0, 0x1}}, @IFLA_BR_MCAST_STATS_ENABLED={0x5}]}}}]}, 0x48}, 0x1, 0x0, 0x0, 0xffffffff}, 0x0) 17:02:47 executing program 0: r0 = getpid() socketpair(0x5, 0x80000, 0x5, &(0x7f0000000340)={0xffffffffffffffff, 0xffffffffffffffff}) r2 = syz_genetlink_get_family_id$devlink(&(0x7f00000003c0), 0xffffffffffffffff) r3 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000006c0)='blkio.bfq.io_queued_recursive\x00', 0x275a, 0x0) ioctl$FS_IOC_RESVSP(r3, 0x40305828, &(0x7f00000007c0)={0x0, 0x0, 0x0, 0x20040006, 0x17800}) ioctl$sock_SIOCGPGRP(0xffffffffffffffff, 0x8904, &(0x7f0000000400)=0x0) sendmsg$DEVLINK_CMD_RELOAD(r1, &(0x7f0000000600)={&(0x7f0000000380)={0x10, 0x0, 0x0, 0x20}, 0xc, &(0x7f0000000540)={&(0x7f0000000440)={0xd4, r2, 0x100, 0x70bd25, 0x25dfdbfd, {}, [{@nsim={{0xe}, {0xf, 0x2, {'netdevsim', 0x0}}}, @DEVLINK_ATTR_NETNS_FD={0x8, 0x8a, r3}}, {@nsim={{0xe}, {0xf, 0x2, {'netdevsim', 0x0}}}, @DEVLINK_ATTR_NETNS_FD={0x8}}, {@nsim={{0xe}, {0xf, 0x2, {'netdevsim', 0x0}}}, @DEVLINK_ATTR_NETNS_PID={0x8, 0x8b, r0}}, {@pci={{0x8}, {0x11}}, @DEVLINK_ATTR_NETNS_PID={0x8, 0x8b, r4}}, {@pci={{0x8}, {0x11}}, @DEVLINK_ATTR_NETNS_FD={0x8}}]}, 0xd4}, 0x1, 0x0, 0x0, 0x4000}, 0x1) socketpair$unix(0x1, 0x2, 0x0, &(0x7f0000000780)={0xffffffffffffffff}) r6 = accept4(r1, &(0x7f0000000e00)=@sco={0x1f, @none}, &(0x7f0000000e80)=0x80, 0x100000) sendmsg$nl_route(r6, &(0x7f0000000fc0)={&(0x7f0000000ec0)={0x10, 0x0, 0x0, 0x80000000}, 0xc, &(0x7f0000000f80)={&(0x7f0000000f00)=@delnexthop={0x48, 0x69, 0x200, 0x70bd2a, 0x25dfdbfc, {}, [{0x8, 0x1, 0x1}, {0x8}, {0x8, 0x1, 0x1}, {0x8, 0x1, 0x1}, {0x8, 0x1, 0x2}, {0x8}]}, 0x48}, 0x1, 0x0, 0x0, 0x20000000}, 0x4000001) r7 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) sendfile(0xffffffffffffffff, r7, 0x0, 0x8000000000004) openat$cgroup_ro(r7, &(0x7f00000001c0)='cpuacct.usage_percpu_user\x00', 0x0, 0x0) getsockopt$sock_cred(r7, 0x1, 0x11, &(0x7f0000000640), &(0x7f0000000680)=0xc) connect$unix(r5, &(0x7f0000000180)=@abs={0x0, 0x0, 0x4e23}, 0x6e) recvmmsg(r5, &(0x7f00000000c0), 0x10106, 0x2, 0x0) bpf$BPF_PROG_RAW_TRACEPOINT_LOAD(0x5, &(0x7f0000000580)={0x18, 0x4, &(0x7f00000002c0)=ANY=[@ANYBLOB="18010000000000000000000000000000850000007d00000095"], &(0x7f0000000100)='GPL\x00', 0x0, 0x0, 0x0, 0x40f00, 0x0, '\x00', 0x0, 0x2, 0xffffffffffffffff, 0x8, 0x0, 0x0, 0x10, 0x0}, 0x80) r8 = socket$inet(0x2, 0x2, 0x0) setsockopt$IPT_SO_SET_REPLACE(r8, 0x4000000000000, 0x40, &(0x7f0000000000)=@raw={'raw\x00', 0x8, 0x3, 0x2a0, 0xc8, 0x8, 0xfa02, 0x0, 0x6c00, 0x208, 0x194, 0x194, 0x208, 0x194, 0x7fffffe, 0x0, {[{{@ip={@broadcast, @broadcast, 0xffffff00, 0xffffff00, 'ip6gretap0\x00', 'ip6gretap0\x00', {}, {}, 0x21, 0x2, 0x1c}, 0x0, 0xa0, 0xc8, 0x0, {0x0, 0x74020000}, [@common=@inet=@tcp={{0x30}, {[], [], 0x0, 0x0, 0x7a}}]}, @common=@inet=@TCPMSS={0x28}}, {{@ip={@multicast2, @dev, 0x0, 0x0, '\x00', 'tunl0\x00', {0xff}}, 0x0, 0xf8, 0x140, 0x0, {}, [@common=@unspec=@helper={{0x48}, {0x1, 'amanda\x00'}}, @common=@unspec=@connlimit={{0x40}}]}, @unspec=@CT0={0x48, 'CT\x00', 0x0, {0x0, 0x0, 0x0, 0x0, 'syz1\x00'}}}], {{'\x00', 0x0, 0x70, 0x98}, {0x28}}}}, 0x300) r9 = 
openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) ioctl$FS_IOC_RESVSP(r9, 0x40305828, &(0x7f00000007c0)={0x0, 0x0, 0x0, 0x20040006, 0x17800}) r10 = syz_genetlink_get_family_id$devlink(&(0x7f0000000740), r3) sendmsg$DEVLINK_CMD_SB_OCC_SNAPSHOT(r9, &(0x7f0000000dc0)={&(0x7f0000000700)={0x10, 0x0, 0x0, 0x20}, 0xc, &(0x7f0000000d80)={&(0x7f0000000d40)={0x38, r10, 0x2, 0x70bd2b, 0x25dfdbfe, {}, [{@pci={{0x8}, {0x11}}, {0x8, 0xb, 0x401}}]}, 0x38}, 0x1, 0x0, 0x0, 0x145}, 0x880) r11 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) write$cgroup_int(0xffffffffffffffff, &(0x7f0000000200), 0xf000) sendfile(0xffffffffffffffff, r11, 0x0, 0xf03b0000) r12 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) sendfile(r12, r11, &(0x7f00000002c0)=0x335773c3, 0x8) sendmsg$DEVLINK_CMD_RATE_DEL(r11, &(0x7f0000001180)={&(0x7f0000001000)={0x10, 0x0, 0x0, 0x10}, 0xc, &(0x7f0000001140)={&(0x7f0000001040)={0xf8, 0x0, 0x400, 0x70bd27, 0x25dfdbfe, {}, [@DEVLINK_ATTR_PORT_INDEX={0x8, 0x3, 0x3}, @DEVLINK_ATTR_PORT_INDEX={0x8, 0x3, 0x1}, @handle=@pci={{0x8}, {0x11}}, @DEVLINK_ATTR_RATE_NODE_NAME={0x93, 0xa8, @random="070490cd7b91e112dceb1b0ac6bb6cbbdc8b5785600d1e725887e658cc1190f807233d087535feda18e721640be3e63c92002522e7640509ac08dc6c40a9775363b9048da9ef906940836850656f52edb434789a89049c985089943e8929ac5787c5636f19b893a5dbb5376e97adc68b37d33498d136975a8e942088b3ad4282b691298e52ede369d24e7abd687e67"}, @handle=@pci={{0x8}, {0x11}}, @DEVLINK_ATTR_PORT_INDEX={0x8, 0x3, 0x3}]}, 0xf8}, 0x1, 0x0, 0x0, 0x44000}, 0x2040015) setsockopt$IPT_SO_SET_REPLACE(0xffffffffffffffff, 0x0, 0x40, &(0x7f00000007c0)=@filter={'filter\x00', 0xe, 0x4, 0x510, 0xffffffff, 0x190, 0x288, 0x0, 0xffffffff, 0xffffffff, 0x478, 0x478, 0x478, 0xffffffff, 0x4, &(0x7f0000000300), {[{{@uncond, 0x0, 0x168, 0x190, 0x0, {}, [@common=@addrtype={{0x30}, {0x200, 0x40, 0x0, 0x1}}, @common=@unspec=@conntrack3={{0xc8}, {{@ipv4=@private=0xa010100, [0xff000000, 0xff, 0xff], @ipv6=@empty, [0xffffff00, 0xff000000, 0xffffff00, 0xff000000], @ipv6=@rand_addr=' \x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02', [0xff, 0x0, 0xffffffff, 0xffffff00], @ipv6=@private2, [0xffffff00, 0xff000000, 0xffffffff, 0xffffff00], 0xffffff4f, 0xf0, 0x0, 0x4e20, 0x4e24, 0x4e24, 0x4e20, 0x0, 0x622}, 0x80, 0x2001, 0x3ff, 0x4e21, 0x4e23, 0x4e20}}]}, @REJECT={0x28, 'REJECT\x00', 0x0, {0x7}}}, {{@uncond, 0x0, 0x98, 0xf8, 0x0, {}, [@common=@ttl={{0x28}, {0x3, 0x8}}]}, @common=@CLUSTERIP={0x60, 'CLUSTERIP\x00', 0x0, {0x0, @random="92d65d3bab4b", 0x7fff, 0x0, [0x14, 0x10, 0x29, 0x39, 0x2c, 0x34, 0x2e, 0x3, 0xa, 0x14, 0x19, 0x29, 0x36, 0xb, 0x1d, 0x2a], 0x1, 0x6, 0x4}}}, {{@uncond, 0x0, 0x1a8, 0x1f0, 0x0, {}, [@common=@inet=@recent1={{0x108}, {0x1, 0x4, 0x0, 0x1, 'syz1\x00', 0x1, [0xffffffff, 0xffffff00, 0xffffffff, 0xffffffff]}}, @common=@addrtype={{0x30}, {0x800, 0x86, 0x0, 0x1}}]}, @common=@inet=@TEE={0x48, 'TEE\x00', 0x1, {@ipv4=@loopback, 'syzkaller0\x00', {0x80000000}}}}], {{'\x00', 0x0, 0x70, 0x98}, {0x28}}}}, 0x570) 17:02:47 executing program 4: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, 
&(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x28a, 0x0, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb, 0x9}, {0x4}}}, @IFLA_MASTER={0x8}]}, 0x3c}}, 0x0) [ 2802.814899][T21084] bond625: entered promiscuous mode [ 2802.821533][T21084] 8021q: adding VLAN 0 to HW filter on device bond625 [ 2802.834108][T21089] netlink: 'syz-executor.1': attribute type 1 has an invalid length. [ 2802.925954][T21089] bond748: entered promiscuous mode [ 2802.931849][T21089] 8021q: adding VLAN 0 to HW filter on device bond748 17:02:48 executing program 1: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x28a, 0xa, {0x0, 0x0, 0x0, 0x0, 0x60000000}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8}]}, 0x3c}}, 0x0) [ 2803.009106][T21100] netlink: 'syz-executor.2': attribute type 1 has an invalid length. [ 2803.166660][T21100] bond978: entered promiscuous mode [ 2803.172928][T21100] 8021q: adding VLAN 0 to HW filter on device bond978 [ 2803.205606][T21135] x_tables: ip_tables: tcp match: only valid for protocol 6 [ 2803.272696][T21102] bond978: (slave bridge1044): making interface the new active one [ 2803.281938][T21102] bridge1044: entered promiscuous mode [ 2803.297002][T21102] bond978: (slave bridge1044): Enslaving as an active interface with an up link 17:02:48 executing program 2: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0xeaab1600, 0x0, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) [ 2803.339052][T21104] netlink: 'syz-executor.5': attribute type 1 has an invalid length. 
[ 2803.409537][T21104] bond1046: entered promiscuous mode [ 2803.415488][T21104] 8021q: adding VLAN 0 to HW filter on device bond1046 [ 2803.473426][T21112] bond1046: (slave bridge1073): making interface the new active one [ 2803.482884][T21112] bridge1073: entered promiscuous mode [ 2803.495085][T21112] bond1046: (slave bridge1073): Enslaving as an active interface with an up link 17:02:48 executing program 5: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x68, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) 17:02:48 executing program 3: r0 = socket$netlink(0x10, 0x3, 0x0) socket(0x0, 0x0, 0x0) sendmsg$nl_route(0xffffffffffffffff, 0x0, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000100)=@newlink={0x48, 0x10, 0xffffff1f, 0x0, 0x0, {}, [@IFLA_LINKINFO={0x28, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x18, 0x2, 0x0, 0x1, [@IFLA_BR_MULTI_BOOLOPT={0xc, 0x2e, {0x0, 0x1}}, @IFLA_BR_MCAST_STATS_ENABLED={0x5}]}}}]}, 0x48}}, 0x2) [ 2803.588543][T21128] netlink: 'syz-executor.4': attribute type 1 has an invalid length. [ 2803.651377][T21128] bond626: entered promiscuous mode [ 2803.657308][T21128] 8021q: adding VLAN 0 to HW filter on device bond626 17:02:48 executing program 4: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x28a, 0x0, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb, 0xa}, {0x4}}}, @IFLA_MASTER={0x8}]}, 0x3c}}, 0x0) [ 2803.801489][T21132] bond749: entered promiscuous mode [ 2803.807452][T21132] 8021q: adding VLAN 0 to HW filter on device bond749 17:02:48 executing program 0: r0 = getpid() socketpair(0x5, 0x80000, 0x5, &(0x7f0000000340)={0xffffffffffffffff, 0xffffffffffffffff}) r2 = syz_genetlink_get_family_id$devlink(&(0x7f00000003c0), 0xffffffffffffffff) (async) r3 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000006c0)='blkio.bfq.io_queued_recursive\x00', 0x275a, 0x0) ioctl$FS_IOC_RESVSP(r3, 0x40305828, &(0x7f00000007c0)={0x0, 0x0, 0x0, 0x20040006, 0x17800}) (async) ioctl$sock_SIOCGPGRP(0xffffffffffffffff, 0x8904, &(0x7f0000000400)=0x0) sendmsg$DEVLINK_CMD_RELOAD(r1, &(0x7f0000000600)={&(0x7f0000000380)={0x10, 0x0, 0x0, 0x20}, 0xc, &(0x7f0000000540)={&(0x7f0000000440)={0xd4, r2, 0x100, 0x70bd25, 
0x25dfdbfd, {}, [{@nsim={{0xe}, {0xf, 0x2, {'netdevsim', 0x0}}}, @DEVLINK_ATTR_NETNS_FD={0x8, 0x8a, r3}}, {@nsim={{0xe}, {0xf, 0x2, {'netdevsim', 0x0}}}, @DEVLINK_ATTR_NETNS_FD={0x8}}, {@nsim={{0xe}, {0xf, 0x2, {'netdevsim', 0x0}}}, @DEVLINK_ATTR_NETNS_PID={0x8, 0x8b, r0}}, {@pci={{0x8}, {0x11}}, @DEVLINK_ATTR_NETNS_PID={0x8, 0x8b, r4}}, {@pci={{0x8}, {0x11}}, @DEVLINK_ATTR_NETNS_FD={0x8}}]}, 0xd4}, 0x1, 0x0, 0x0, 0x4000}, 0x1) socketpair$unix(0x1, 0x2, 0x0, &(0x7f0000000780)={0xffffffffffffffff}) (async) r6 = accept4(r1, &(0x7f0000000e00)=@sco={0x1f, @none}, &(0x7f0000000e80)=0x80, 0x100000) sendmsg$nl_route(r6, &(0x7f0000000fc0)={&(0x7f0000000ec0)={0x10, 0x0, 0x0, 0x80000000}, 0xc, &(0x7f0000000f80)={&(0x7f0000000f00)=@delnexthop={0x48, 0x69, 0x200, 0x70bd2a, 0x25dfdbfc, {}, [{0x8, 0x1, 0x1}, {0x8}, {0x8, 0x1, 0x1}, {0x8, 0x1, 0x1}, {0x8, 0x1, 0x2}, {0x8}]}, 0x48}, 0x1, 0x0, 0x0, 0x20000000}, 0x4000001) (async) r7 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) sendfile(0xffffffffffffffff, r7, 0x0, 0x8000000000004) (async) openat$cgroup_ro(r7, &(0x7f00000001c0)='cpuacct.usage_percpu_user\x00', 0x0, 0x0) (async) getsockopt$sock_cred(r7, 0x1, 0x11, &(0x7f0000000640), &(0x7f0000000680)=0xc) (async) connect$unix(r5, &(0x7f0000000180)=@abs={0x0, 0x0, 0x4e23}, 0x6e) (async) recvmmsg(r5, &(0x7f00000000c0), 0x10106, 0x2, 0x0) (async) bpf$BPF_PROG_RAW_TRACEPOINT_LOAD(0x5, &(0x7f0000000580)={0x18, 0x4, &(0x7f00000002c0)=ANY=[@ANYBLOB="18010000000000000000000000000000850000007d00000095"], &(0x7f0000000100)='GPL\x00', 0x0, 0x0, 0x0, 0x40f00, 0x0, '\x00', 0x0, 0x2, 0xffffffffffffffff, 0x8, 0x0, 0x0, 0x10, 0x0}, 0x80) r8 = socket$inet(0x2, 0x2, 0x0) setsockopt$IPT_SO_SET_REPLACE(r8, 0x4000000000000, 0x40, &(0x7f0000000000)=@raw={'raw\x00', 0x8, 0x3, 0x2a0, 0xc8, 0x8, 0xfa02, 0x0, 0x6c00, 0x208, 0x194, 0x194, 0x208, 0x194, 0x7fffffe, 0x0, {[{{@ip={@broadcast, @broadcast, 0xffffff00, 0xffffff00, 'ip6gretap0\x00', 'ip6gretap0\x00', {}, {}, 0x21, 0x2, 0x1c}, 0x0, 0xa0, 0xc8, 0x0, {0x0, 0x74020000}, [@common=@inet=@tcp={{0x30}, {[], [], 0x0, 0x0, 0x7a}}]}, @common=@inet=@TCPMSS={0x28}}, {{@ip={@multicast2, @dev, 0x0, 0x0, '\x00', 'tunl0\x00', {0xff}}, 0x0, 0xf8, 0x140, 0x0, {}, [@common=@unspec=@helper={{0x48}, {0x1, 'amanda\x00'}}, @common=@unspec=@connlimit={{0x40}}]}, @unspec=@CT0={0x48, 'CT\x00', 0x0, {0x0, 0x0, 0x0, 0x0, 'syz1\x00'}}}], {{'\x00', 0x0, 0x70, 0x98}, {0x28}}}}, 0x300) r9 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) ioctl$FS_IOC_RESVSP(r9, 0x40305828, &(0x7f00000007c0)={0x0, 0x0, 0x0, 0x20040006, 0x17800}) r10 = syz_genetlink_get_family_id$devlink(&(0x7f0000000740), r3) sendmsg$DEVLINK_CMD_SB_OCC_SNAPSHOT(r9, &(0x7f0000000dc0)={&(0x7f0000000700)={0x10, 0x0, 0x0, 0x20}, 0xc, &(0x7f0000000d80)={&(0x7f0000000d40)={0x38, r10, 0x2, 0x70bd2b, 0x25dfdbfe, {}, [{@pci={{0x8}, {0x11}}, {0x8, 0xb, 0x401}}]}, 0x38}, 0x1, 0x0, 0x0, 0x145}, 0x880) (async) r11 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) write$cgroup_int(0xffffffffffffffff, &(0x7f0000000200), 0xf000) (async) sendfile(0xffffffffffffffff, r11, 0x0, 0xf03b0000) (async) r12 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) sendfile(r12, r11, &(0x7f00000002c0)=0x335773c3, 0x8) sendmsg$DEVLINK_CMD_RATE_DEL(r11, &(0x7f0000001180)={&(0x7f0000001000)={0x10, 0x0, 0x0, 0x10}, 0xc, &(0x7f0000001140)={&(0x7f0000001040)={0xf8, 0x0, 0x400, 0x70bd27, 0x25dfdbfe, {}, 
[@DEVLINK_ATTR_PORT_INDEX={0x8, 0x3, 0x3}, @DEVLINK_ATTR_PORT_INDEX={0x8, 0x3, 0x1}, @handle=@pci={{0x8}, {0x11}}, @DEVLINK_ATTR_RATE_NODE_NAME={0x93, 0xa8, @random="070490cd7b91e112dceb1b0ac6bb6cbbdc8b5785600d1e725887e658cc1190f807233d087535feda18e721640be3e63c92002522e7640509ac08dc6c40a9775363b9048da9ef906940836850656f52edb434789a89049c985089943e8929ac5787c5636f19b893a5dbb5376e97adc68b37d33498d136975a8e942088b3ad4282b691298e52ede369d24e7abd687e67"}, @handle=@pci={{0x8}, {0x11}}, @DEVLINK_ATTR_PORT_INDEX={0x8, 0x3, 0x3}]}, 0xf8}, 0x1, 0x0, 0x0, 0x44000}, 0x2040015) setsockopt$IPT_SO_SET_REPLACE(0xffffffffffffffff, 0x0, 0x40, &(0x7f00000007c0)=@filter={'filter\x00', 0xe, 0x4, 0x510, 0xffffffff, 0x190, 0x288, 0x0, 0xffffffff, 0xffffffff, 0x478, 0x478, 0x478, 0xffffffff, 0x4, &(0x7f0000000300), {[{{@uncond, 0x0, 0x168, 0x190, 0x0, {}, [@common=@addrtype={{0x30}, {0x200, 0x40, 0x0, 0x1}}, @common=@unspec=@conntrack3={{0xc8}, {{@ipv4=@private=0xa010100, [0xff000000, 0xff, 0xff], @ipv6=@empty, [0xffffff00, 0xff000000, 0xffffff00, 0xff000000], @ipv6=@rand_addr=' \x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02', [0xff, 0x0, 0xffffffff, 0xffffff00], @ipv6=@private2, [0xffffff00, 0xff000000, 0xffffffff, 0xffffff00], 0xffffff4f, 0xf0, 0x0, 0x4e20, 0x4e24, 0x4e24, 0x4e20, 0x0, 0x622}, 0x80, 0x2001, 0x3ff, 0x4e21, 0x4e23, 0x4e20}}]}, @REJECT={0x28, 'REJECT\x00', 0x0, {0x7}}}, {{@uncond, 0x0, 0x98, 0xf8, 0x0, {}, [@common=@ttl={{0x28}, {0x3, 0x8}}]}, @common=@CLUSTERIP={0x60, 'CLUSTERIP\x00', 0x0, {0x0, @random="92d65d3bab4b", 0x7fff, 0x0, [0x14, 0x10, 0x29, 0x39, 0x2c, 0x34, 0x2e, 0x3, 0xa, 0x14, 0x19, 0x29, 0x36, 0xb, 0x1d, 0x2a], 0x1, 0x6, 0x4}}}, {{@uncond, 0x0, 0x1a8, 0x1f0, 0x0, {}, [@common=@inet=@recent1={{0x108}, {0x1, 0x4, 0x0, 0x1, 'syz1\x00', 0x1, [0xffffffff, 0xffffff00, 0xffffffff, 0xffffffff]}}, @common=@addrtype={{0x30}, {0x800, 0x86, 0x0, 0x1}}]}, @common=@inet=@TEE={0x48, 'TEE\x00', 0x1, {@ipv4=@loopback, 'syzkaller0\x00', {0x80000000}}}}], {{'\x00', 0x0, 0x70, 0x98}, {0x28}}}}, 0x570) 17:02:48 executing program 1: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x28a, 0xa, {0x0, 0x0, 0x0, 0x0, 0x65580000}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8}]}, 0x3c}}, 0x0) 17:02:49 executing program 0: r0 = getpid() socketpair(0x5, 0x80000, 0x5, &(0x7f0000000340)) (async) socketpair(0x5, 0x80000, 0x5, &(0x7f0000000340)={0xffffffffffffffff, 0xffffffffffffffff}) r2 = syz_genetlink_get_family_id$devlink(&(0x7f00000003c0), 0xffffffffffffffff) r3 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000006c0)='blkio.bfq.io_queued_recursive\x00', 0x275a, 0x0) ioctl$FS_IOC_RESVSP(r3, 0x40305828, &(0x7f00000007c0)={0x0, 0x0, 0x0, 0x20040006, 0x17800}) ioctl$sock_SIOCGPGRP(0xffffffffffffffff, 0x8904, &(0x7f0000000400)=0x0) sendmsg$DEVLINK_CMD_RELOAD(r1, &(0x7f0000000600)={&(0x7f0000000380)={0x10, 0x0, 0x0, 0x20}, 0xc, 
&(0x7f0000000540)={&(0x7f0000000440)={0xd4, r2, 0x100, 0x70bd25, 0x25dfdbfd, {}, [{@nsim={{0xe}, {0xf, 0x2, {'netdevsim', 0x0}}}, @DEVLINK_ATTR_NETNS_FD={0x8, 0x8a, r3}}, {@nsim={{0xe}, {0xf, 0x2, {'netdevsim', 0x0}}}, @DEVLINK_ATTR_NETNS_FD={0x8}}, {@nsim={{0xe}, {0xf, 0x2, {'netdevsim', 0x0}}}, @DEVLINK_ATTR_NETNS_PID={0x8, 0x8b, r0}}, {@pci={{0x8}, {0x11}}, @DEVLINK_ATTR_NETNS_PID={0x8, 0x8b, r4}}, {@pci={{0x8}, {0x11}}, @DEVLINK_ATTR_NETNS_FD={0x8}}]}, 0xd4}, 0x1, 0x0, 0x0, 0x4000}, 0x1) socketpair$unix(0x1, 0x2, 0x0, &(0x7f0000000780)={0xffffffffffffffff}) r6 = accept4(r1, &(0x7f0000000e00)=@sco={0x1f, @none}, &(0x7f0000000e80)=0x80, 0x100000) sendmsg$nl_route(r6, &(0x7f0000000fc0)={&(0x7f0000000ec0)={0x10, 0x0, 0x0, 0x80000000}, 0xc, &(0x7f0000000f80)={&(0x7f0000000f00)=@delnexthop={0x48, 0x69, 0x200, 0x70bd2a, 0x25dfdbfc, {}, [{0x8, 0x1, 0x1}, {0x8}, {0x8, 0x1, 0x1}, {0x8, 0x1, 0x1}, {0x8, 0x1, 0x2}, {0x8}]}, 0x48}, 0x1, 0x0, 0x0, 0x20000000}, 0x4000001) r7 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) sendfile(0xffffffffffffffff, r7, 0x0, 0x8000000000004) (async) sendfile(0xffffffffffffffff, r7, 0x0, 0x8000000000004) openat$cgroup_ro(r7, &(0x7f00000001c0)='cpuacct.usage_percpu_user\x00', 0x0, 0x0) (async) openat$cgroup_ro(r7, &(0x7f00000001c0)='cpuacct.usage_percpu_user\x00', 0x0, 0x0) getsockopt$sock_cred(r7, 0x1, 0x11, &(0x7f0000000640), &(0x7f0000000680)=0xc) (async) getsockopt$sock_cred(r7, 0x1, 0x11, &(0x7f0000000640), &(0x7f0000000680)=0xc) connect$unix(r5, &(0x7f0000000180)=@abs={0x0, 0x0, 0x4e23}, 0x6e) (async) connect$unix(r5, &(0x7f0000000180)=@abs={0x0, 0x0, 0x4e23}, 0x6e) recvmmsg(r5, &(0x7f00000000c0), 0x10106, 0x2, 0x0) bpf$BPF_PROG_RAW_TRACEPOINT_LOAD(0x5, &(0x7f0000000580)={0x18, 0x4, &(0x7f00000002c0)=ANY=[@ANYBLOB="18010000000000000000000000000000850000007d00000095"], &(0x7f0000000100)='GPL\x00', 0x0, 0x0, 0x0, 0x40f00, 0x0, '\x00', 0x0, 0x2, 0xffffffffffffffff, 0x8, 0x0, 0x0, 0x10, 0x0}, 0x80) r8 = socket$inet(0x2, 0x2, 0x0) setsockopt$IPT_SO_SET_REPLACE(r8, 0x4000000000000, 0x40, &(0x7f0000000000)=@raw={'raw\x00', 0x8, 0x3, 0x2a0, 0xc8, 0x8, 0xfa02, 0x0, 0x6c00, 0x208, 0x194, 0x194, 0x208, 0x194, 0x7fffffe, 0x0, {[{{@ip={@broadcast, @broadcast, 0xffffff00, 0xffffff00, 'ip6gretap0\x00', 'ip6gretap0\x00', {}, {}, 0x21, 0x2, 0x1c}, 0x0, 0xa0, 0xc8, 0x0, {0x0, 0x74020000}, [@common=@inet=@tcp={{0x30}, {[], [], 0x0, 0x0, 0x7a}}]}, @common=@inet=@TCPMSS={0x28}}, {{@ip={@multicast2, @dev, 0x0, 0x0, '\x00', 'tunl0\x00', {0xff}}, 0x0, 0xf8, 0x140, 0x0, {}, [@common=@unspec=@helper={{0x48}, {0x1, 'amanda\x00'}}, @common=@unspec=@connlimit={{0x40}}]}, @unspec=@CT0={0x48, 'CT\x00', 0x0, {0x0, 0x0, 0x0, 0x0, 'syz1\x00'}}}], {{'\x00', 0x0, 0x70, 0x98}, {0x28}}}}, 0x300) r9 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) ioctl$FS_IOC_RESVSP(r9, 0x40305828, &(0x7f00000007c0)={0x0, 0x0, 0x0, 0x20040006, 0x17800}) (async) ioctl$FS_IOC_RESVSP(r9, 0x40305828, &(0x7f00000007c0)={0x0, 0x0, 0x0, 0x20040006, 0x17800}) syz_genetlink_get_family_id$devlink(&(0x7f0000000740), r3) (async) r10 = syz_genetlink_get_family_id$devlink(&(0x7f0000000740), r3) sendmsg$DEVLINK_CMD_SB_OCC_SNAPSHOT(r9, &(0x7f0000000dc0)={&(0x7f0000000700)={0x10, 0x0, 0x0, 0x20}, 0xc, &(0x7f0000000d80)={&(0x7f0000000d40)={0x38, r10, 0x2, 0x70bd2b, 0x25dfdbfe, {}, [{@pci={{0x8}, {0x11}}, {0x8, 0xb, 0x401}}]}, 0x38}, 0x1, 0x0, 0x0, 0x145}, 0x880) (async) sendmsg$DEVLINK_CMD_SB_OCC_SNAPSHOT(r9, 
&(0x7f0000000dc0)={&(0x7f0000000700)={0x10, 0x0, 0x0, 0x20}, 0xc, &(0x7f0000000d80)={&(0x7f0000000d40)={0x38, r10, 0x2, 0x70bd2b, 0x25dfdbfe, {}, [{@pci={{0x8}, {0x11}}, {0x8, 0xb, 0x401}}]}, 0x38}, 0x1, 0x0, 0x0, 0x145}, 0x880) r11 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) write$cgroup_int(0xffffffffffffffff, &(0x7f0000000200), 0xf000) sendfile(0xffffffffffffffff, r11, 0x0, 0xf03b0000) (async) sendfile(0xffffffffffffffff, r11, 0x0, 0xf03b0000) r12 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) sendfile(r12, r11, &(0x7f00000002c0)=0x335773c3, 0x8) sendmsg$DEVLINK_CMD_RATE_DEL(r11, &(0x7f0000001180)={&(0x7f0000001000)={0x10, 0x0, 0x0, 0x10}, 0xc, &(0x7f0000001140)={&(0x7f0000001040)={0xf8, 0x0, 0x400, 0x70bd27, 0x25dfdbfe, {}, [@DEVLINK_ATTR_PORT_INDEX={0x8, 0x3, 0x3}, @DEVLINK_ATTR_PORT_INDEX={0x8, 0x3, 0x1}, @handle=@pci={{0x8}, {0x11}}, @DEVLINK_ATTR_RATE_NODE_NAME={0x93, 0xa8, @random="070490cd7b91e112dceb1b0ac6bb6cbbdc8b5785600d1e725887e658cc1190f807233d087535feda18e721640be3e63c92002522e7640509ac08dc6c40a9775363b9048da9ef906940836850656f52edb434789a89049c985089943e8929ac5787c5636f19b893a5dbb5376e97adc68b37d33498d136975a8e942088b3ad4282b691298e52ede369d24e7abd687e67"}, @handle=@pci={{0x8}, {0x11}}, @DEVLINK_ATTR_PORT_INDEX={0x8, 0x3, 0x3}]}, 0xf8}, 0x1, 0x0, 0x0, 0x44000}, 0x2040015) (async) sendmsg$DEVLINK_CMD_RATE_DEL(r11, &(0x7f0000001180)={&(0x7f0000001000)={0x10, 0x0, 0x0, 0x10}, 0xc, &(0x7f0000001140)={&(0x7f0000001040)={0xf8, 0x0, 0x400, 0x70bd27, 0x25dfdbfe, {}, [@DEVLINK_ATTR_PORT_INDEX={0x8, 0x3, 0x3}, @DEVLINK_ATTR_PORT_INDEX={0x8, 0x3, 0x1}, @handle=@pci={{0x8}, {0x11}}, @DEVLINK_ATTR_RATE_NODE_NAME={0x93, 0xa8, @random="070490cd7b91e112dceb1b0ac6bb6cbbdc8b5785600d1e725887e658cc1190f807233d087535feda18e721640be3e63c92002522e7640509ac08dc6c40a9775363b9048da9ef906940836850656f52edb434789a89049c985089943e8929ac5787c5636f19b893a5dbb5376e97adc68b37d33498d136975a8e942088b3ad4282b691298e52ede369d24e7abd687e67"}, @handle=@pci={{0x8}, {0x11}}, @DEVLINK_ATTR_PORT_INDEX={0x8, 0x3, 0x3}]}, 0xf8}, 0x1, 0x0, 0x0, 0x44000}, 0x2040015) setsockopt$IPT_SO_SET_REPLACE(0xffffffffffffffff, 0x0, 0x40, &(0x7f00000007c0)=@filter={'filter\x00', 0xe, 0x4, 0x510, 0xffffffff, 0x190, 0x288, 0x0, 0xffffffff, 0xffffffff, 0x478, 0x478, 0x478, 0xffffffff, 0x4, &(0x7f0000000300), {[{{@uncond, 0x0, 0x168, 0x190, 0x0, {}, [@common=@addrtype={{0x30}, {0x200, 0x40, 0x0, 0x1}}, @common=@unspec=@conntrack3={{0xc8}, {{@ipv4=@private=0xa010100, [0xff000000, 0xff, 0xff], @ipv6=@empty, [0xffffff00, 0xff000000, 0xffffff00, 0xff000000], @ipv6=@rand_addr=' \x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02', [0xff, 0x0, 0xffffffff, 0xffffff00], @ipv6=@private2, [0xffffff00, 0xff000000, 0xffffffff, 0xffffff00], 0xffffff4f, 0xf0, 0x0, 0x4e20, 0x4e24, 0x4e24, 0x4e20, 0x0, 0x622}, 0x80, 0x2001, 0x3ff, 0x4e21, 0x4e23, 0x4e20}}]}, @REJECT={0x28, 'REJECT\x00', 0x0, {0x7}}}, {{@uncond, 0x0, 0x98, 0xf8, 0x0, {}, [@common=@ttl={{0x28}, {0x3, 0x8}}]}, @common=@CLUSTERIP={0x60, 'CLUSTERIP\x00', 0x0, {0x0, @random="92d65d3bab4b", 0x7fff, 0x0, [0x14, 0x10, 0x29, 0x39, 0x2c, 0x34, 0x2e, 0x3, 0xa, 0x14, 0x19, 0x29, 0x36, 0xb, 0x1d, 0x2a], 0x1, 0x6, 0x4}}}, {{@uncond, 0x0, 0x1a8, 0x1f0, 0x0, {}, [@common=@inet=@recent1={{0x108}, {0x1, 0x4, 0x0, 0x1, 'syz1\x00', 0x1, [0xffffffff, 0xffffff00, 0xffffffff, 0xffffffff]}}, @common=@addrtype={{0x30}, {0x800, 0x86, 0x0, 0x1}}]}, @common=@inet=@TEE={0x48, 'TEE\x00', 0x1, {@ipv4=@loopback, 
'syzkaller0\x00', {0x80000000}}}}], {{'\x00', 0x0, 0x70, 0x98}, {0x28}}}}, 0x570) [ 2804.017853][T21138] bond979: entered promiscuous mode [ 2804.037697][T21138] 8021q: adding VLAN 0 to HW filter on device bond979 [ 2804.242154][T21139] bond979: (slave bridge1045): making interface the new active one 17:02:49 executing program 2: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0xebab1600, 0x0, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) [ 2804.283052][T21139] bridge1045: entered promiscuous mode [ 2804.303017][T21139] bond979: (slave bridge1045): Enslaving as an active interface with an up link 17:02:49 executing program 0: r0 = getpid() socketpair(0x5, 0x80000, 0x5, &(0x7f0000000340)={0xffffffffffffffff, 0xffffffffffffffff}) r2 = syz_genetlink_get_family_id$devlink(&(0x7f00000003c0), 0xffffffffffffffff) r3 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000006c0)='blkio.bfq.io_queued_recursive\x00', 0x275a, 0x0) ioctl$FS_IOC_RESVSP(r3, 0x40305828, &(0x7f00000007c0)={0x0, 0x0, 0x0, 0x20040006, 0x17800}) ioctl$sock_SIOCGPGRP(0xffffffffffffffff, 0x8904, &(0x7f0000000400)=0x0) sendmsg$DEVLINK_CMD_RELOAD(r1, &(0x7f0000000600)={&(0x7f0000000380)={0x10, 0x0, 0x0, 0x20}, 0xc, &(0x7f0000000540)={&(0x7f0000000440)={0xd4, r2, 0x100, 0x70bd25, 0x25dfdbfd, {}, [{@nsim={{0xe}, {0xf, 0x2, {'netdevsim', 0x0}}}, @DEVLINK_ATTR_NETNS_FD={0x8, 0x8a, r3}}, {@nsim={{0xe}, {0xf, 0x2, {'netdevsim', 0x0}}}, @DEVLINK_ATTR_NETNS_FD={0x8}}, {@nsim={{0xe}, {0xf, 0x2, {'netdevsim', 0x0}}}, @DEVLINK_ATTR_NETNS_PID={0x8, 0x8b, r0}}, {@pci={{0x8}, {0x11}}, @DEVLINK_ATTR_NETNS_PID={0x8, 0x8b, r4}}, {@pci={{0x8}, {0x11}}, @DEVLINK_ATTR_NETNS_FD={0x8}}]}, 0xd4}, 0x1, 0x0, 0x0, 0x4000}, 0x1) socketpair$unix(0x1, 0x2, 0x0, &(0x7f0000000780)={0xffffffffffffffff}) r6 = accept4(r1, &(0x7f0000000e00)=@sco={0x1f, @none}, &(0x7f0000000e80)=0x80, 0x100000) sendmsg$nl_route(r6, &(0x7f0000000fc0)={&(0x7f0000000ec0)={0x10, 0x0, 0x0, 0x80000000}, 0xc, &(0x7f0000000f80)={&(0x7f0000000f00)=@delnexthop={0x48, 0x69, 0x200, 0x70bd2a, 0x25dfdbfc, {}, [{0x8, 0x1, 0x1}, {0x8}, {0x8, 0x1, 0x1}, {0x8, 0x1, 0x1}, {0x8, 0x1, 0x2}, {0x8}]}, 0x48}, 0x1, 0x0, 0x0, 0x20000000}, 0x4000001) r7 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) sendfile(0xffffffffffffffff, r7, 0x0, 0x8000000000004) openat$cgroup_ro(r7, &(0x7f00000001c0)='cpuacct.usage_percpu_user\x00', 0x0, 0x0) getsockopt$sock_cred(r7, 0x1, 0x11, &(0x7f0000000640), &(0x7f0000000680)=0xc) connect$unix(r5, &(0x7f0000000180)=@abs={0x0, 0x0, 0x4e23}, 0x6e) recvmmsg(r5, &(0x7f00000000c0), 0x10106, 0x2, 0x0) bpf$BPF_PROG_RAW_TRACEPOINT_LOAD(0x5, &(0x7f0000000580)={0x18, 0x4, &(0x7f00000002c0)=ANY=[@ANYBLOB="18010000000000000000000000000000850000007d00000095"], &(0x7f0000000100)='GPL\x00', 0x0, 0x0, 0x0, 0x40f00, 0x0, '\x00', 0x0, 0x2, 0xffffffffffffffff, 
0x8, 0x0, 0x0, 0x10, 0x0}, 0x80) r8 = socket$inet(0x2, 0x2, 0x0) setsockopt$IPT_SO_SET_REPLACE(r8, 0x4000000000000, 0x40, &(0x7f0000000000)=@raw={'raw\x00', 0x8, 0x3, 0x2a0, 0xc8, 0x8, 0xfa02, 0x0, 0x6c00, 0x208, 0x194, 0x194, 0x208, 0x194, 0x7fffffe, 0x0, {[{{@ip={@broadcast, @broadcast, 0xffffff00, 0xffffff00, 'ip6gretap0\x00', 'ip6gretap0\x00', {}, {}, 0x21, 0x2, 0x1c}, 0x0, 0xa0, 0xc8, 0x0, {0x0, 0x74020000}, [@common=@inet=@tcp={{0x30}, {[], [], 0x0, 0x0, 0x7a}}]}, @common=@inet=@TCPMSS={0x28}}, {{@ip={@multicast2, @dev, 0x0, 0x0, '\x00', 'tunl0\x00', {0xff}}, 0x0, 0xf8, 0x140, 0x0, {}, [@common=@unspec=@helper={{0x48}, {0x1, 'amanda\x00'}}, @common=@unspec=@connlimit={{0x40}}]}, @unspec=@CT0={0x48, 'CT\x00', 0x0, {0x0, 0x0, 0x0, 0x0, 'syz1\x00'}}}], {{'\x00', 0x0, 0x70, 0x98}, {0x28}}}}, 0x300) r9 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) ioctl$FS_IOC_RESVSP(r9, 0x40305828, &(0x7f00000007c0)={0x0, 0x0, 0x0, 0x20040006, 0x17800}) r10 = syz_genetlink_get_family_id$devlink(&(0x7f0000000740), r3) sendmsg$DEVLINK_CMD_SB_OCC_SNAPSHOT(r9, &(0x7f0000000dc0)={&(0x7f0000000700)={0x10, 0x0, 0x0, 0x20}, 0xc, &(0x7f0000000d80)={&(0x7f0000000d40)={0x38, r10, 0x2, 0x70bd2b, 0x25dfdbfe, {}, [{@pci={{0x8}, {0x11}}, {0x8, 0xb, 0x401}}]}, 0x38}, 0x1, 0x0, 0x0, 0x145}, 0x880) r11 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) write$cgroup_int(0xffffffffffffffff, &(0x7f0000000200), 0xf000) sendfile(0xffffffffffffffff, r11, 0x0, 0xf03b0000) r12 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) sendfile(r12, r11, &(0x7f00000002c0)=0x335773c3, 0x8) sendmsg$DEVLINK_CMD_RATE_DEL(r11, &(0x7f0000001180)={&(0x7f0000001000)={0x10, 0x0, 0x0, 0x10}, 0xc, &(0x7f0000001140)={&(0x7f0000001040)={0xf8, 0x0, 0x400, 0x70bd27, 0x25dfdbfe, {}, [@DEVLINK_ATTR_PORT_INDEX={0x8, 0x3, 0x3}, @DEVLINK_ATTR_PORT_INDEX={0x8, 0x3, 0x1}, @handle=@pci={{0x8}, {0x11}}, @DEVLINK_ATTR_RATE_NODE_NAME={0x93, 0xa8, @random="070490cd7b91e112dceb1b0ac6bb6cbbdc8b5785600d1e725887e658cc1190f807233d087535feda18e721640be3e63c92002522e7640509ac08dc6c40a9775363b9048da9ef906940836850656f52edb434789a89049c985089943e8929ac5787c5636f19b893a5dbb5376e97adc68b37d33498d136975a8e942088b3ad4282b691298e52ede369d24e7abd687e67"}, @handle=@pci={{0x8}, {0x11}}, @DEVLINK_ATTR_PORT_INDEX={0x8, 0x3, 0x3}]}, 0xf8}, 0x1, 0x0, 0x0, 0x44000}, 0x2040015) setsockopt$IPT_SO_SET_REPLACE(0xffffffffffffffff, 0x0, 0x40, &(0x7f00000007c0)=@filter={'filter\x00', 0xe, 0x4, 0x510, 0xffffffff, 0x190, 0x288, 0x0, 0xffffffff, 0xffffffff, 0x478, 0x478, 0x478, 0xffffffff, 0x4, &(0x7f0000000300), {[{{@uncond, 0x0, 0x168, 0x190, 0x0, {}, [@common=@addrtype={{0x30}, {0x200, 0x40, 0x0, 0x1}}, @common=@unspec=@conntrack3={{0xc8}, {{@ipv4=@private=0xa010100, [0xff000000, 0xff, 0xff], @ipv6=@empty, [0xffffff00, 0xff000000, 0xffffff00, 0xff000000], @ipv6=@rand_addr=' \x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02', [0xff, 0x0, 0xffffffff, 0xffffff00], @ipv6=@private2, [0xffffff00, 0xff000000, 0xffffffff, 0xffffff00], 0xffffff4f, 0xf0, 0x0, 0x4e20, 0x4e24, 0x4e24, 0x4e20, 0x0, 0x622}, 0x80, 0x2001, 0x3ff, 0x4e21, 0x4e23, 0x4e20}}]}, @REJECT={0x28, 'REJECT\x00', 0x0, {0x7}}}, {{@uncond, 0x0, 0x98, 0xf8, 0x0, {}, [@common=@ttl={{0x28}, {0x3, 0x8}}]}, @common=@CLUSTERIP={0x60, 'CLUSTERIP\x00', 0x0, {0x0, @random="92d65d3bab4b", 0x7fff, 0x0, [0x14, 0x10, 0x29, 0x39, 0x2c, 0x34, 0x2e, 0x3, 0xa, 0x14, 0x19, 0x29, 0x36, 0xb, 0x1d, 0x2a], 0x1, 0x6, 0x4}}}, 
{{@uncond, 0x0, 0x1a8, 0x1f0, 0x0, {}, [@common=@inet=@recent1={{0x108}, {0x1, 0x4, 0x0, 0x1, 'syz1\x00', 0x1, [0xffffffff, 0xffffff00, 0xffffffff, 0xffffffff]}}, @common=@addrtype={{0x30}, {0x800, 0x86, 0x0, 0x1}}]}, @common=@inet=@TEE={0x48, 'TEE\x00', 0x1, {@ipv4=@loopback, 'syzkaller0\x00', {0x80000000}}}}], {{'\x00', 0x0, 0x70, 0x98}, {0x28}}}}, 0x570) [ 2804.434016][T21144] bond1047: entered promiscuous mode [ 2804.446110][T21144] 8021q: adding VLAN 0 to HW filter on device bond1047 [ 2804.517424][T21150] bond1047: (slave bridge1074): making interface the new active one [ 2804.528076][T21150] bridge1074: entered promiscuous mode [ 2804.593677][T21150] bond1047: (slave bridge1074): Enslaving as an active interface with an up link 17:02:49 executing program 5: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x6c, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) 17:02:49 executing program 3: r0 = socket$netlink(0x10, 0x3, 0x0) socket(0x0, 0x0, 0x0) sendmsg$nl_route(0xffffffffffffffff, 0x0, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000100)=@newlink={0x48, 0x10, 0xffffff1f, 0x0, 0x0, {}, [@IFLA_LINKINFO={0x28, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x18, 0x2, 0x0, 0x1, [@IFLA_BR_MULTI_BOOLOPT={0xc, 0x2e, {0x0, 0x1}}, @IFLA_BR_MCAST_STATS_ENABLED={0x5}]}}}]}, 0x48}}, 0x4) [ 2804.673922][T21184] x_tables: ip_tables: tcp match: only valid for protocol 6 [ 2804.744594][T21156] bond627: entered promiscuous mode [ 2804.750528][T21156] 8021q: adding VLAN 0 to HW filter on device bond627 17:02:49 executing program 4: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x28a, 0x0, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb, 0x10}, {0x4}}}, @IFLA_MASTER={0x8}]}, 0x3c}}, 0x0) [ 2804.863917][T21163] bond750: entered promiscuous mode [ 2804.874617][T21163] 8021q: adding VLAN 0 to HW filter on device bond750 17:02:49 executing program 1: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, 
@broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x28a, 0xa, {0x0, 0x0, 0x0, 0x0, 0x81000000}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8}]}, 0x3c}}, 0x0) [ 2804.987614][T21180] bond980: entered promiscuous mode [ 2804.994830][T21180] 8021q: adding VLAN 0 to HW filter on device bond980 [ 2805.106358][T21185] bond980: (slave bridge1046): making interface the new active one [ 2805.115423][T21185] bridge1046: entered promiscuous mode [ 2805.130364][T21185] bond980: (slave bridge1046): Enslaving as an active interface with an up link 17:02:50 executing program 2: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0xecab1600, 0x0, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) [ 2805.262619][T21195] bond1048: entered promiscuous mode [ 2805.273597][T21195] 8021q: adding VLAN 0 to HW filter on device bond1048 17:02:50 executing program 0: pipe(&(0x7f0000000280)={0xffffffffffffffff, 0xffffffffffffffff}) r2 = socket$nl_netfilter(0x10, 0x3, 0xc) sendmsg$IPCTNL_MSG_CT_NEW(0xffffffffffffffff, &(0x7f0000000100)={0x0, 0x0, &(0x7f0000000140)={&(0x7f0000000000)=ANY=[@ANYBLOB="8000000000010b04000000000000000002000000240001801400018008000100bc141440080002007f0020110c00028005000100000000001c002280080003400000000008000177f2267a000800024000000000240002801400018008000100ac9414bb08000200000000000c0002800500010000002000080007"], 0x80}}, 0x0) r3 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000001c0)='memory.current\x00', 0x275a, 0x0) write$binfmt_script(r3, &(0x7f0000000000)=ANY=[], 0x208e24b) mmap(&(0x7f0000000000/0xb36000)=nil, 0xb36000, 0x2, 0x28011, r3, 0x0) preadv(r3, &(0x7f00000000c0), 0x0, 0x0, 0x5) write$binfmt_misc(r1, &(0x7f0000000000)=ANY=[], 0xfffffecc) r4 = socket$netlink(0x10, 0x3, 0x0) socket$nl_netfilter(0x10, 0x3, 0xc) r5 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000140)='memory.events\x00', 0x7a05, 0x1700) r6 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) write$cgroup_int(r5, &(0x7f0000000200), 0xf000) sendfile(r5, r6, 0x0, 0xf03b0000) sendfile(r5, r5, &(0x7f0000000180), 0x0) setsockopt$netlink_NETLINK_ADD_MEMBERSHIP(r3, 0x10e, 0xb, &(0x7f0000000340)=0xc, 0x4) sendmsg$netlink(r4, &(0x7f0000002b40)={0x0, 0x0, &(0x7f0000000180)=[{&(0x7f00000001c0)={0x14, 0x1e, 0x723, 0x0, 0x0, "", [@generic="88"]}, 0x14}], 0x1}, 0x0) r7 = socket$nl_generic(0x10, 0x3, 0x10) r8 = syz_genetlink_get_family_id$tipc2(&(0x7f0000000080), 0xffffffffffffffff) 
sendmsg$TIPC_NL_MEDIA_SET(r7, &(0x7f0000000280)={0x0, 0x0, &(0x7f0000000240)={&(0x7f00000000c0)={0x14, r8, 0x401}, 0x14}}, 0x0) sendmsg$TIPC_NL_LINK_SET(r4, &(0x7f00000000c0)={&(0x7f0000000000)={0x10, 0x0, 0x0, 0x40000}, 0xc, &(0x7f0000000080)={&(0x7f0000000040)={0x20, r8, 0x4, 0x70bd2a, 0x25dfdbff, {}, [@TIPC_NLA_SOCK={0xc, 0x2, 0x0, 0x1, [@TIPC_NLA_SOCK_HAS_PUBL={0x4}, @TIPC_NLA_SOCK_CON={0x4}]}]}, 0x20}}, 0x40) sendmsg$TIPC_NL_LINK_SET(r0, &(0x7f0000000180)={&(0x7f0000000080)={0x10, 0x0, 0x0, 0x4}, 0xc, &(0x7f00000000c0)={&(0x7f0000000440)={0x1a8, r8, 0x81b, 0x70bd25, 0x25dfdbfe, {}, [@TIPC_NLA_MON={0x2c, 0x9, 0x0, 0x1, [@TIPC_NLA_MON_ACTIVATION_THRESHOLD={0x8, 0x1, 0x6}, @TIPC_NLA_MON_ACTIVATION_THRESHOLD={0x8, 0x1, 0x651}, @TIPC_NLA_MON_ACTIVATION_THRESHOLD={0x8, 0x1, 0x2}, @TIPC_NLA_MON_ACTIVATION_THRESHOLD={0x8, 0x1, 0x1}, @TIPC_NLA_MON_ACTIVATION_THRESHOLD={0x8, 0x1, 0xffff9624}]}, @TIPC_NLA_LINK={0xf8, 0x4, 0x0, 0x1, [@TIPC_NLA_LINK_NAME={0x9, 0x1, 'syz0\x00'}, @TIPC_NLA_LINK_PROP={0x2c, 0x7, 0x0, 0x1, [@TIPC_NLA_PROP_TOL={0x8, 0x2, 0x1}, @TIPC_NLA_PROP_TOL={0x8, 0x2, 0x6}, @TIPC_NLA_PROP_MTU={0x8, 0x4, 0x9}, @TIPC_NLA_PROP_TOL={0x8, 0x2, 0x6}, @TIPC_NLA_PROP_MTU={0x8, 0x4, 0x800}]}, @TIPC_NLA_LINK_PROP={0x44, 0x7, 0x0, 0x1, [@TIPC_NLA_PROP_PRIO={0x8}, @TIPC_NLA_PROP_TOL={0x8, 0x2, 0x8}, @TIPC_NLA_PROP_MTU={0x8, 0x4, 0x525c}, @TIPC_NLA_PROP_TOL={0x8, 0x2, 0x8}, @TIPC_NLA_PROP_TOL={0x8, 0x2, 0x4f82}, @TIPC_NLA_PROP_MTU={0x8, 0x4, 0x2}, @TIPC_NLA_PROP_TOL={0x8}, @TIPC_NLA_PROP_PRIO={0x8, 0x1, 0x18}]}, @TIPC_NLA_LINK_NAME={0x13, 0x1, 'broadcast-link\x00'}, @TIPC_NLA_LINK_NAME={0x13, 0x1, 'broadcast-link\x00'}, @TIPC_NLA_LINK_PROP={0x1c, 0x7, 0x0, 0x1, [@TIPC_NLA_PROP_PRIO={0x8}, @TIPC_NLA_PROP_WIN={0x8, 0x3, 0x9}, @TIPC_NLA_PROP_MTU={0x8, 0x4, 0x8}]}, @TIPC_NLA_LINK_PROP={0x34, 0x7, 0x0, 0x1, [@TIPC_NLA_PROP_WIN={0x8, 0x3, 0x5}, @TIPC_NLA_PROP_PRIO={0x8, 0x1, 0x1b}, @TIPC_NLA_PROP_WIN={0x8, 0x3, 0x300}, @TIPC_NLA_PROP_TOL={0x8, 0x2, 0x4}, @TIPC_NLA_PROP_TOL={0x8, 0x2, 0x2}, @TIPC_NLA_PROP_TOL={0x8, 0x2, 0x5}]}]}, @TIPC_NLA_BEARER={0x4c, 0x1, 0x0, 0x1, [@TIPC_NLA_BEARER_NAME={0xd, 0x1, @udp='udp:syz1\x00'}, @TIPC_NLA_BEARER_UDP_OPTS={0x38, 0x4, {{0x20, 0x1, @in6={0xa, 0x4e23, 0x1ff, @mcast1, 0x7ff}}, {0x14, 0x2, @in={0x2, 0x4e20, @local}}}}]}, @TIPC_NLA_SOCK={0x8, 0x2, 0x0, 0x1, [@TIPC_NLA_SOCK_HAS_PUBL={0x4}]}, @TIPC_NLA_MON={0x1c, 0x9, 0x0, 0x1, [@TIPC_NLA_MON_ACTIVATION_THRESHOLD={0x8, 0x1, 0x4}, @TIPC_NLA_MON_ACTIVATION_THRESHOLD={0x8, 0x1, 0x4}, @TIPC_NLA_MON_ACTIVATION_THRESHOLD={0x8, 0x1, 0x5}]}]}, 0x1a8}, 0x1, 0x0, 0x0, 0x2404c085}, 0x90) splice(r0, 0x0, r2, 0x0, 0x4ffe6, 0x4000000000000) r9 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000140)='memory.events\x00', 0x7a05, 0x1700) r10 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) write$cgroup_int(r9, &(0x7f0000000200), 0xf000) sendfile(r9, r10, 0x0, 0xf03b0000) r11 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) sendfile(r11, r10, &(0x7f00000002c0)=0x2e, 0x8) sendfile(r0, r10, 0x0, 0x8) 17:02:50 executing program 3: r0 = socket$netlink(0x10, 0x3, 0x0) socket(0x0, 0x0, 0x0) sendmsg$nl_route(0xffffffffffffffff, 0x0, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000100)=@newlink={0x48, 0x10, 0xffffff1f, 0x0, 0x0, {}, [@IFLA_LINKINFO={0x28, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x18, 0x2, 0x0, 0x1, [@IFLA_BR_MULTI_BOOLOPT={0xc, 0x2e, {0x0, 0x1}}, @IFLA_BR_MCAST_STATS_ENABLED={0x5}]}}}]}, 0x48}}, 0x6) [ 
2805.486551][T21199] bond1048: (slave bridge1075): making interface the new active one [ 2805.497585][T21199] bridge1075: entered promiscuous mode [ 2805.526517][T21199] bond1048: (slave bridge1075): Enslaving as an active interface with an up link [ 2805.606092][T21201] bond628: entered promiscuous mode [ 2805.611991][T21201] 8021q: adding VLAN 0 to HW filter on device bond628 17:02:50 executing program 5: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x74, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) 17:02:50 executing program 4: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x28a, 0x0, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb, 0x39}, {0x4}}}, @IFLA_MASTER={0x8}]}, 0x3c}}, 0x0) [ 2805.692349][T21205] bond751: entered promiscuous mode [ 2805.703724][T21205] 8021q: adding VLAN 0 to HW filter on device bond751 17:02:50 executing program 1: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x28a, 0xa, {0x0, 0x0, 0x0, 0x0, 0x88a8ffff}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8}]}, 0x3c}}, 0x0) [ 2805.854454][T21209] bond981: entered promiscuous mode [ 2805.860571][T21209] 8021q: adding VLAN 0 to HW filter on device bond981 [ 2805.987432][T21210] bond981: (slave bridge1047): making interface the new active one [ 2806.008062][T21210] bridge1047: entered promiscuous mode [ 2806.019870][T21210] bond981: (slave bridge1047): Enslaving as an active interface with an up link 17:02:51 executing program 2: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 
0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0xedab1600, 0x0, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) 17:02:51 executing program 3: r0 = socket$netlink(0x10, 0x3, 0x0) socket(0x0, 0x0, 0x0) sendmsg$nl_route(0xffffffffffffffff, 0x0, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000100)=@newlink={0x48, 0x10, 0xffffff1f, 0x0, 0x0, {}, [@IFLA_LINKINFO={0x28, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x18, 0x2, 0x0, 0x1, [@IFLA_BR_MULTI_BOOLOPT={0xc, 0x2e, {0x0, 0x1}}, @IFLA_BR_MCAST_STATS_ENABLED={0x5}]}}}]}, 0x48}}, 0x8) [ 2806.176309][T21227] bond629: entered promiscuous mode [ 2806.205013][T21227] 8021q: adding VLAN 0 to HW filter on device bond629 17:02:51 executing program 0: pipe(&(0x7f0000000280)={0xffffffffffffffff, 0xffffffffffffffff}) r2 = socket$nl_netfilter(0x10, 0x3, 0xc) sendmsg$IPCTNL_MSG_CT_NEW(0xffffffffffffffff, &(0x7f0000000100)={0x0, 0x0, &(0x7f0000000140)={&(0x7f0000000000)=ANY=[@ANYBLOB="8000000000010b04000000000000000002000000240001801400018008000100bc141440080002007f0020110c00028005000100000000001c002280080003400000000008000177f2267a000800024000000000240002801400018008000100ac9414bb08000200000000000c0002800500010000002000080007"], 0x80}}, 0x0) r3 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000001c0)='memory.current\x00', 0x275a, 0x0) write$binfmt_script(r3, &(0x7f0000000000)=ANY=[], 0x208e24b) (async) mmap(&(0x7f0000000000/0xb36000)=nil, 0xb36000, 0x2, 0x28011, r3, 0x0) preadv(r3, &(0x7f00000000c0), 0x0, 0x0, 0x5) (async, rerun: 64) write$binfmt_misc(r1, &(0x7f0000000000)=ANY=[], 0xfffffecc) (async, rerun: 64) r4 = socket$netlink(0x10, 0x3, 0x0) socket$nl_netfilter(0x10, 0x3, 0xc) (async, rerun: 32) r5 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000140)='memory.events\x00', 0x7a05, 0x1700) (rerun: 32) r6 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) write$cgroup_int(r5, &(0x7f0000000200), 0xf000) (async, rerun: 32) sendfile(r5, r6, 0x0, 0xf03b0000) (async, rerun: 32) sendfile(r5, r5, &(0x7f0000000180), 0x0) (async) setsockopt$netlink_NETLINK_ADD_MEMBERSHIP(r3, 0x10e, 0xb, &(0x7f0000000340)=0xc, 0x4) (async) sendmsg$netlink(r4, &(0x7f0000002b40)={0x0, 0x0, &(0x7f0000000180)=[{&(0x7f00000001c0)={0x14, 0x1e, 0x723, 0x0, 0x0, "", [@generic="88"]}, 0x14}], 0x1}, 0x0) r7 = socket$nl_generic(0x10, 0x3, 0x10) (async) r8 = syz_genetlink_get_family_id$tipc2(&(0x7f0000000080), 0xffffffffffffffff) sendmsg$TIPC_NL_MEDIA_SET(r7, &(0x7f0000000280)={0x0, 0x0, &(0x7f0000000240)={&(0x7f00000000c0)={0x14, r8, 0x401}, 0x14}}, 0x0) (async) sendmsg$TIPC_NL_LINK_SET(r4, &(0x7f00000000c0)={&(0x7f0000000000)={0x10, 0x0, 0x0, 0x40000}, 0xc, &(0x7f0000000080)={&(0x7f0000000040)={0x20, r8, 0x4, 0x70bd2a, 0x25dfdbff, {}, [@TIPC_NLA_SOCK={0xc, 0x2, 0x0, 0x1, [@TIPC_NLA_SOCK_HAS_PUBL={0x4}, @TIPC_NLA_SOCK_CON={0x4}]}]}, 0x20}}, 0x40) sendmsg$TIPC_NL_LINK_SET(r0, 
&(0x7f0000000180)={&(0x7f0000000080)={0x10, 0x0, 0x0, 0x4}, 0xc, &(0x7f00000000c0)={&(0x7f0000000440)={0x1a8, r8, 0x81b, 0x70bd25, 0x25dfdbfe, {}, [@TIPC_NLA_MON={0x2c, 0x9, 0x0, 0x1, [@TIPC_NLA_MON_ACTIVATION_THRESHOLD={0x8, 0x1, 0x6}, @TIPC_NLA_MON_ACTIVATION_THRESHOLD={0x8, 0x1, 0x651}, @TIPC_NLA_MON_ACTIVATION_THRESHOLD={0x8, 0x1, 0x2}, @TIPC_NLA_MON_ACTIVATION_THRESHOLD={0x8, 0x1, 0x1}, @TIPC_NLA_MON_ACTIVATION_THRESHOLD={0x8, 0x1, 0xffff9624}]}, @TIPC_NLA_LINK={0xf8, 0x4, 0x0, 0x1, [@TIPC_NLA_LINK_NAME={0x9, 0x1, 'syz0\x00'}, @TIPC_NLA_LINK_PROP={0x2c, 0x7, 0x0, 0x1, [@TIPC_NLA_PROP_TOL={0x8, 0x2, 0x1}, @TIPC_NLA_PROP_TOL={0x8, 0x2, 0x6}, @TIPC_NLA_PROP_MTU={0x8, 0x4, 0x9}, @TIPC_NLA_PROP_TOL={0x8, 0x2, 0x6}, @TIPC_NLA_PROP_MTU={0x8, 0x4, 0x800}]}, @TIPC_NLA_LINK_PROP={0x44, 0x7, 0x0, 0x1, [@TIPC_NLA_PROP_PRIO={0x8}, @TIPC_NLA_PROP_TOL={0x8, 0x2, 0x8}, @TIPC_NLA_PROP_MTU={0x8, 0x4, 0x525c}, @TIPC_NLA_PROP_TOL={0x8, 0x2, 0x8}, @TIPC_NLA_PROP_TOL={0x8, 0x2, 0x4f82}, @TIPC_NLA_PROP_MTU={0x8, 0x4, 0x2}, @TIPC_NLA_PROP_TOL={0x8}, @TIPC_NLA_PROP_PRIO={0x8, 0x1, 0x18}]}, @TIPC_NLA_LINK_NAME={0x13, 0x1, 'broadcast-link\x00'}, @TIPC_NLA_LINK_NAME={0x13, 0x1, 'broadcast-link\x00'}, @TIPC_NLA_LINK_PROP={0x1c, 0x7, 0x0, 0x1, [@TIPC_NLA_PROP_PRIO={0x8}, @TIPC_NLA_PROP_WIN={0x8, 0x3, 0x9}, @TIPC_NLA_PROP_MTU={0x8, 0x4, 0x8}]}, @TIPC_NLA_LINK_PROP={0x34, 0x7, 0x0, 0x1, [@TIPC_NLA_PROP_WIN={0x8, 0x3, 0x5}, @TIPC_NLA_PROP_PRIO={0x8, 0x1, 0x1b}, @TIPC_NLA_PROP_WIN={0x8, 0x3, 0x300}, @TIPC_NLA_PROP_TOL={0x8, 0x2, 0x4}, @TIPC_NLA_PROP_TOL={0x8, 0x2, 0x2}, @TIPC_NLA_PROP_TOL={0x8, 0x2, 0x5}]}]}, @TIPC_NLA_BEARER={0x4c, 0x1, 0x0, 0x1, [@TIPC_NLA_BEARER_NAME={0xd, 0x1, @udp='udp:syz1\x00'}, @TIPC_NLA_BEARER_UDP_OPTS={0x38, 0x4, {{0x20, 0x1, @in6={0xa, 0x4e23, 0x1ff, @mcast1, 0x7ff}}, {0x14, 0x2, @in={0x2, 0x4e20, @local}}}}]}, @TIPC_NLA_SOCK={0x8, 0x2, 0x0, 0x1, [@TIPC_NLA_SOCK_HAS_PUBL={0x4}]}, @TIPC_NLA_MON={0x1c, 0x9, 0x0, 0x1, [@TIPC_NLA_MON_ACTIVATION_THRESHOLD={0x8, 0x1, 0x4}, @TIPC_NLA_MON_ACTIVATION_THRESHOLD={0x8, 0x1, 0x4}, @TIPC_NLA_MON_ACTIVATION_THRESHOLD={0x8, 0x1, 0x5}]}]}, 0x1a8}, 0x1, 0x0, 0x0, 0x2404c085}, 0x90) splice(r0, 0x0, r2, 0x0, 0x4ffe6, 0x4000000000000) r9 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000140)='memory.events\x00', 0x7a05, 0x1700) (async) r10 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) write$cgroup_int(r9, &(0x7f0000000200), 0xf000) (async) sendfile(r9, r10, 0x0, 0xf03b0000) r11 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) sendfile(r11, r10, &(0x7f00000002c0)=0x2e, 0x8) (async) sendfile(r0, r10, 0x0, 0x8) [ 2806.346153][T21230] bond1049: entered promiscuous mode [ 2806.353471][T21230] 8021q: adding VLAN 0 to HW filter on device bond1049 17:02:51 executing program 4: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x28a, 0x0, {}, 
[@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb, 0x63}, {0x4}}}, @IFLA_MASTER={0x8}]}, 0x3c}}, 0x0) [ 2806.445239][T21235] bond1049: (slave bridge1076): making interface the new active one [ 2806.466094][T21235] bridge1076: entered promiscuous mode [ 2806.478587][T21235] bond1049: (slave bridge1076): Enslaving as an active interface with an up link [ 2806.509748][T21237] validate_nla: 12 callbacks suppressed [ 2806.509773][T21237] netlink: 'syz-executor.1': attribute type 1 has an invalid length. 17:02:51 executing program 0: pipe(&(0x7f0000000280)={0xffffffffffffffff, 0xffffffffffffffff}) (async) r2 = socket$nl_netfilter(0x10, 0x3, 0xc) sendmsg$IPCTNL_MSG_CT_NEW(0xffffffffffffffff, &(0x7f0000000100)={0x0, 0x0, &(0x7f0000000140)={&(0x7f0000000000)=ANY=[@ANYBLOB="8000000000010b04000000000000000002000000240001801400018008000100bc141440080002007f0020110c00028005000100000000001c002280080003400000000008000177f2267a000800024000000000240002801400018008000100ac9414bb08000200000000000c0002800500010000002000080007"], 0x80}}, 0x0) (async) r3 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000001c0)='memory.current\x00', 0x275a, 0x0) write$binfmt_script(r3, &(0x7f0000000000)=ANY=[], 0x208e24b) (async) mmap(&(0x7f0000000000/0xb36000)=nil, 0xb36000, 0x2, 0x28011, r3, 0x0) preadv(r3, &(0x7f00000000c0), 0x0, 0x0, 0x5) (async) write$binfmt_misc(r1, &(0x7f0000000000)=ANY=[], 0xfffffecc) (async) r4 = socket$netlink(0x10, 0x3, 0x0) (async) socket$nl_netfilter(0x10, 0x3, 0xc) (async) r5 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000140)='memory.events\x00', 0x7a05, 0x1700) r6 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) write$cgroup_int(r5, &(0x7f0000000200), 0xf000) (async) sendfile(r5, r6, 0x0, 0xf03b0000) (async) sendfile(r5, r5, &(0x7f0000000180), 0x0) (async) setsockopt$netlink_NETLINK_ADD_MEMBERSHIP(r3, 0x10e, 0xb, &(0x7f0000000340)=0xc, 0x4) sendmsg$netlink(r4, &(0x7f0000002b40)={0x0, 0x0, &(0x7f0000000180)=[{&(0x7f00000001c0)={0x14, 0x1e, 0x723, 0x0, 0x0, "", [@generic="88"]}, 0x14}], 0x1}, 0x0) (async) r7 = socket$nl_generic(0x10, 0x3, 0x10) r8 = syz_genetlink_get_family_id$tipc2(&(0x7f0000000080), 0xffffffffffffffff) sendmsg$TIPC_NL_MEDIA_SET(r7, &(0x7f0000000280)={0x0, 0x0, &(0x7f0000000240)={&(0x7f00000000c0)={0x14, r8, 0x401}, 0x14}}, 0x0) (async) sendmsg$TIPC_NL_LINK_SET(r4, &(0x7f00000000c0)={&(0x7f0000000000)={0x10, 0x0, 0x0, 0x40000}, 0xc, &(0x7f0000000080)={&(0x7f0000000040)={0x20, r8, 0x4, 0x70bd2a, 0x25dfdbff, {}, [@TIPC_NLA_SOCK={0xc, 0x2, 0x0, 0x1, [@TIPC_NLA_SOCK_HAS_PUBL={0x4}, @TIPC_NLA_SOCK_CON={0x4}]}]}, 0x20}}, 0x40) sendmsg$TIPC_NL_LINK_SET(r0, &(0x7f0000000180)={&(0x7f0000000080)={0x10, 0x0, 0x0, 0x4}, 0xc, &(0x7f00000000c0)={&(0x7f0000000440)={0x1a8, r8, 0x81b, 0x70bd25, 0x25dfdbfe, {}, [@TIPC_NLA_MON={0x2c, 0x9, 0x0, 0x1, [@TIPC_NLA_MON_ACTIVATION_THRESHOLD={0x8, 0x1, 0x6}, @TIPC_NLA_MON_ACTIVATION_THRESHOLD={0x8, 0x1, 0x651}, @TIPC_NLA_MON_ACTIVATION_THRESHOLD={0x8, 0x1, 0x2}, @TIPC_NLA_MON_ACTIVATION_THRESHOLD={0x8, 0x1, 0x1}, @TIPC_NLA_MON_ACTIVATION_THRESHOLD={0x8, 0x1, 0xffff9624}]}, @TIPC_NLA_LINK={0xf8, 0x4, 0x0, 0x1, [@TIPC_NLA_LINK_NAME={0x9, 0x1, 'syz0\x00'}, @TIPC_NLA_LINK_PROP={0x2c, 0x7, 0x0, 0x1, [@TIPC_NLA_PROP_TOL={0x8, 0x2, 0x1}, @TIPC_NLA_PROP_TOL={0x8, 0x2, 0x6}, @TIPC_NLA_PROP_MTU={0x8, 0x4, 0x9}, @TIPC_NLA_PROP_TOL={0x8, 0x2, 0x6}, @TIPC_NLA_PROP_MTU={0x8, 0x4, 0x800}]}, @TIPC_NLA_LINK_PROP={0x44, 0x7, 0x0, 0x1, [@TIPC_NLA_PROP_PRIO={0x8}, @TIPC_NLA_PROP_TOL={0x8, 0x2, 0x8}, @TIPC_NLA_PROP_MTU={0x8, 
0x4, 0x525c}, @TIPC_NLA_PROP_TOL={0x8, 0x2, 0x8}, @TIPC_NLA_PROP_TOL={0x8, 0x2, 0x4f82}, @TIPC_NLA_PROP_MTU={0x8, 0x4, 0x2}, @TIPC_NLA_PROP_TOL={0x8}, @TIPC_NLA_PROP_PRIO={0x8, 0x1, 0x18}]}, @TIPC_NLA_LINK_NAME={0x13, 0x1, 'broadcast-link\x00'}, @TIPC_NLA_LINK_NAME={0x13, 0x1, 'broadcast-link\x00'}, @TIPC_NLA_LINK_PROP={0x1c, 0x7, 0x0, 0x1, [@TIPC_NLA_PROP_PRIO={0x8}, @TIPC_NLA_PROP_WIN={0x8, 0x3, 0x9}, @TIPC_NLA_PROP_MTU={0x8, 0x4, 0x8}]}, @TIPC_NLA_LINK_PROP={0x34, 0x7, 0x0, 0x1, [@TIPC_NLA_PROP_WIN={0x8, 0x3, 0x5}, @TIPC_NLA_PROP_PRIO={0x8, 0x1, 0x1b}, @TIPC_NLA_PROP_WIN={0x8, 0x3, 0x300}, @TIPC_NLA_PROP_TOL={0x8, 0x2, 0x4}, @TIPC_NLA_PROP_TOL={0x8, 0x2, 0x2}, @TIPC_NLA_PROP_TOL={0x8, 0x2, 0x5}]}]}, @TIPC_NLA_BEARER={0x4c, 0x1, 0x0, 0x1, [@TIPC_NLA_BEARER_NAME={0xd, 0x1, @udp='udp:syz1\x00'}, @TIPC_NLA_BEARER_UDP_OPTS={0x38, 0x4, {{0x20, 0x1, @in6={0xa, 0x4e23, 0x1ff, @mcast1, 0x7ff}}, {0x14, 0x2, @in={0x2, 0x4e20, @local}}}}]}, @TIPC_NLA_SOCK={0x8, 0x2, 0x0, 0x1, [@TIPC_NLA_SOCK_HAS_PUBL={0x4}]}, @TIPC_NLA_MON={0x1c, 0x9, 0x0, 0x1, [@TIPC_NLA_MON_ACTIVATION_THRESHOLD={0x8, 0x1, 0x4}, @TIPC_NLA_MON_ACTIVATION_THRESHOLD={0x8, 0x1, 0x4}, @TIPC_NLA_MON_ACTIVATION_THRESHOLD={0x8, 0x1, 0x5}]}]}, 0x1a8}, 0x1, 0x0, 0x0, 0x2404c085}, 0x90) splice(r0, 0x0, r2, 0x0, 0x4ffe6, 0x4000000000000) (async) r9 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000140)='memory.events\x00', 0x7a05, 0x1700) r10 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) (async) write$cgroup_int(r9, &(0x7f0000000200), 0xf000) sendfile(r9, r10, 0x0, 0xf03b0000) r11 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) sendfile(r11, r10, &(0x7f00000002c0)=0x2e, 0x8) (async) sendfile(r0, r10, 0x0, 0x8) 17:02:51 executing program 5: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x7a, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) [ 2806.634708][T21237] bond752: entered promiscuous mode [ 2806.641211][T21237] 8021q: adding VLAN 0 to HW filter on device bond752 [ 2806.673899][T21239] bridge887: entered promiscuous mode [ 2806.679625][T21239] bridge887: entered allmulticast mode 17:02:51 executing program 0: pipe(&(0x7f0000000280)={0xffffffffffffffff, 0xffffffffffffffff}) r2 = socket$nl_netfilter(0x10, 0x3, 0xc) sendmsg$IPCTNL_MSG_CT_NEW(0xffffffffffffffff, &(0x7f0000000100)={0x0, 0x0, &(0x7f0000000140)={&(0x7f0000000000)=ANY=[@ANYBLOB="8000000000010b04000000000000000002000000240001801400018008000100bc141440080002007f0020110c00028005000100000000001c002280080003400000000008000177f2267a000800024000000000240002801400018008000100ac9414bb08000200000000000c0002800500010000002000080007"], 0x80}}, 0x0) r3 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000001c0)='memory.current\x00', 0x275a, 0x0) write$binfmt_script(r3, &(0x7f0000000000)=ANY=[], 
0x208e24b) mmap(&(0x7f0000000000/0xb36000)=nil, 0xb36000, 0x2, 0x28011, r3, 0x0) preadv(r3, &(0x7f00000000c0), 0x0, 0x0, 0x5) write$binfmt_misc(r1, &(0x7f0000000000)=ANY=[], 0xfffffecc) r4 = socket$netlink(0x10, 0x3, 0x0) socket$nl_netfilter(0x10, 0x3, 0xc) r5 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000140)='memory.events\x00', 0x7a05, 0x1700) r6 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) write$cgroup_int(r5, &(0x7f0000000200), 0xf000) sendfile(r5, r6, 0x0, 0xf03b0000) sendfile(r5, r5, &(0x7f0000000180), 0x0) setsockopt$netlink_NETLINK_ADD_MEMBERSHIP(r3, 0x10e, 0xb, &(0x7f0000000340)=0xc, 0x4) sendmsg$netlink(r4, &(0x7f0000002b40)={0x0, 0x0, &(0x7f0000000180)=[{&(0x7f00000001c0)={0x14, 0x1e, 0x723, 0x0, 0x0, "", [@generic="88"]}, 0x14}], 0x1}, 0x0) r7 = socket$nl_generic(0x10, 0x3, 0x10) r8 = syz_genetlink_get_family_id$tipc2(&(0x7f0000000080), 0xffffffffffffffff) sendmsg$TIPC_NL_MEDIA_SET(r7, &(0x7f0000000280)={0x0, 0x0, &(0x7f0000000240)={&(0x7f00000000c0)={0x14, r8, 0x401}, 0x14}}, 0x0) sendmsg$TIPC_NL_LINK_SET(r4, &(0x7f00000000c0)={&(0x7f0000000000)={0x10, 0x0, 0x0, 0x40000}, 0xc, &(0x7f0000000080)={&(0x7f0000000040)={0x20, r8, 0x4, 0x70bd2a, 0x25dfdbff, {}, [@TIPC_NLA_SOCK={0xc, 0x2, 0x0, 0x1, [@TIPC_NLA_SOCK_HAS_PUBL={0x4}, @TIPC_NLA_SOCK_CON={0x4}]}]}, 0x20}}, 0x40) sendmsg$TIPC_NL_LINK_SET(r0, &(0x7f0000000180)={&(0x7f0000000080)={0x10, 0x0, 0x0, 0x4}, 0xc, &(0x7f00000000c0)={&(0x7f0000000440)={0x1a8, r8, 0x81b, 0x70bd25, 0x25dfdbfe, {}, [@TIPC_NLA_MON={0x2c, 0x9, 0x0, 0x1, [@TIPC_NLA_MON_ACTIVATION_THRESHOLD={0x8, 0x1, 0x6}, @TIPC_NLA_MON_ACTIVATION_THRESHOLD={0x8, 0x1, 0x651}, @TIPC_NLA_MON_ACTIVATION_THRESHOLD={0x8, 0x1, 0x2}, @TIPC_NLA_MON_ACTIVATION_THRESHOLD={0x8, 0x1, 0x1}, @TIPC_NLA_MON_ACTIVATION_THRESHOLD={0x8, 0x1, 0xffff9624}]}, @TIPC_NLA_LINK={0xf8, 0x4, 0x0, 0x1, [@TIPC_NLA_LINK_NAME={0x9, 0x1, 'syz0\x00'}, @TIPC_NLA_LINK_PROP={0x2c, 0x7, 0x0, 0x1, [@TIPC_NLA_PROP_TOL={0x8, 0x2, 0x1}, @TIPC_NLA_PROP_TOL={0x8, 0x2, 0x6}, @TIPC_NLA_PROP_MTU={0x8, 0x4, 0x9}, @TIPC_NLA_PROP_TOL={0x8, 0x2, 0x6}, @TIPC_NLA_PROP_MTU={0x8, 0x4, 0x800}]}, @TIPC_NLA_LINK_PROP={0x44, 0x7, 0x0, 0x1, [@TIPC_NLA_PROP_PRIO={0x8}, @TIPC_NLA_PROP_TOL={0x8, 0x2, 0x8}, @TIPC_NLA_PROP_MTU={0x8, 0x4, 0x525c}, @TIPC_NLA_PROP_TOL={0x8, 0x2, 0x8}, @TIPC_NLA_PROP_TOL={0x8, 0x2, 0x4f82}, @TIPC_NLA_PROP_MTU={0x8, 0x4, 0x2}, @TIPC_NLA_PROP_TOL={0x8}, @TIPC_NLA_PROP_PRIO={0x8, 0x1, 0x18}]}, @TIPC_NLA_LINK_NAME={0x13, 0x1, 'broadcast-link\x00'}, @TIPC_NLA_LINK_NAME={0x13, 0x1, 'broadcast-link\x00'}, @TIPC_NLA_LINK_PROP={0x1c, 0x7, 0x0, 0x1, [@TIPC_NLA_PROP_PRIO={0x8}, @TIPC_NLA_PROP_WIN={0x8, 0x3, 0x9}, @TIPC_NLA_PROP_MTU={0x8, 0x4, 0x8}]}, @TIPC_NLA_LINK_PROP={0x34, 0x7, 0x0, 0x1, [@TIPC_NLA_PROP_WIN={0x8, 0x3, 0x5}, @TIPC_NLA_PROP_PRIO={0x8, 0x1, 0x1b}, @TIPC_NLA_PROP_WIN={0x8, 0x3, 0x300}, @TIPC_NLA_PROP_TOL={0x8, 0x2, 0x4}, @TIPC_NLA_PROP_TOL={0x8, 0x2, 0x2}, @TIPC_NLA_PROP_TOL={0x8, 0x2, 0x5}]}]}, @TIPC_NLA_BEARER={0x4c, 0x1, 0x0, 0x1, [@TIPC_NLA_BEARER_NAME={0xd, 0x1, @udp='udp:syz1\x00'}, @TIPC_NLA_BEARER_UDP_OPTS={0x38, 0x4, {{0x20, 0x1, @in6={0xa, 0x4e23, 0x1ff, @mcast1, 0x7ff}}, {0x14, 0x2, @in={0x2, 0x4e20, @local}}}}]}, @TIPC_NLA_SOCK={0x8, 0x2, 0x0, 0x1, [@TIPC_NLA_SOCK_HAS_PUBL={0x4}]}, @TIPC_NLA_MON={0x1c, 0x9, 0x0, 0x1, [@TIPC_NLA_MON_ACTIVATION_THRESHOLD={0x8, 0x1, 0x4}, @TIPC_NLA_MON_ACTIVATION_THRESHOLD={0x8, 0x1, 0x4}, @TIPC_NLA_MON_ACTIVATION_THRESHOLD={0x8, 0x1, 0x5}]}]}, 0x1a8}, 0x1, 0x0, 0x0, 0x2404c085}, 0x90) splice(r0, 0x0, r2, 
0x0, 0x4ffe6, 0x4000000000000) r9 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000140)='memory.events\x00', 0x7a05, 0x1700) r10 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) write$cgroup_int(r9, &(0x7f0000000200), 0xf000) sendfile(r9, r10, 0x0, 0xf03b0000) r11 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) sendfile(r11, r10, &(0x7f00000002c0)=0x2e, 0x8) sendfile(r0, r10, 0x0, 0x8) [ 2806.696832][T21243] netlink: 'syz-executor.2': attribute type 1 has an invalid length. 17:02:51 executing program 1: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x28a, 0xa, {0x0, 0x0, 0x0, 0x0, 0x95383abc}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8}]}, 0x3c}}, 0x0) [ 2806.874125][T21243] bond982: entered promiscuous mode [ 2806.880304][T21243] 8021q: adding VLAN 0 to HW filter on device bond982 17:02:51 executing program 2: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0xee030000, 0x0, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) [ 2806.951895][T21244] bond982: (slave bridge1048): making interface the new active one [ 2806.961107][T21244] bridge1048: entered promiscuous mode [ 2806.974838][T21244] bond982: (slave bridge1048): Enslaving as an active interface with an up link 17:02:52 executing program 3: r0 = socket$netlink(0x10, 0x3, 0x0) socket(0x0, 0x0, 0x0) sendmsg$nl_route(0xffffffffffffffff, 0x0, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000100)=@newlink={0x48, 0x10, 0xffffff1f, 0x0, 0x0, {}, [@IFLA_LINKINFO={0x28, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x18, 0x2, 0x0, 0x1, [@IFLA_BR_MULTI_BOOLOPT={0xc, 0x2e, {0x0, 0x1}}, @IFLA_BR_MCAST_STATS_ENABLED={0x5}]}}}]}, 0x48}}, 0xa) [ 2807.109311][T21259] netlink: 'syz-executor.4': attribute type 1 has an invalid length. 
17:02:52 executing program 4: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x28a, 0x0, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb, 0x300}, {0x4}}}, @IFLA_MASTER={0x8}]}, 0x3c}}, 0x0) [ 2807.176176][T21259] bond630: entered promiscuous mode [ 2807.182772][T21259] 8021q: adding VLAN 0 to HW filter on device bond630 [ 2807.198118][T21273] netlink: 'syz-executor.5': attribute type 1 has an invalid length. [ 2807.335497][T21273] bond1050: entered promiscuous mode [ 2807.341936][T21273] 8021q: adding VLAN 0 to HW filter on device bond1050 [ 2807.399715][T21278] bond1050: (slave bridge1077): making interface the new active one [ 2807.407919][T21278] bridge1077: entered promiscuous mode [ 2807.431490][T21278] bond1050: (slave bridge1077): Enslaving as an active interface with an up link [ 2807.456076][T21282] netlink: 'syz-executor.1': attribute type 1 has an invalid length. 17:02:52 executing program 5: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x84, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) [ 2807.527892][T21282] bond753: entered promiscuous mode [ 2807.548318][T21282] 8021q: adding VLAN 0 to HW filter on device bond753 17:02:52 executing program 0: socket$nl_route(0x10, 0x3, 0x0) sendmsg$nl_route(0xffffffffffffffff, &(0x7f0000000080)={0x0, 0x0, &(0x7f0000000200)={&(0x7f0000000340)=@newlink={0x20, 0x10, 0x821}, 0x20}}, 0x0) [ 2807.621074][T21283] bridge888: entered allmulticast mode [ 2807.628320][T21287] netlink: 'syz-executor.2': attribute type 1 has an invalid length. 
17:02:52 executing program 1: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x28a, 0xa, {0x0, 0x0, 0x0, 0x0, 0x9effffff}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8}]}, 0x3c}}, 0x0) [ 2807.689998][T21287] bond983: entered promiscuous mode [ 2807.695899][T21287] 8021q: adding VLAN 0 to HW filter on device bond983 17:02:52 executing program 0: socket$nl_route(0x10, 0x3, 0x0) (async) sendmsg$nl_route(0xffffffffffffffff, &(0x7f0000000080)={0x0, 0x0, &(0x7f0000000200)={&(0x7f0000000340)=@newlink={0x20, 0x10, 0x821}, 0x20}}, 0x0) 17:02:52 executing program 0: socket$nl_route(0x10, 0x3, 0x0) sendmsg$nl_route(0xffffffffffffffff, &(0x7f0000000080)={0x0, 0x0, &(0x7f0000000200)={&(0x7f0000000340)=@newlink={0x20, 0x10, 0x821}, 0x20}}, 0x0) (async) sendmsg$nl_route(0xffffffffffffffff, &(0x7f0000000080)={0x0, 0x0, &(0x7f0000000200)={&(0x7f0000000340)=@newlink={0x20, 0x10, 0x821}, 0x20}}, 0x0) [ 2807.875491][T21289] bond983: (slave bridge1049): making interface the new active one 17:02:52 executing program 0: sendmsg$nl_route(0xffffffffffffffff, &(0x7f0000000140)={0x0, 0x0, &(0x7f00000006c0)={&(0x7f0000000180)=ANY=[@ANYBLOB="6c00000010000104000000000072f60000000000", @ANYRES32=0x0, @ANYBLOB="0524060000000000300012800b0001006272696467650000200002800c002e00fffff6ffffffffff050007001f000000060027000000000008000a00a8"], 0x6c}}, 0x0) socketpair(0x22, 0x2, 0x60cc, &(0x7f0000000000)={0xffffffffffffffff, 0xffffffffffffffff}) r1 = syz_genetlink_get_family_id$tipc2(&(0x7f00000003c0), 0xffffffffffffffff) sendmsg$TIPC_NL_MON_PEER_GET(r0, &(0x7f00000004c0)={&(0x7f0000000040)={0x10, 0x0, 0x0, 0x2000000}, 0xc, &(0x7f0000000480)={&(0x7f0000000400)={0x50, r1, 0x800, 0x70bd2c, 0x25dfdbfb, {}, [@TIPC_NLA_PUBL={0x3c, 0x3, 0x0, 0x1, [@TIPC_NLA_PUBL_UPPER={0x8, 0x3, 0x1}, @TIPC_NLA_PUBL_UPPER={0x8, 0x3, 0xfffffff9}, @TIPC_NLA_PUBL_LOWER={0x8}, @TIPC_NLA_PUBL_TYPE={0x8, 0x1, 0x2}, @TIPC_NLA_PUBL_UPPER={0x8, 0x3, 0x8}, @TIPC_NLA_PUBL_TYPE={0x8, 0x1, 0x1}, @TIPC_NLA_PUBL_LOWER={0x8, 0x2, 0x400}]}]}, 0x50}, 0x1, 0x0, 0x0, 0x8001}, 0x24000000) r2 = socket(0x10, 0x3, 0x0) sendmmsg$alg(r2, &(0x7f0000000140), 0x4924b68, 0x0) r3 = bpf$PROG_LOAD_XDP(0x5, &(0x7f0000000280)={0x6, 0xe, &(0x7f0000000500)=ANY=[@ANYBLOB="1800823b84f4b047678b1b0600000600000000000000000400001838000094000000000000000000a3c485000000bc0000008510000008000000123700000000e0ff00000000000007000000050000000000000400000000185a0000ffffffff01000000000000002e357fff0400000095000000000000000000b576ad84f6ef3388a695b638339c04e181b14a27dcd29b42d15960fe8a9419e2b8ebaa3903d1fb5637a401caaec78ba81c556537133c350b5570cf3dc6b93655f890d518533856b697fe7a2ce3f6ab1f67becaf37d124d4bae519aa2b6f1ce6f140df979c1dec66acad63c53f9a60db940fd8c3d95b5e4990d42a0f90c7493c4dc4e7b1b0487a4"], &(0x7f0000000080)='syzkaller\x00', 0x9, 0x7b, &(0x7f00000000c0)=""/123, 0x40f00, 0x9a30bee9e01a1d76, '\x00', 0x0, 0x25, 0xffffffffffffffff, 
0x8, &(0x7f0000000200)={0x1, 0x2}, 0x8, 0x10, &(0x7f0000000240)={0xfffffffd, 0xb, 0x7ff, 0x1}, 0x10}, 0x80) ioctl$FITRIM(r3, 0xc0185879, &(0x7f0000000300)={0x1, 0x71d, 0x5}) [ 2807.915926][T21289] bridge1049: entered promiscuous mode [ 2807.944007][T21289] bond983: (slave bridge1049): Enslaving as an active interface with an up link 17:02:52 executing program 2: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0xeeab1600, 0x0, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) [ 2807.963625][T21299] netlink: 'syz-executor.4': attribute type 1 has an invalid length. [ 2808.075175][T21299] bond631: entered promiscuous mode [ 2808.093642][T21299] 8021q: adding VLAN 0 to HW filter on device bond631 17:02:53 executing program 4: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x28a, 0x0, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb, 0xa00}, {0x4}}}, @IFLA_MASTER={0x8}]}, 0x3c}}, 0x0) 17:02:53 executing program 3: r0 = socket$netlink(0x10, 0x3, 0x0) socket(0x0, 0x0, 0x0) sendmsg$nl_route(0xffffffffffffffff, 0x0, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000100)=@newlink={0x48, 0x10, 0xffffff1f, 0x0, 0x0, {}, [@IFLA_LINKINFO={0x28, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x18, 0x2, 0x0, 0x1, [@IFLA_BR_MULTI_BOOLOPT={0xc, 0x2e, {0x0, 0x1}}, @IFLA_BR_MCAST_STATS_ENABLED={0x5}]}}}]}, 0x48}}, 0xc) [ 2808.202492][T21306] netlink: 'syz-executor.5': attribute type 1 has an invalid length. [ 2808.261096][T21306] bond1051: entered promiscuous mode [ 2808.266841][T21306] 8021q: adding VLAN 0 to HW filter on device bond1051 [ 2808.435182][T21308] bond1051: (slave bridge1078): making interface the new active one [ 2808.465330][T21308] bridge1078: entered promiscuous mode [ 2808.484720][T21308] bond1051: (slave bridge1078): Enslaving as an active interface with an up link [ 2808.500137][T21315] netlink: 'syz-executor.1': attribute type 1 has an invalid length. 
17:02:53 executing program 5: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x9e, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) [ 2808.570939][T21315] bond754: entered promiscuous mode [ 2808.579489][T21315] 8021q: adding VLAN 0 to HW filter on device bond754 17:02:53 executing program 1: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x28a, 0xa, {0x0, 0x0, 0x0, 0x0, 0xbc3a3895}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8}]}, 0x3c}}, 0x0) [ 2808.639328][T21320] bridge889: entered promiscuous mode [ 2808.645074][T21320] bridge889: entered allmulticast mode [ 2808.664261][T21327] netlink: 20 bytes leftover after parsing attributes in process `syz-executor.0'. [ 2808.831794][T21330] netlink: 'syz-executor.2': attribute type 1 has an invalid length. 
17:02:53 executing program 0: sendmsg$nl_route(0xffffffffffffffff, &(0x7f0000000140)={0x0, 0x0, &(0x7f00000006c0)={&(0x7f0000000180)=ANY=[@ANYBLOB="6c00000010000104000000000072f60000000000", @ANYRES32=0x0, @ANYBLOB="0524060000000000300012800b0001006272696467650000200002800c002e00fffff6ffffffffff050007001f000000060027000000000008000a00a8"], 0x6c}}, 0x0) (async) socketpair(0x22, 0x2, 0x60cc, &(0x7f0000000000)={0xffffffffffffffff, 0xffffffffffffffff}) r1 = syz_genetlink_get_family_id$tipc2(&(0x7f00000003c0), 0xffffffffffffffff) sendmsg$TIPC_NL_MON_PEER_GET(r0, &(0x7f00000004c0)={&(0x7f0000000040)={0x10, 0x0, 0x0, 0x2000000}, 0xc, &(0x7f0000000480)={&(0x7f0000000400)={0x50, r1, 0x800, 0x70bd2c, 0x25dfdbfb, {}, [@TIPC_NLA_PUBL={0x3c, 0x3, 0x0, 0x1, [@TIPC_NLA_PUBL_UPPER={0x8, 0x3, 0x1}, @TIPC_NLA_PUBL_UPPER={0x8, 0x3, 0xfffffff9}, @TIPC_NLA_PUBL_LOWER={0x8}, @TIPC_NLA_PUBL_TYPE={0x8, 0x1, 0x2}, @TIPC_NLA_PUBL_UPPER={0x8, 0x3, 0x8}, @TIPC_NLA_PUBL_TYPE={0x8, 0x1, 0x1}, @TIPC_NLA_PUBL_LOWER={0x8, 0x2, 0x400}]}]}, 0x50}, 0x1, 0x0, 0x0, 0x8001}, 0x24000000) (async) r2 = socket(0x10, 0x3, 0x0) sendmmsg$alg(r2, &(0x7f0000000140), 0x4924b68, 0x0) (async) r3 = bpf$PROG_LOAD_XDP(0x5, &(0x7f0000000280)={0x6, 0xe, &(0x7f0000000500)=ANY=[@ANYBLOB="1800823b84f4b047678b1b0600000600000000000000000400001838000094000000000000000000a3c485000000bc0000008510000008000000123700000000e0ff00000000000007000000050000000000000400000000185a0000ffffffff01000000000000002e357fff0400000095000000000000000000b576ad84f6ef3388a695b638339c04e181b14a27dcd29b42d15960fe8a9419e2b8ebaa3903d1fb5637a401caaec78ba81c556537133c350b5570cf3dc6b93655f890d518533856b697fe7a2ce3f6ab1f67becaf37d124d4bae519aa2b6f1ce6f140df979c1dec66acad63c53f9a60db940fd8c3d95b5e4990d42a0f90c7493c4dc4e7b1b0487a4"], &(0x7f0000000080)='syzkaller\x00', 0x9, 0x7b, &(0x7f00000000c0)=""/123, 0x40f00, 0x9a30bee9e01a1d76, '\x00', 0x0, 0x25, 0xffffffffffffffff, 0x8, &(0x7f0000000200)={0x1, 0x2}, 0x8, 0x10, &(0x7f0000000240)={0xfffffffd, 0xb, 0x7ff, 0x1}, 0x10}, 0x80) ioctl$FITRIM(r3, 0xc0185879, &(0x7f0000000300)={0x1, 0x71d, 0x5}) [ 2808.885734][T21330] bond984: entered promiscuous mode [ 2808.893058][T21330] 8021q: adding VLAN 0 to HW filter on device bond984 17:02:53 executing program 2: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0xefab1600, 0x0, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) [ 2808.953120][T21333] bond984: (slave bridge1050): making interface the new active one [ 2808.962324][T21333] bridge1050: entered promiscuous mode [ 2808.982530][T21333] bond984: (slave bridge1050): Enslaving as an active interface with an up link [ 2809.048312][T21337] bond632: entered promiscuous mode [ 2809.064086][T21337] 8021q: adding VLAN 0 to HW filter on device bond632 17:02:54 executing program 4: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 
= socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x28a, 0x0, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb, 0x6300}, {0x4}}}, @IFLA_MASTER={0x8}]}, 0x3c}}, 0x0) 17:02:54 executing program 3: r0 = socket$netlink(0x10, 0x3, 0x0) socket(0x0, 0x0, 0x0) sendmsg$nl_route(0xffffffffffffffff, 0x0, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000100)=@newlink={0x48, 0x10, 0xffffff1f, 0x0, 0x0, {}, [@IFLA_LINKINFO={0x28, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x18, 0x2, 0x0, 0x1, [@IFLA_BR_MULTI_BOOLOPT={0xc, 0x2e, {0x0, 0x1}}, @IFLA_BR_MCAST_STATS_ENABLED={0x5}]}}}]}, 0x48}}, 0xe) [ 2809.226338][T21346] bond1052: entered promiscuous mode [ 2809.252813][T21346] 8021q: adding VLAN 0 to HW filter on device bond1052 [ 2809.415680][T21347] bond1052: (slave bridge1079): making interface the new active one [ 2809.424023][T21347] bridge1079: entered promiscuous mode [ 2809.455814][T21347] bond1052: (slave bridge1079): Enslaving as an active interface with an up link 17:02:54 executing program 5: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0xaa, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) [ 2809.556934][T21349] bond755: entered promiscuous mode [ 2809.564305][T21349] 8021q: adding VLAN 0 to HW filter on device bond755 17:02:54 executing program 1: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x28a, 0xa, {0x0, 0x0, 0x0, 0x0, 0xf0ffffff}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8}]}, 0x3c}}, 0x0) [ 2809.615667][T21355] netlink: 20 bytes leftover after parsing attributes in process `syz-executor.0'. 
17:02:54 executing program 0: sendmsg$nl_route(0xffffffffffffffff, &(0x7f0000000140)={0x0, 0x0, &(0x7f00000006c0)={&(0x7f0000000180)=ANY=[@ANYBLOB="6c00000010000104000000000072f60000000000", @ANYRES32=0x0, @ANYBLOB="0524060000000000300012800b0001006272696467650000200002800c002e00fffff6ffffffffff050007001f000000060027000000000008000a00a8"], 0x6c}}, 0x0) socketpair(0x22, 0x2, 0x60cc, &(0x7f0000000000)={0xffffffffffffffff, 0xffffffffffffffff}) r1 = syz_genetlink_get_family_id$tipc2(&(0x7f00000003c0), 0xffffffffffffffff) sendmsg$TIPC_NL_MON_PEER_GET(r0, &(0x7f00000004c0)={&(0x7f0000000040)={0x10, 0x0, 0x0, 0x2000000}, 0xc, &(0x7f0000000480)={&(0x7f0000000400)={0x50, r1, 0x800, 0x70bd2c, 0x25dfdbfb, {}, [@TIPC_NLA_PUBL={0x3c, 0x3, 0x0, 0x1, [@TIPC_NLA_PUBL_UPPER={0x8, 0x3, 0x1}, @TIPC_NLA_PUBL_UPPER={0x8, 0x3, 0xfffffff9}, @TIPC_NLA_PUBL_LOWER={0x8}, @TIPC_NLA_PUBL_TYPE={0x8, 0x1, 0x2}, @TIPC_NLA_PUBL_UPPER={0x8, 0x3, 0x8}, @TIPC_NLA_PUBL_TYPE={0x8, 0x1, 0x1}, @TIPC_NLA_PUBL_LOWER={0x8, 0x2, 0x400}]}]}, 0x50}, 0x1, 0x0, 0x0, 0x8001}, 0x24000000) r2 = socket(0x10, 0x3, 0x0) sendmmsg$alg(r2, &(0x7f0000000140), 0x4924b68, 0x0) r3 = bpf$PROG_LOAD_XDP(0x5, &(0x7f0000000280)={0x6, 0xe, &(0x7f0000000500)=ANY=[@ANYBLOB="1800823b84f4b047678b1b0600000600000000000000000400001838000094000000000000000000a3c485000000bc0000008510000008000000123700000000e0ff00000000000007000000050000000000000400000000185a0000ffffffff01000000000000002e357fff0400000095000000000000000000b576ad84f6ef3388a695b638339c04e181b14a27dcd29b42d15960fe8a9419e2b8ebaa3903d1fb5637a401caaec78ba81c556537133c350b5570cf3dc6b93655f890d518533856b697fe7a2ce3f6ab1f67becaf37d124d4bae519aa2b6f1ce6f140df979c1dec66acad63c53f9a60db940fd8c3d95b5e4990d42a0f90c7493c4dc4e7b1b0487a4"], &(0x7f0000000080)='syzkaller\x00', 0x9, 0x7b, &(0x7f00000000c0)=""/123, 0x40f00, 0x9a30bee9e01a1d76, '\x00', 0x0, 0x25, 0xffffffffffffffff, 0x8, &(0x7f0000000200)={0x1, 0x2}, 0x8, 0x10, &(0x7f0000000240)={0xfffffffd, 0xb, 0x7ff, 0x1}, 0x10}, 0x80) ioctl$FITRIM(r3, 0xc0185879, &(0x7f0000000300)={0x1, 0x71d, 0x5}) sendmsg$nl_route(0xffffffffffffffff, &(0x7f0000000140)={0x0, 0x0, &(0x7f00000006c0)={&(0x7f0000000180)=ANY=[@ANYBLOB="6c00000010000104000000000072f60000000000", @ANYRES32=0x0, @ANYBLOB="0524060000000000300012800b0001006272696467650000200002800c002e00fffff6ffffffffff050007001f000000060027000000000008000a00a8"], 0x6c}}, 0x0) (async) socketpair(0x22, 0x2, 0x60cc, &(0x7f0000000000)) (async) syz_genetlink_get_family_id$tipc2(&(0x7f00000003c0), 0xffffffffffffffff) (async) sendmsg$TIPC_NL_MON_PEER_GET(r0, &(0x7f00000004c0)={&(0x7f0000000040)={0x10, 0x0, 0x0, 0x2000000}, 0xc, &(0x7f0000000480)={&(0x7f0000000400)={0x50, r1, 0x800, 0x70bd2c, 0x25dfdbfb, {}, [@TIPC_NLA_PUBL={0x3c, 0x3, 0x0, 0x1, [@TIPC_NLA_PUBL_UPPER={0x8, 0x3, 0x1}, @TIPC_NLA_PUBL_UPPER={0x8, 0x3, 0xfffffff9}, @TIPC_NLA_PUBL_LOWER={0x8}, @TIPC_NLA_PUBL_TYPE={0x8, 0x1, 0x2}, @TIPC_NLA_PUBL_UPPER={0x8, 0x3, 0x8}, @TIPC_NLA_PUBL_TYPE={0x8, 0x1, 0x1}, @TIPC_NLA_PUBL_LOWER={0x8, 0x2, 0x400}]}]}, 0x50}, 0x1, 0x0, 0x0, 0x8001}, 0x24000000) (async) socket(0x10, 0x3, 0x0) (async) sendmmsg$alg(r2, &(0x7f0000000140), 0x4924b68, 0x0) (async) bpf$PROG_LOAD_XDP(0x5, &(0x7f0000000280)={0x6, 0xe, 
&(0x7f0000000500)=ANY=[@ANYBLOB="1800823b84f4b047678b1b0600000600000000000000000400001838000094000000000000000000a3c485000000bc0000008510000008000000123700000000e0ff00000000000007000000050000000000000400000000185a0000ffffffff01000000000000002e357fff0400000095000000000000000000b576ad84f6ef3388a695b638339c04e181b14a27dcd29b42d15960fe8a9419e2b8ebaa3903d1fb5637a401caaec78ba81c556537133c350b5570cf3dc6b93655f890d518533856b697fe7a2ce3f6ab1f67becaf37d124d4bae519aa2b6f1ce6f140df979c1dec66acad63c53f9a60db940fd8c3d95b5e4990d42a0f90c7493c4dc4e7b1b0487a4"], &(0x7f0000000080)='syzkaller\x00', 0x9, 0x7b, &(0x7f00000000c0)=""/123, 0x40f00, 0x9a30bee9e01a1d76, '\x00', 0x0, 0x25, 0xffffffffffffffff, 0x8, &(0x7f0000000200)={0x1, 0x2}, 0x8, 0x10, &(0x7f0000000240)={0xfffffffd, 0xb, 0x7ff, 0x1}, 0x10}, 0x80) (async) ioctl$FITRIM(r3, 0xc0185879, &(0x7f0000000300)={0x1, 0x71d, 0x5}) (async) [ 2809.845025][T21360] bond985: entered promiscuous mode [ 2809.865174][T21360] 8021q: adding VLAN 0 to HW filter on device bond985 17:02:55 executing program 2: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0xf0020000, 0x0, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) [ 2809.985234][T21362] bond985: (slave bridge1051): making interface the new active one [ 2809.994285][T21362] bridge1051: entered promiscuous mode [ 2810.009245][T21362] bond985: (slave bridge1051): Enslaving as an active interface with an up link 17:02:55 executing program 4: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x28a, 0x0, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb, 0xff7f}, {0x4}}}, @IFLA_MASTER={0x8}]}, 0x3c}}, 0x0) 17:02:55 executing program 3: r0 = socket$netlink(0x10, 0x3, 0x0) socket(0x0, 0x0, 0x0) sendmsg$nl_route(0xffffffffffffffff, 0x0, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000100)=@newlink={0x48, 0x10, 0xffffff1f, 0x0, 0x0, {}, [@IFLA_LINKINFO={0x28, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x18, 0x2, 0x0, 0x1, [@IFLA_BR_MULTI_BOOLOPT={0xc, 0x2e, {0x0, 0x1}}, @IFLA_BR_MCAST_STATS_ENABLED={0x5}]}}}]}, 0x48}}, 0x10) [ 2810.063950][T21367] workqueue: Failed to create a rescuer kthread for wq "bond633": -EINTR [ 2810.343773][T21374] bond1053: entered 
promiscuous mode [ 2810.375184][T21374] 8021q: adding VLAN 0 to HW filter on device bond1053 [ 2810.557204][T21375] bond1053: (slave bridge1080): making interface the new active one [ 2810.570343][T21375] bridge1080: entered promiscuous mode [ 2810.588127][T21375] bond1053: (slave bridge1080): Enslaving as an active interface with an up link 17:02:55 executing program 5: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0xba, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) [ 2810.613851][T21378] workqueue: Failed to create a rescuer kthread for wq "bond756": -EINTR [ 2810.688189][T21379] bridge891: entered promiscuous mode [ 2810.724789][T21379] bridge891: entered allmulticast mode [ 2810.748769][T21383] netlink: 20 bytes leftover after parsing attributes in process `syz-executor.0'. 17:02:55 executing program 1: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x28a, 0xa, {0x0, 0x0, 0x0, 0x0, 0xffffa888}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8}]}, 0x3c}}, 0x0) 17:02:55 executing program 0: r0 = syz_init_net_socket$nl_generic(0x10, 0x3, 0x10) r1 = syz_genetlink_get_family_id$netlbl_mgmt(&(0x7f0000001300), r0) r2 = syz_init_net_socket$nl_generic(0x10, 0x3, 0x10) r3 = syz_genetlink_get_family_id$netlbl_calipso(&(0x7f0000000440), r0) sendmsg$NLBL_CALIPSO_C_REMOVE(r0, &(0x7f0000000500)={&(0x7f0000000340)={0x10, 0x0, 0x0, 0x80000000}, 0xc, &(0x7f00000004c0)={&(0x7f0000000480)={0x1c, r3, 0x2, 0x70bd29, 0x25dfdbfc, {}, [@NLBL_CALIPSO_A_MTYPE={0x8}]}, 0x1c}, 0x1, 0x0, 0x0, 0x8048080}, 0x5eaafd49763d304c) sendmsg$NLBL_MGMT_C_ADD(r2, &(0x7f00000014c0)={&(0x7f00000013c0)={0x10, 0x0, 0x0, 0x8000000}, 0xc, &(0x7f0000001480)={&(0x7f00000003c0)={0x58, r1, 0x1, 0x0, 0x0, {}, [@NLBL_MGMT_A_DOMAIN={0xa, 0x1, '\aE\x97\v\xe0n'}, @NLBL_MGMT_A_IPV6MASK={0x14, 0x6, @mcast1}, @NLBL_MGMT_A_PROTOCOL={0x8}, @NLBL_MGMT_A_IPV6MASK={0x14, 0x6, @rand_addr=' \x01\x00'}, @NLBL_MGMT_A_IPV4MASK={0x8}]}, 0x58}}, 0x0) ioctl$sock_SIOCGIFINDEX_802154(0xffffffffffffffff, 0x8933, &(0x7f0000000140)={'wpan3\x00', 0x0}) sendmsg$NL802154_CMD_SET_PAN_ID(r2, &(0x7f0000000200)={&(0x7f0000000000)={0x10, 0x0, 0x0, 0x2000000}, 0xc, 
&(0x7f00000001c0)={&(0x7f0000000180)={0x40, 0x0, 0x800, 0x70bd2b, 0x25dfdbff, {}, [@NL802154_ATTR_WPAN_DEV={0xc, 0x6, 0x200000002}, @NL802154_ATTR_PAN_ID={0x6, 0x9, 0xffff}, @NL802154_ATTR_IFINDEX={0x8}, @NL802154_ATTR_IFINDEX={0x8}, @NL802154_ATTR_IFINDEX={0x8, 0x3, r4}]}, 0x40}, 0x1, 0x0, 0x0, 0x20000004}, 0x4000) r5 = syz_genetlink_get_family_id$devlink(&(0x7f0000000380), 0xffffffffffffffff) r6 = socket$nl_generic(0x10, 0x3, 0x10) sendmsg$DEVLINK_CMD_TRAP_GROUP_SET(r6, &(0x7f0000000100)={0x0, 0x0, &(0x7f00000000c0)={&(0x7f0000000040)={0x4c, r5, 0x503, 0x0, 0x0, {}, [{@nsim={{0xe}, {0xf, 0x2, {'netdevsim', 0x0}}}, {0xd}, {0x5, 0x83, 0x5}}]}, 0x4c}}, 0x0) 17:02:56 executing program 0: r0 = syz_init_net_socket$nl_generic(0x10, 0x3, 0x10) r1 = syz_genetlink_get_family_id$netlbl_mgmt(&(0x7f0000001300), r0) (async) r2 = syz_init_net_socket$nl_generic(0x10, 0x3, 0x10) (async) r3 = syz_genetlink_get_family_id$netlbl_calipso(&(0x7f0000000440), r0) sendmsg$NLBL_CALIPSO_C_REMOVE(r0, &(0x7f0000000500)={&(0x7f0000000340)={0x10, 0x0, 0x0, 0x80000000}, 0xc, &(0x7f00000004c0)={&(0x7f0000000480)={0x1c, r3, 0x2, 0x70bd29, 0x25dfdbfc, {}, [@NLBL_CALIPSO_A_MTYPE={0x8}]}, 0x1c}, 0x1, 0x0, 0x0, 0x8048080}, 0x5eaafd49763d304c) (async) sendmsg$NLBL_MGMT_C_ADD(r2, &(0x7f00000014c0)={&(0x7f00000013c0)={0x10, 0x0, 0x0, 0x8000000}, 0xc, &(0x7f0000001480)={&(0x7f00000003c0)={0x58, r1, 0x1, 0x0, 0x0, {}, [@NLBL_MGMT_A_DOMAIN={0xa, 0x1, '\aE\x97\v\xe0n'}, @NLBL_MGMT_A_IPV6MASK={0x14, 0x6, @mcast1}, @NLBL_MGMT_A_PROTOCOL={0x8}, @NLBL_MGMT_A_IPV6MASK={0x14, 0x6, @rand_addr=' \x01\x00'}, @NLBL_MGMT_A_IPV4MASK={0x8}]}, 0x58}}, 0x0) ioctl$sock_SIOCGIFINDEX_802154(0xffffffffffffffff, 0x8933, &(0x7f0000000140)={'wpan3\x00', 0x0}) sendmsg$NL802154_CMD_SET_PAN_ID(r2, &(0x7f0000000200)={&(0x7f0000000000)={0x10, 0x0, 0x0, 0x2000000}, 0xc, &(0x7f00000001c0)={&(0x7f0000000180)={0x40, 0x0, 0x800, 0x70bd2b, 0x25dfdbff, {}, [@NL802154_ATTR_WPAN_DEV={0xc, 0x6, 0x200000002}, @NL802154_ATTR_PAN_ID={0x6, 0x9, 0xffff}, @NL802154_ATTR_IFINDEX={0x8}, @NL802154_ATTR_IFINDEX={0x8}, @NL802154_ATTR_IFINDEX={0x8, 0x3, r4}]}, 0x40}, 0x1, 0x0, 0x0, 0x20000004}, 0x4000) r5 = syz_genetlink_get_family_id$devlink(&(0x7f0000000380), 0xffffffffffffffff) (async) r6 = socket$nl_generic(0x10, 0x3, 0x10) sendmsg$DEVLINK_CMD_TRAP_GROUP_SET(r6, &(0x7f0000000100)={0x0, 0x0, &(0x7f00000000c0)={&(0x7f0000000040)={0x4c, r5, 0x503, 0x0, 0x0, {}, [{@nsim={{0xe}, {0xf, 0x2, {'netdevsim', 0x0}}}, {0xd}, {0x5, 0x83, 0x5}}]}, 0x4c}}, 0x0) [ 2811.029465][T21392] bond986: entered promiscuous mode [ 2811.046404][T21392] 8021q: adding VLAN 0 to HW filter on device bond986 17:02:56 executing program 0: r0 = syz_init_net_socket$nl_generic(0x10, 0x3, 0x10) r1 = syz_genetlink_get_family_id$netlbl_mgmt(&(0x7f0000001300), r0) (async) r2 = syz_init_net_socket$nl_generic(0x10, 0x3, 0x10) r3 = syz_genetlink_get_family_id$netlbl_calipso(&(0x7f0000000440), r0) sendmsg$NLBL_CALIPSO_C_REMOVE(r0, &(0x7f0000000500)={&(0x7f0000000340)={0x10, 0x0, 0x0, 0x80000000}, 0xc, &(0x7f00000004c0)={&(0x7f0000000480)={0x1c, r3, 0x2, 0x70bd29, 0x25dfdbfc, {}, [@NLBL_CALIPSO_A_MTYPE={0x8}]}, 0x1c}, 0x1, 0x0, 0x0, 0x8048080}, 0x5eaafd49763d304c) (async) sendmsg$NLBL_MGMT_C_ADD(r2, &(0x7f00000014c0)={&(0x7f00000013c0)={0x10, 0x0, 0x0, 0x8000000}, 0xc, &(0x7f0000001480)={&(0x7f00000003c0)={0x58, r1, 0x1, 0x0, 0x0, {}, [@NLBL_MGMT_A_DOMAIN={0xa, 0x1, '\aE\x97\v\xe0n'}, @NLBL_MGMT_A_IPV6MASK={0x14, 0x6, @mcast1}, @NLBL_MGMT_A_PROTOCOL={0x8}, @NLBL_MGMT_A_IPV6MASK={0x14, 0x6, @rand_addr=' \x01\x00'}, 
@NLBL_MGMT_A_IPV4MASK={0x8}]}, 0x58}}, 0x0) (async) ioctl$sock_SIOCGIFINDEX_802154(0xffffffffffffffff, 0x8933, &(0x7f0000000140)={'wpan3\x00', 0x0}) sendmsg$NL802154_CMD_SET_PAN_ID(r2, &(0x7f0000000200)={&(0x7f0000000000)={0x10, 0x0, 0x0, 0x2000000}, 0xc, &(0x7f00000001c0)={&(0x7f0000000180)={0x40, 0x0, 0x800, 0x70bd2b, 0x25dfdbff, {}, [@NL802154_ATTR_WPAN_DEV={0xc, 0x6, 0x200000002}, @NL802154_ATTR_PAN_ID={0x6, 0x9, 0xffff}, @NL802154_ATTR_IFINDEX={0x8}, @NL802154_ATTR_IFINDEX={0x8}, @NL802154_ATTR_IFINDEX={0x8, 0x3, r4}]}, 0x40}, 0x1, 0x0, 0x0, 0x20000004}, 0x4000) (async) r5 = syz_genetlink_get_family_id$devlink(&(0x7f0000000380), 0xffffffffffffffff) (async) r6 = socket$nl_generic(0x10, 0x3, 0x10) sendmsg$DEVLINK_CMD_TRAP_GROUP_SET(r6, &(0x7f0000000100)={0x0, 0x0, &(0x7f00000000c0)={&(0x7f0000000040)={0x4c, r5, 0x503, 0x0, 0x0, {}, [{@nsim={{0xe}, {0xf, 0x2, {'netdevsim', 0x0}}}, {0xd}, {0x5, 0x83, 0x5}}]}, 0x4c}}, 0x0) 17:02:56 executing program 0: unshare(0x40000400) mmap(&(0x7f0000000000/0xb36000)=nil, 0xb36000, 0x3, 0x8031, 0xffffffffffffffff, 0x0) unshare(0x0) r0 = socket$packet(0x11, 0x3, 0x300) r1 = socket$nl_route(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$nl_route(r2, &(0x7f0000000180)={0x0, 0x0, &(0x7f0000000040)={0x0}, 0x1, 0x0, 0x0, 0x8818}, 0x4000) getsockname$packet(r2, &(0x7f0000000140)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000200)=0x28a) r4 = socket$nl_route(0x10, 0x3, 0x0) sendmsg$BATADV_CMD_GET_GATEWAYS(0xffffffffffffffff, &(0x7f0000000300)={&(0x7f0000000100)={0x10, 0x0, 0x0, 0x20000000}, 0xc, &(0x7f0000000280)={&(0x7f00000001c0)={0x2c, 0x0, 0x1, 0x70bd2c, 0x25dfdbfc, {}, [@BATADV_ATTR_BONDING_ENABLED={0x5}, @BATADV_ATTR_VLANID={0x6, 0x28, 0x3}, @BATADV_ATTR_ISOLATION_MASK={0x8, 0x2c, 0xa4a}]}, 0x2c}, 0x1, 0x0, 0x0, 0x4004044}, 0x40) sendmsg$nl_route(r4, &(0x7f0000000080)={0x0, 0x0, &(0x7f0000000500)={&(0x7f0000000240)=ANY=[@ANYBLOB="400000001000390400"/20, @ANYRES32, @ANYBLOB="0bf30000000000002000128008000100677265001400028008000100", @ANYRES32=r3, @ANYBLOB="08000700ac"], 0x40}}, 0x0) unshare(0x2000000) r5 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000140)='memory.events\x00', 0x7a05, 0x1700) r6 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) write$cgroup_int(r5, &(0x7f0000000200), 0xf000) sendfile(r5, r6, 0x0, 0xf03b0000) r7 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) sendfile(r7, r6, &(0x7f00000002c0)=0x335773c3, 0x8) sendmsg$nl_route(r1, &(0x7f0000000080)={0x0, 0x0, &(0x7f0000000500)={&(0x7f00000000c0)=ANY=[@ANYBLOB="40000000100039040000000000000000868804c700000000", @ANYRES32=r3, @ANYBLOB="01980000000000002000128008000100677265001400028008000100", @ANYRES8=r6, @ANYBLOB="08000700ac"], 0x40}}, 0x0) sendto$packet(r0, &(0x7f0000000000)='1', 0x500, 0xe, &(0x7f0000000200)={0x11, 0x0, r3, 0x1, 0x0, 0x6, @local}, 0x14) [ 2811.278241][T21397] bond986: (slave bridge1052): making interface the new active one [ 2811.287791][T21397] bridge1052: entered promiscuous mode [ 2811.311397][T21397] bond986: (slave bridge1052): Enslaving as an active interface with an up link 17:02:56 executing program 2: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, 
&(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0xf0ab1600, 0x0, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) 17:02:56 executing program 3: r0 = socket$netlink(0x10, 0x3, 0x0) socket(0x0, 0x0, 0x0) sendmsg$nl_route(0xffffffffffffffff, 0x0, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000100)=@newlink={0x48, 0x10, 0xffffff1f, 0x0, 0x0, {}, [@IFLA_LINKINFO={0x28, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x18, 0x2, 0x0, 0x1, [@IFLA_BR_MULTI_BOOLOPT={0xc, 0x2e, {0x0, 0x1}}, @IFLA_BR_MCAST_STATS_ENABLED={0x5}]}}}]}, 0x48}}, 0x60) 17:02:56 executing program 4: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x28a, 0x0, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x2}}}, @IFLA_MASTER={0x8}]}, 0x3c}}, 0x0) [ 2811.379337][T21398] workqueue: Failed to create a rescuer kthread for wq "bond633": -EINTR [ 2811.671136][T21406] validate_nla: 9 callbacks suppressed [ 2811.671161][T21406] netlink: 'syz-executor.5': attribute type 1 has an invalid length. [ 2811.796198][T21406] bond1054: entered promiscuous mode [ 2811.815781][T21406] 8021q: adding VLAN 0 to HW filter on device bond1054 17:02:57 executing program 5: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0xda, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) [ 2811.958004][T21408] bond1054: (slave bridge1081): making interface the new active one [ 2811.967504][T21408] bridge1081: entered promiscuous mode [ 2811.984831][T21408] bond1054: (slave bridge1081): Enslaving as an active interface with an up link [ 2811.994864][T21410] netlink: 'syz-executor.1': attribute type 1 has an invalid length. 
17:02:57 executing program 1: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x28a, 0xa, {0x0, 0x0, 0x0, 0x0, 0xfffff000}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8}]}, 0x3c}}, 0x0) [ 2812.027917][T21410] workqueue: Failed to create a rescuer kthread for wq "bond756": -EINTR [ 2812.159862][T21430] netlink: 'syz-executor.2': attribute type 1 has an invalid length. [ 2812.213017][T21430] bond987: entered promiscuous mode [ 2812.219689][T21430] 8021q: adding VLAN 0 to HW filter on device bond987 [ 2812.350739][T21434] bond987: (slave bridge1053): making interface the new active one [ 2812.360817][T21434] bridge1053: entered promiscuous mode [ 2812.375134][T21434] bond987: (slave bridge1053): Enslaving as an active interface with an up link 17:02:57 executing program 2: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0xf0ffffff, 0x0, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) 17:02:57 executing program 3: r0 = socket$netlink(0x10, 0x3, 0x0) socket(0x0, 0x0, 0x0) sendmsg$nl_route(0xffffffffffffffff, 0x0, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000100)=@newlink={0x48, 0x10, 0xffffff1f, 0x0, 0x0, {}, [@IFLA_LINKINFO={0x28, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x18, 0x2, 0x0, 0x1, [@IFLA_BR_MULTI_BOOLOPT={0xc, 0x2e, {0x0, 0x1}}, @IFLA_BR_MCAST_STATS_ENABLED={0x5}]}}}]}, 0x48}}, 0xf0) [ 2812.415201][T21437] netlink: 'syz-executor.4': attribute type 1 has an invalid length. [ 2812.499525][T21437] bond633: entered promiscuous mode [ 2812.505302][T21437] 8021q: adding VLAN 0 to HW filter on device bond633 [ 2812.524757][T21438] netlink: 4 bytes leftover after parsing attributes in process `syz-executor.4'. 
17:02:57 executing program 4: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x28a, 0x0, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x3}}}, @IFLA_MASTER={0x8}]}, 0x3c}}, 0x0) [ 2812.623249][T21441] netlink: 32 bytes leftover after parsing attributes in process `syz-executor.0'. [ 2812.649245][T21443] netlink: 'syz-executor.5': attribute type 1 has an invalid length. [ 2812.737977][T21443] bond1055: entered promiscuous mode [ 2812.754237][T21443] 8021q: adding VLAN 0 to HW filter on device bond1055 [ 2812.859321][T21445] bond1055: (slave bridge1082): making interface the new active one [ 2812.867760][T21445] bridge1082: entered promiscuous mode [ 2812.902341][T21445] bond1055: (slave bridge1082): Enslaving as an active interface with an up link [ 2812.923176][T21448] netlink: 'syz-executor.1': attribute type 1 has an invalid length. 17:02:57 executing program 5: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0xf0, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) [ 2812.997644][T21448] bond756: entered promiscuous mode [ 2813.004768][T21448] 8021q: adding VLAN 0 to HW filter on device bond756 17:02:58 executing program 1: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x28a, 0xa, {0x0, 0x0, 0x0, 0x0, 0xffffff7f}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8}]}, 0x3c}}, 0x0) [ 2813.082011][T21456] netlink: 'syz-executor.2': attribute type 1 has an invalid length. 
[ 2813.160444][T21456] bond988: entered promiscuous mode [ 2813.166352][T21456] 8021q: adding VLAN 0 to HW filter on device bond988 17:02:58 executing program 3: r0 = socket$netlink(0x10, 0x3, 0x0) socket(0x0, 0x0, 0x0) sendmsg$nl_route(0xffffffffffffffff, 0x0, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000100)=@newlink={0x48, 0x10, 0xffffff1f, 0x0, 0x0, {}, [@IFLA_LINKINFO={0x28, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x18, 0x2, 0x0, 0x1, [@IFLA_BR_MULTI_BOOLOPT={0xc, 0x2e, {0x0, 0x1}}, @IFLA_BR_MCAST_STATS_ENABLED={0x5}]}}}]}, 0x48}}, 0x600) [ 2813.328190][T21458] bond988: (slave bridge1054): making interface the new active one [ 2813.346044][T21458] bridge1054: entered promiscuous mode [ 2813.378553][T21458] bond988: (slave bridge1054): Enslaving as an active interface with an up link [ 2813.420320][T21462] netlink: 'syz-executor.4': attribute type 1 has an invalid length. [ 2813.569408][T21462] bond634: entered promiscuous mode [ 2813.575152][T21462] 8021q: adding VLAN 0 to HW filter on device bond634 [ 2813.625871][T21463] netlink: 4 bytes leftover after parsing attributes in process `syz-executor.4'. [ 2813.696846][T21469] netlink: 'syz-executor.5': attribute type 1 has an invalid length. [ 2813.801359][T21469] bond1056: entered promiscuous mode [ 2813.807132][T21469] 8021q: adding VLAN 0 to HW filter on device bond1056 [ 2813.873917][T21473] bond1056: (slave bridge1083): making interface the new active one [ 2813.883035][T21473] bridge1083: entered promiscuous mode [ 2813.894939][T21473] bond1056: (slave bridge1083): Enslaving as an active interface with an up link [ 2813.911113][T21471] netlink: 'syz-executor.1': attribute type 1 has an invalid length. [ 2813.963991][T21471] bond757: entered promiscuous mode [ 2813.973001][T21471] 8021q: adding VLAN 0 to HW filter on device bond757 [ 2814.011561][T21474] bridge894: entered promiscuous mode [ 2814.017039][T21474] bridge894: entered allmulticast mode [ 2814.064203][T21426] lo speed is unknown, defaulting to 1000 17:03:01 executing program 0: unshare(0x40000400) mmap(&(0x7f0000000000/0xb36000)=nil, 0xb36000, 0x3, 0x8031, 0xffffffffffffffff, 0x0) unshare(0x0) r0 = socket$packet(0x11, 0x3, 0x300) r1 = socket$nl_route(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$nl_route(r2, &(0x7f0000000180)={0x0, 0x0, &(0x7f0000000040)={0x0}, 0x1, 0x0, 0x0, 0x8818}, 0x4000) getsockname$packet(r2, &(0x7f0000000140)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000200)=0x28a) r4 = socket$nl_route(0x10, 0x3, 0x0) sendmsg$BATADV_CMD_GET_GATEWAYS(0xffffffffffffffff, &(0x7f0000000300)={&(0x7f0000000100)={0x10, 0x0, 0x0, 0x20000000}, 0xc, &(0x7f0000000280)={&(0x7f00000001c0)={0x2c, 0x0, 0x1, 0x70bd2c, 0x25dfdbfc, {}, [@BATADV_ATTR_BONDING_ENABLED={0x5}, @BATADV_ATTR_VLANID={0x6, 0x28, 0x3}, @BATADV_ATTR_ISOLATION_MASK={0x8, 0x2c, 0xa4a}]}, 0x2c}, 0x1, 0x0, 0x0, 0x4004044}, 0x40) sendmsg$nl_route(r4, &(0x7f0000000080)={0x0, 0x0, &(0x7f0000000500)={&(0x7f0000000240)=ANY=[@ANYBLOB="400000001000390400"/20, @ANYRES32, @ANYBLOB="0bf30000000000002000128008000100677265001400028008000100", @ANYRES32=r3, @ANYBLOB="08000700ac"], 0x40}}, 0x0) unshare(0x2000000) r5 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000140)='memory.events\x00', 0x7a05, 0x1700) r6 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) write$cgroup_int(r5, &(0x7f0000000200), 0xf000) sendfile(r5, r6, 0x0, 0xf03b0000) r7 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 
0x0) sendfile(r7, r6, &(0x7f00000002c0)=0x335773c3, 0x8) sendmsg$nl_route(r1, &(0x7f0000000080)={0x0, 0x0, &(0x7f0000000500)={&(0x7f00000000c0)=ANY=[@ANYBLOB="40000000100039040000000000000000868804c700000000", @ANYRES32=r3, @ANYBLOB="01980000000000002000128008000100677265001400028008000100", @ANYRES8=r6, @ANYBLOB="08000700ac"], 0x40}}, 0x0) sendto$packet(r0, &(0x7f0000000000)='1', 0x500, 0xe, &(0x7f0000000200)={0x11, 0x0, r3, 0x1, 0x0, 0x6, @local}, 0x14) unshare(0x40000400) (async) mmap(&(0x7f0000000000/0xb36000)=nil, 0xb36000, 0x3, 0x8031, 0xffffffffffffffff, 0x0) (async) unshare(0x0) (async) socket$packet(0x11, 0x3, 0x300) (async) socket$nl_route(0x10, 0x3, 0x0) (async) socket(0x10, 0x803, 0x0) (async) sendmsg$nl_route(r2, &(0x7f0000000180)={0x0, 0x0, &(0x7f0000000040)={0x0}, 0x1, 0x0, 0x0, 0x8818}, 0x4000) (async) getsockname$packet(r2, &(0x7f0000000140)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000200)=0x28a) (async) socket$nl_route(0x10, 0x3, 0x0) (async) sendmsg$BATADV_CMD_GET_GATEWAYS(0xffffffffffffffff, &(0x7f0000000300)={&(0x7f0000000100)={0x10, 0x0, 0x0, 0x20000000}, 0xc, &(0x7f0000000280)={&(0x7f00000001c0)={0x2c, 0x0, 0x1, 0x70bd2c, 0x25dfdbfc, {}, [@BATADV_ATTR_BONDING_ENABLED={0x5}, @BATADV_ATTR_VLANID={0x6, 0x28, 0x3}, @BATADV_ATTR_ISOLATION_MASK={0x8, 0x2c, 0xa4a}]}, 0x2c}, 0x1, 0x0, 0x0, 0x4004044}, 0x40) (async) sendmsg$nl_route(r4, &(0x7f0000000080)={0x0, 0x0, &(0x7f0000000500)={&(0x7f0000000240)=ANY=[@ANYBLOB="400000001000390400"/20, @ANYRES32, @ANYBLOB="0bf30000000000002000128008000100677265001400028008000100", @ANYRES32=r3, @ANYBLOB="08000700ac"], 0x40}}, 0x0) (async) unshare(0x2000000) (async) openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000140)='memory.events\x00', 0x7a05, 0x1700) (async) openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) (async) write$cgroup_int(r5, &(0x7f0000000200), 0xf000) (async) sendfile(r5, r6, 0x0, 0xf03b0000) (async) openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) (async) sendfile(r7, r6, &(0x7f00000002c0)=0x335773c3, 0x8) (async) sendmsg$nl_route(r1, &(0x7f0000000080)={0x0, 0x0, &(0x7f0000000500)={&(0x7f00000000c0)=ANY=[@ANYBLOB="40000000100039040000000000000000868804c700000000", @ANYRES32=r3, @ANYBLOB="01980000000000002000128008000100677265001400028008000100", @ANYRES8=r6, @ANYBLOB="08000700ac"], 0x40}}, 0x0) (async) sendto$packet(r0, &(0x7f0000000000)='1', 0x500, 0xe, &(0x7f0000000200)={0x11, 0x0, r3, 0x1, 0x0, 0x6, @local}, 0x14) (async) 17:03:01 executing program 2: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0xf1ab1600, 0x0, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) 17:03:01 executing program 4: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) 
sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x28a, 0x0, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x5}}}, @IFLA_MASTER={0x8}]}, 0x3c}}, 0x0) 17:03:01 executing program 1: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x28a, 0xa, {0x0, 0x0, 0x0, 0x0, 0xffffff9e}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8}]}, 0x3c}}, 0x0) 17:03:01 executing program 5: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x12a, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) 17:03:01 executing program 3: r0 = socket$netlink(0x10, 0x3, 0x0) socket(0x0, 0x0, 0x0) sendmsg$nl_route(0xffffffffffffffff, 0x0, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000100)=@newlink={0x48, 0x10, 0xffffff1f, 0x0, 0x0, {}, [@IFLA_LINKINFO={0x28, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x18, 0x2, 0x0, 0x1, [@IFLA_BR_MULTI_BOOLOPT={0xc, 0x2e, {0x0, 0x1}}, @IFLA_BR_MCAST_STATS_ENABLED={0x5}]}}}]}, 0x48}}, 0xa00) [ 2816.838249][T21493] netlink: 'syz-executor.4': attribute type 1 has an invalid length. [ 2816.932507][T21493] bond635: entered promiscuous mode [ 2816.938227][T21493] 8021q: adding VLAN 0 to HW filter on device bond635 [ 2816.960573][T21488] netlink: 'syz-executor.5': attribute type 1 has an invalid length. [ 2817.038059][T21488] bond1057: entered promiscuous mode [ 2817.064575][T21488] 8021q: adding VLAN 0 to HW filter on device bond1057 [ 2817.103071][T21497] netlink: 'syz-executor.2': attribute type 1 has an invalid length. 
[ 2817.212069][T21497] bond989: entered promiscuous mode [ 2817.217726][T21497] 8021q: adding VLAN 0 to HW filter on device bond989 [ 2817.233717][T21496] netlink: 'syz-executor.1': attribute type 1 has an invalid length. [ 2817.307917][T21496] bond758: entered promiscuous mode [ 2817.314397][T21496] 8021q: adding VLAN 0 to HW filter on device bond758 17:03:02 executing program 3: r0 = socket$netlink(0x10, 0x3, 0x0) socket(0x0, 0x0, 0x0) sendmsg$nl_route(0xffffffffffffffff, 0x0, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000100)=@newlink={0x48, 0x10, 0xffffff1f, 0x0, 0x0, {}, [@IFLA_LINKINFO={0x28, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x18, 0x2, 0x0, 0x1, [@IFLA_BR_MULTI_BOOLOPT={0xc, 0x2e, {0x0, 0x1}}, @IFLA_BR_MCAST_STATS_ENABLED={0x5}]}}}]}, 0x48}}, 0xc00) [ 2817.357090][T21503] netlink: 4 bytes leftover after parsing attributes in process `syz-executor.4'. 17:03:02 executing program 4: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x28a, 0x0, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x6}}}, @IFLA_MASTER={0x8}]}, 0x3c}}, 0x0) [ 2817.517166][T21504] bond1057: (slave bridge1084): making interface the new active one [ 2817.536627][T21504] bridge1084: entered promiscuous mode 17:03:02 executing program 5: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x132, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) [ 2817.562903][T21504] bond1057: (slave bridge1084): Enslaving as an active interface with an up link 17:03:02 executing program 2: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, 
&(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0xf2020000, 0x0, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) [ 2817.637271][T21507] bond989: (slave bridge1055): making interface the new active one [ 2817.646048][T21507] bridge1055: entered promiscuous mode [ 2817.659476][T21507] bond989: (slave bridge1055): Enslaving as an active interface with an up link 17:03:02 executing program 1: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x28a, 0xa, {0x0, 0x0, 0x0, 0x0, 0xfffffff0}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8}]}, 0x3c}}, 0x0) [ 2817.712910][T21508] bridge895: entered promiscuous mode [ 2817.720330][T21508] bridge895: entered allmulticast mode [ 2817.776104][T21515] netlink: 32 bytes leftover after parsing attributes in process `syz-executor.0'. [ 2817.829244][T21523] netlink: 'syz-executor.4': attribute type 1 has an invalid length. [ 2817.873889][T21523] bond636: entered promiscuous mode [ 2817.879910][T21523] 8021q: adding VLAN 0 to HW filter on device bond636 [ 2817.922803][T21525] netlink: 4 bytes leftover after parsing attributes in process `syz-executor.4'. 17:03:02 executing program 3: r0 = socket$netlink(0x10, 0x3, 0x0) socket(0x0, 0x0, 0x0) sendmsg$nl_route(0xffffffffffffffff, 0x0, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000100)=@newlink={0x48, 0x10, 0xffffff1f, 0x0, 0x0, {}, [@IFLA_LINKINFO={0x28, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x18, 0x2, 0x0, 0x1, [@IFLA_BR_MULTI_BOOLOPT={0xc, 0x2e, {0x0, 0x1}}, @IFLA_BR_MCAST_STATS_ENABLED={0x5}]}}}]}, 0x48}}, 0xe00) [ 2817.971973][T21527] netlink: 'syz-executor.5': attribute type 1 has an invalid length. [ 2818.030508][T21527] bond1058: entered promiscuous mode [ 2818.036335][T21527] 8021q: adding VLAN 0 to HW filter on device bond1058 [ 2818.182431][T21530] bond1058: (slave bridge1085): making interface the new active one [ 2818.191723][T21530] bridge1085: entered promiscuous mode [ 2818.204390][T21530] bond1058: (slave bridge1085): Enslaving as an active interface with an up link [ 2818.215244][T21532] netlink: 'syz-executor.2': attribute type 1 has an invalid length. [ 2818.289598][T21532] bond990: entered promiscuous mode [ 2818.295324][T21532] 8021q: adding VLAN 0 to HW filter on device bond990 [ 2818.311671][T21534] netlink: 'syz-executor.1': attribute type 1 has an invalid length. 
[ 2818.358383][T21534] bond759: entered promiscuous mode [ 2818.369614][T21534] 8021q: adding VLAN 0 to HW filter on device bond759 [ 2818.435189][T21536] bond990: (slave bridge1056): making interface the new active one [ 2818.456340][T21536] bridge1056: entered promiscuous mode [ 2818.478533][T21536] bond990: (slave bridge1056): Enslaving as an active interface with an up link [ 2818.527231][T21537] bridge896: entered promiscuous mode [ 2818.533973][T21537] bridge896: entered allmulticast mode [ 2818.605451][T21502] lo speed is unknown, defaulting to 1000 17:03:05 executing program 0: unshare(0x40000400) mmap(&(0x7f0000000000/0xb36000)=nil, 0xb36000, 0x3, 0x8031, 0xffffffffffffffff, 0x0) (async) unshare(0x0) (async) r0 = socket$packet(0x11, 0x3, 0x300) (async) r1 = socket$nl_route(0x10, 0x3, 0x0) (async) r2 = socket(0x10, 0x803, 0x0) sendmsg$nl_route(r2, &(0x7f0000000180)={0x0, 0x0, &(0x7f0000000040)={0x0}, 0x1, 0x0, 0x0, 0x8818}, 0x4000) getsockname$packet(r2, &(0x7f0000000140)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000200)=0x28a) (async) r4 = socket$nl_route(0x10, 0x3, 0x0) (async) sendmsg$BATADV_CMD_GET_GATEWAYS(0xffffffffffffffff, &(0x7f0000000300)={&(0x7f0000000100)={0x10, 0x0, 0x0, 0x20000000}, 0xc, &(0x7f0000000280)={&(0x7f00000001c0)={0x2c, 0x0, 0x1, 0x70bd2c, 0x25dfdbfc, {}, [@BATADV_ATTR_BONDING_ENABLED={0x5}, @BATADV_ATTR_VLANID={0x6, 0x28, 0x3}, @BATADV_ATTR_ISOLATION_MASK={0x8, 0x2c, 0xa4a}]}, 0x2c}, 0x1, 0x0, 0x0, 0x4004044}, 0x40) sendmsg$nl_route(r4, &(0x7f0000000080)={0x0, 0x0, &(0x7f0000000500)={&(0x7f0000000240)=ANY=[@ANYBLOB="400000001000390400"/20, @ANYRES32, @ANYBLOB="0bf30000000000002000128008000100677265001400028008000100", @ANYRES32=r3, @ANYBLOB="08000700ac"], 0x40}}, 0x0) unshare(0x2000000) r5 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000140)='memory.events\x00', 0x7a05, 0x1700) (async) r6 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) write$cgroup_int(r5, &(0x7f0000000200), 0xf000) (async) sendfile(r5, r6, 0x0, 0xf03b0000) (async) r7 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) sendfile(r7, r6, &(0x7f00000002c0)=0x335773c3, 0x8) (async) sendmsg$nl_route(r1, &(0x7f0000000080)={0x0, 0x0, &(0x7f0000000500)={&(0x7f00000000c0)=ANY=[@ANYBLOB="40000000100039040000000000000000868804c700000000", @ANYRES32=r3, @ANYBLOB="01980000000000002000128008000100677265001400028008000100", @ANYRES8=r6, @ANYBLOB="08000700ac"], 0x40}}, 0x0) (async) sendto$packet(r0, &(0x7f0000000000)='1', 0x500, 0xe, &(0x7f0000000200)={0x11, 0x0, r3, 0x1, 0x0, 0x6, @local}, 0x14) 17:03:05 executing program 4: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x28a, 0x0, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x7}}}, @IFLA_MASTER={0x8}]}, 0x3c}}, 0x0) 17:03:05 executing program 5: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = 
socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x142, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) 17:03:05 executing program 2: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0xf2030000, 0x0, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) 17:03:05 executing program 1: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x28a, 0xa, {0x0, 0x0, 0x0, 0x0, 0xffffffff}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8}]}, 0x3c}}, 0x0) 17:03:05 executing program 3: r0 = socket$netlink(0x10, 0x3, 0x0) socket(0x0, 0x0, 0x0) sendmsg$nl_route(0xffffffffffffffff, 0x0, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000100)=@newlink={0x48, 0x10, 0xffffff1f, 0x0, 0x0, {}, [@IFLA_LINKINFO={0x28, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x18, 0x2, 0x0, 0x1, [@IFLA_BR_MULTI_BOOLOPT={0xc, 0x2e, {0x0, 0x1}}, @IFLA_BR_MCAST_STATS_ENABLED={0x5}]}}}]}, 0x48}}, 0xec8) [ 2820.924952][T21551] netlink: 'syz-executor.4': attribute type 1 has an invalid length. [ 2821.040548][T21551] bond637: entered promiscuous mode [ 2821.054911][T21551] 8021q: adding VLAN 0 to HW filter on device bond637 [ 2821.069467][T21553] netlink: 'syz-executor.5': attribute type 1 has an invalid length. 
[ 2821.131513][T21553] bond1059: entered promiscuous mode [ 2821.137327][T21553] 8021q: adding VLAN 0 to HW filter on device bond1059 [ 2821.182361][T21554] bond991: entered promiscuous mode [ 2821.187985][T21554] 8021q: adding VLAN 0 to HW filter on device bond991 [ 2821.240354][T21557] bond760: entered promiscuous mode [ 2821.246637][T21557] 8021q: adding VLAN 0 to HW filter on device bond760 [ 2821.262293][T21562] netlink: 4 bytes leftover after parsing attributes in process `syz-executor.4'. 17:03:06 executing program 4: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x28a, 0x0, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x9}}}, @IFLA_MASTER={0x8}]}, 0x3c}}, 0x0) 17:03:06 executing program 3: r0 = socket$netlink(0x10, 0x3, 0x0) socket(0x0, 0x0, 0x0) sendmsg$nl_route(0xffffffffffffffff, 0x0, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000100)=@newlink={0x48, 0x10, 0xffffff1f, 0x0, 0x0, {}, [@IFLA_LINKINFO={0x28, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x18, 0x2, 0x0, 0x1, [@IFLA_BR_MULTI_BOOLOPT={0xc, 0x2e, {0x0, 0x1}}, @IFLA_BR_MCAST_STATS_ENABLED={0x5}]}}}]}, 0x48}}, 0x27cb) [ 2821.408461][T21564] bond1059: (slave bridge1086): making interface the new active one [ 2821.418167][T21564] bridge1086: entered promiscuous mode [ 2821.445520][T21564] bond1059: (slave bridge1086): Enslaving as an active interface with an up link 17:03:06 executing program 5: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x14c, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) [ 2821.548328][T21566] bond991: (slave bridge1057): making interface the new active one [ 2821.570109][T21566] bridge1057: entered promiscuous mode 17:03:06 executing program 3: r0 = socket$netlink(0x10, 0x3, 0x0) socket(0x0, 0x0, 0x0) sendmsg$nl_route(0xffffffffffffffff, 0x0, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000100)=@newlink={0x48, 0x10, 0xffffff1f, 0x0, 0x0, {}, [@IFLA_LINKINFO={0x28, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x18, 0x2, 0x0, 0x1, [@IFLA_BR_MULTI_BOOLOPT={0xc, 0x2e, {0x0, 0x1}}, @IFLA_BR_MCAST_STATS_ENABLED={0x5}]}}}]}, 0x48}}, 0x5865) [ 2821.601224][T21566] bond991: (slave bridge1057): 
Enslaving as an active interface with an up link 17:03:06 executing program 2: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0xf2ab1600, 0x0, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) 17:03:06 executing program 1: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x28a, 0xa, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8}]}, 0x3c}}, 0x0) [ 2821.655574][T21567] bridge897: entered promiscuous mode [ 2821.661724][T21567] bridge897: entered allmulticast mode [ 2821.798432][T21580] bond638: entered promiscuous mode [ 2821.805584][T21580] 8021q: adding VLAN 0 to HW filter on device bond638 [ 2821.819197][T21585] netlink: 4 bytes leftover after parsing attributes in process `syz-executor.4'. [ 2821.853948][T21588] validate_nla: 3 callbacks suppressed [ 2821.853973][T21588] netlink: 'syz-executor.5': attribute type 1 has an invalid length. [ 2821.933275][T21588] bond1060: entered promiscuous mode [ 2821.940081][T21588] 8021q: adding VLAN 0 to HW filter on device bond1060 [ 2822.004406][T21589] bond1060: (slave bridge1087): making interface the new active one [ 2822.013526][T21589] bridge1087: entered promiscuous mode [ 2822.026483][T21589] bond1060: (slave bridge1087): Enslaving as an active interface with an up link [ 2822.038919][T21595] netlink: 'syz-executor.2': attribute type 1 has an invalid length. [ 2822.107982][T21595] bond992: entered promiscuous mode [ 2822.124352][T21595] 8021q: adding VLAN 0 to HW filter on device bond992 [ 2822.282399][T21599] bond992: (slave bridge1058): making interface the new active one [ 2822.293442][T21599] bridge1058: entered promiscuous mode [ 2822.306415][T21599] bond992: (slave bridge1058): Enslaving as an active interface with an up link [ 2822.316809][T21601] netlink: 'syz-executor.1': attribute type 1 has an invalid length. 
[ 2822.375865][T21601] bond761: entered promiscuous mode [ 2822.383024][T21601] 8021q: adding VLAN 0 to HW filter on device bond761 [ 2822.433308][T21560] lo speed is unknown, defaulting to 1000 17:03:09 executing program 0: unshare(0x40000400) mmap(&(0x7f0000000000/0xb36000)=nil, 0xb36000, 0x3, 0x8031, 0xffffffffffffffff, 0x0) unshare(0x0) r0 = socket$packet(0x11, 0x3, 0x300) r1 = socket$nl_route(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$nl_route(r2, &(0x7f0000000180)={0x0, 0x0, &(0x7f0000000040)={0x0}, 0x1, 0x0, 0x0, 0x8818}, 0x4000) getsockname$packet(r2, &(0x7f0000000140)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000200)=0x28a) r4 = socket$nl_route(0x10, 0x3, 0x0) sendmsg$BATADV_CMD_GET_GATEWAYS(0xffffffffffffffff, &(0x7f0000000300)={&(0x7f0000000100)={0x10, 0x0, 0x0, 0x20000000}, 0xc, &(0x7f0000000280)={&(0x7f00000001c0)={0x2c, 0x0, 0x1, 0x70bd2c, 0x25dfdbfc, {}, [@BATADV_ATTR_BONDING_ENABLED={0x5}, @BATADV_ATTR_VLANID={0x6, 0x28, 0x3}, @BATADV_ATTR_ISOLATION_MASK={0x8, 0x2c, 0xa4a}]}, 0x2c}, 0x1, 0x0, 0x0, 0x4004044}, 0x40) sendmsg$nl_route(r4, &(0x7f0000000080)={0x0, 0x0, &(0x7f0000000500)={&(0x7f0000000240)=ANY=[@ANYBLOB="400000001000390400"/20, @ANYRES32, @ANYBLOB="0bf30000000000002000128008000100677265001400028008000100", @ANYRES32=r3, @ANYBLOB="08000700ac"], 0x40}}, 0x0) unshare(0x2000000) r5 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000140)='memory.events\x00', 0x7a05, 0x1700) r6 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) write$cgroup_int(r5, &(0x7f0000000200), 0xf000) sendfile(r5, r6, 0x0, 0xf03b0000) r7 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) sendfile(r7, r6, &(0x7f00000002c0)=0x335773c3, 0x8) sendmsg$nl_route(r1, &(0x7f0000000080)={0x0, 0x0, &(0x7f0000000500)={&(0x7f00000000c0)=ANY=[@ANYBLOB="40000000100039040000000000000000868804c700000000", @ANYRES32=r3, @ANYBLOB="01980000000000002000128008000100677265001400028008000100", @ANYRES8=r6, @ANYBLOB="08000700ac"], 0x40}}, 0x0) sendto$packet(r0, &(0x7f0000000000)='1', 0x500, 0xe, &(0x7f0000000200)={0x11, 0x0, r3, 0x1, 0x0, 0x6, @local}, 0x14) 17:03:09 executing program 4: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x28a, 0x0, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0xa}}}, @IFLA_MASTER={0x8}]}, 0x3c}}, 0x0) 17:03:09 executing program 3: r0 = socket$netlink(0x10, 0x3, 0x0) socket(0x0, 0x0, 0x0) sendmsg$nl_route(0xffffffffffffffff, 0x0, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000100)=@newlink={0x48, 0x10, 0xffffff1f, 0x0, 0x0, {}, [@IFLA_LINKINFO={0x28, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x18, 0x2, 0x0, 0x1, [@IFLA_BR_MULTI_BOOLOPT={0xc, 0x2e, {0x0, 0x1}}, @IFLA_BR_MCAST_STATS_ENABLED={0x5}]}}}]}, 0x48}}, 0x6000) 17:03:09 executing program 2: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 
0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0xf3ab1600, 0x0, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) 17:03:09 executing program 5: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x15a, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) 17:03:09 executing program 1: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x28a, 0xa, {0x0, 0x0, 0x0, 0x0, 0x0, 0x2}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8}]}, 0x3c}}, 0x0) [ 2824.730665][T21616] netlink: 'syz-executor.5': attribute type 1 has an invalid length. [ 2824.837295][T21616] bond1061: entered promiscuous mode [ 2824.854787][T21616] 8021q: adding VLAN 0 to HW filter on device bond1061 [ 2824.894481][T21622] netlink: 'syz-executor.4': attribute type 1 has an invalid length. [ 2824.972253][T21622] bond639: entered promiscuous mode [ 2824.978447][T21622] 8021q: adding VLAN 0 to HW filter on device bond639 [ 2825.011991][T21621] netlink: 'syz-executor.1': attribute type 1 has an invalid length. [ 2825.102832][T21621] bond762: entered promiscuous mode [ 2825.108542][T21621] 8021q: adding VLAN 0 to HW filter on device bond762 [ 2825.121649][T21617] netlink: 'syz-executor.2': attribute type 1 has an invalid length. 
[ 2825.227737][T21617] bond993: entered promiscuous mode [ 2825.234914][T21617] 8021q: adding VLAN 0 to HW filter on device bond993 17:03:10 executing program 3: r0 = socket$netlink(0x10, 0x3, 0x0) socket(0x0, 0x0, 0x0) sendmsg$nl_route(0xffffffffffffffff, 0x0, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000100)=@newlink={0x48, 0x10, 0xffffff1f, 0x0, 0x0, {}, [@IFLA_LINKINFO={0x28, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x18, 0x2, 0x0, 0x1, [@IFLA_BR_MULTI_BOOLOPT={0xc, 0x2e, {0x0, 0x1}}, @IFLA_BR_MCAST_STATS_ENABLED={0x5}]}}}]}, 0x48}}, 0x6558) [ 2825.275864][T21627] netlink: 4 bytes leftover after parsing attributes in process `syz-executor.4'. 17:03:10 executing program 4: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x28a, 0x0, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x10}}}, @IFLA_MASTER={0x8}]}, 0x3c}}, 0x0) [ 2825.467159][T21628] bond1061: (slave bridge1088): making interface the new active one [ 2825.486678][T21628] bridge1088: entered promiscuous mode 17:03:10 executing program 5: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x162, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) [ 2825.514478][T21628] bond1061: (slave bridge1088): Enslaving as an active interface with an up link 17:03:10 executing program 1: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x28a, 0xa, {0x0, 0x0, 0x0, 0x0, 0x0, 0x4}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8}]}, 0x3c}}, 0x0) [ 2825.686455][T21630] bond993: (slave bridge1059): 
making interface the new active one [ 2825.705268][T21630] bridge1059: entered promiscuous mode [ 2825.733738][T21630] bond993: (slave bridge1059): Enslaving as an active interface with an up link 17:03:10 executing program 2: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0xf4020000, 0x0, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) [ 2825.811506][T21636] netlink: 32 bytes leftover after parsing attributes in process `syz-executor.0'. 17:03:10 executing program 3: r0 = socket$netlink(0x10, 0x3, 0x0) socket(0x0, 0x0, 0x0) sendmsg$nl_route(0xffffffffffffffff, 0x0, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000100)=@newlink={0x48, 0x10, 0xffffff1f, 0x0, 0x0, {}, [@IFLA_LINKINFO={0x28, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x18, 0x2, 0x0, 0x1, [@IFLA_BR_MULTI_BOOLOPT={0xc, 0x2e, {0x0, 0x1}}, @IFLA_BR_MCAST_STATS_ENABLED={0x5}]}}}]}, 0x48}}, 0x8100) [ 2825.875517][T21645] netlink: 'syz-executor.4': attribute type 1 has an invalid length. [ 2825.957898][T21645] bond640: entered promiscuous mode [ 2825.965018][T21645] 8021q: adding VLAN 0 to HW filter on device bond640 [ 2825.980604][T21646] netlink: 4 bytes leftover after parsing attributes in process `syz-executor.4'. [ 2826.021519][T21649] netlink: 'syz-executor.5': attribute type 1 has an invalid length. [ 2826.177544][T21649] bond1062: entered promiscuous mode [ 2826.183735][T21649] 8021q: adding VLAN 0 to HW filter on device bond1062 [ 2826.206493][T21651] netlink: 'syz-executor.1': attribute type 1 has an invalid length. 
[ 2826.273605][T21651] bond763: entered promiscuous mode [ 2826.279319][T21651] 8021q: adding VLAN 0 to HW filter on device bond763 [ 2826.355535][T21653] bond1062: (slave bridge1089): making interface the new active one [ 2826.364700][T21653] bridge1089: entered promiscuous mode [ 2826.386736][T21653] bond1062: (slave bridge1089): Enslaving as an active interface with an up link [ 2826.447564][T21656] bond994: entered promiscuous mode [ 2826.454097][T21656] 8021q: adding VLAN 0 to HW filter on device bond994 [ 2826.522718][T21658] bond994: (slave bridge1060): making interface the new active one [ 2826.530803][T21658] bridge1060: entered promiscuous mode [ 2826.543564][T21658] bond994: (slave bridge1060): Enslaving as an active interface with an up link [ 2826.585973][T21625] lo speed is unknown, defaulting to 1000 17:03:13 executing program 0: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0xf0ab1600, 0x0, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) 17:03:13 executing program 4: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x28a, 0x0, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x14}}}, @IFLA_MASTER={0x8}]}, 0x3c}}, 0x0) 17:03:13 executing program 5: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x16a, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) 17:03:13 executing program 1: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, 
&(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x28a, 0xa, {0x0, 0x0, 0x0, 0x0, 0x0, 0x6}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8}]}, 0x3c}}, 0x0) 17:03:13 executing program 2: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0xf4030000, 0x0, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) 17:03:13 executing program 3: r0 = socket$netlink(0x10, 0x3, 0x0) socket(0x0, 0x0, 0x0) sendmsg$nl_route(0xffffffffffffffff, 0x0, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000100)=@newlink={0x48, 0x10, 0xffffff1f, 0x0, 0x0, {}, [@IFLA_LINKINFO={0x28, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x18, 0x2, 0x0, 0x1, [@IFLA_BR_MULTI_BOOLOPT={0xc, 0x2e, {0x0, 0x1}}, @IFLA_BR_MCAST_STATS_ENABLED={0x5}]}}}]}, 0x48}}, 0xc80e) [ 2828.937542][T21674] validate_nla: 1 callbacks suppressed [ 2828.937568][T21674] netlink: 'syz-executor.5': attribute type 1 has an invalid length. [ 2829.046262][T21674] bond1063: entered promiscuous mode [ 2829.052485][T21674] 8021q: adding VLAN 0 to HW filter on device bond1063 [ 2829.067603][T21679] netlink: 'syz-executor.2': attribute type 1 has an invalid length. [ 2829.130155][T21679] bond995: entered promiscuous mode [ 2829.136076][T21679] 8021q: adding VLAN 0 to HW filter on device bond995 [ 2829.151313][T21678] netlink: 'syz-executor.4': attribute type 1 has an invalid length. [ 2829.212652][T21678] bond641: entered promiscuous mode [ 2829.218539][T21678] 8021q: adding VLAN 0 to HW filter on device bond641 [ 2829.234374][T21681] netlink: 'syz-executor.1': attribute type 1 has an invalid length. 
[ 2829.290887][T21681] bond764: entered promiscuous mode [ 2829.296521][T21681] 8021q: adding VLAN 0 to HW filter on device bond764 [ 2829.353390][T21683] bond1063: (slave bridge1090): making interface the new active one [ 2829.362798][T21683] bridge1090: entered promiscuous mode [ 2829.382329][T21683] bond1063: (slave bridge1090): Enslaving as an active interface with an up link 17:03:14 executing program 5: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x16c, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) [ 2829.482516][T21686] bond995: (slave bridge1061): making interface the new active one [ 2829.490899][T21686] bridge1061: entered promiscuous mode [ 2829.512065][T21686] bond995: (slave bridge1061): Enslaving as an active interface with an up link 17:03:14 executing program 2: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0xf4ab1600, 0x0, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) [ 2829.536710][T21688] netlink: 4 bytes leftover after parsing attributes in process `syz-executor.4'. 
17:03:14 executing program 4: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x28a, 0x0, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4, 0x3}}}, @IFLA_MASTER={0x8}]}, 0x3c}}, 0x0) 17:03:14 executing program 3: r0 = socket$netlink(0x10, 0x3, 0x0) socket(0x0, 0x0, 0x0) sendmsg$nl_route(0xffffffffffffffff, 0x0, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000100)=@newlink={0x48, 0x10, 0xffffff1f, 0x0, 0x0, {}, [@IFLA_LINKINFO={0x28, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x18, 0x2, 0x0, 0x1, [@IFLA_BR_MULTI_BOOLOPT={0xc, 0x2e, {0x0, 0x1}}, @IFLA_BR_MCAST_STATS_ENABLED={0x5}]}}}]}, 0x48}}, 0xcb27) 17:03:14 executing program 1: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x28a, 0xa, {0x0, 0x0, 0x0, 0x0, 0x0, 0x8}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8}]}, 0x3c}}, 0x0) [ 2829.685904][T21685] netlink: 'syz-executor.0': attribute type 1 has an invalid length. 
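[editor's note] The program-3 requests above use a longer 0x48-byte @newlink whose IFLA_LINKINFO nests an IFLA_INFO_DATA carrying IFLA_BR_MULTI_BOOLOPT and IFLA_BR_MCAST_STATS_ENABLED. The hypothetical helper below (not part of the reproducer; it only builds the attribute blob in memory and prints the lengths, assuming standard UAPI headers) shows how the 0x28 / 0x18 / 0xc / 0x5 constants visible in those dumps arise from ordinary netlink attribute padding.

    /* Rebuild the nested IFLA_LINKINFO payload of the program-3 variant and
     * print the computed lengths, so the hex constants in the log line up. */
    #include <stdio.h>
    #include <string.h>
    #include <linux/netlink.h>
    #include <linux/if_link.h>
    #include <linux/if_bridge.h>

    /* Write one attribute; return the padded size it consumes in the buffer. */
    static int put(char *p, unsigned short type, const void *data,
                   unsigned short len)
    {
        struct nlattr *a = (struct nlattr *)p;
        a->nla_type = type;
        a->nla_len = NLA_HDRLEN + len;
        if (data)
            memcpy(p + NLA_HDRLEN, data, len);
        return NLA_ALIGN(a->nla_len);
    }

    int main(void)
    {
        char buf[128] = {0};
        char *p = buf;

        /* IFLA_LINKINFO header; its length is filled in last. */
        struct nlattr *linkinfo = (struct nlattr *)p;
        p += NLA_HDRLEN;

        /* IFLA_INFO_KIND = "bridge"  ->  nla_len 0xb, padded to 0xc */
        p += put(p, IFLA_INFO_KIND, "bridge", strlen("bridge") + 1);

        /* IFLA_INFO_DATA { IFLA_BR_MULTI_BOOLOPT, IFLA_BR_MCAST_STATS_ENABLED } */
        struct nlattr *data = (struct nlattr *)p;
        char *q = p + NLA_HDRLEN;
        struct br_boolopt_multi bm = { .optval = 0, .optmask = 1 };
        q += put(q, IFLA_BR_MULTI_BOOLOPT, &bm, sizeof(bm));        /* 0xc */
        unsigned char on = 0;
        q += put(q, IFLA_BR_MCAST_STATS_ENABLED, &on, sizeof(on));  /* 0x5, padded to 8 */
        data->nla_type = IFLA_INFO_DATA | NLA_F_NESTED;
        data->nla_len = q - p;                                      /* 0x18 */
        p = q;

        linkinfo->nla_type = IFLA_LINKINFO | NLA_F_NESTED;
        linkinfo->nla_len = p - buf;                                /* 0x28 */

        printf("IFLA_LINKINFO=0x%x IFLA_INFO_DATA=0x%x\n",
               linkinfo->nla_len, data->nla_len);
        return 0;
    }

Program 3 varies only the trailing sendmsg flags argument (0x600, 0xa00, 0xc80e, ...), which the kernel ignores for netlink sockets, so each run recreates the same bridge link and the corresponding "bridgeNNN: entered promiscuous mode" lines.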
[ 2829.772828][T21685] bond492: entered promiscuous mode [ 2829.779445][T21685] 8021q: adding VLAN 0 to HW filter on device bond492 17:03:14 executing program 0: ioctl$sock_SIOCGIFINDEX_802154(0xffffffffffffffff, 0x8933, &(0x7f00000005c0)={'wpan0\x00', 0x0}) sendmsg$NL802154_CMD_GET_SEC_DEVKEY(0xffffffffffffffff, &(0x7f0000000680)={&(0x7f0000000580)={0x10, 0x0, 0x0, 0x20}, 0xc, &(0x7f0000000640)={&(0x7f0000000600)={0x30, 0x0, 0x300, 0x70bd2a, 0x25dfdbfb, {}, [@NL802154_ATTR_IFINDEX={0x8, 0x3, r0}, @NL802154_ATTR_IFINDEX={0x8}, @NL802154_ATTR_WPAN_DEV={0xc, 0x6, 0x100000001}]}, 0x30}, 0x1, 0x0, 0x0, 0x40}, 0x40080) r1 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000140)='cgroup.controllers\x00', 0x275a, 0x0) r2 = socket$nl_generic(0x10, 0x3, 0x10) r3 = accept4$netrom(r1, 0x0, &(0x7f00000003c0), 0x80000) getpeername$netrom(r3, &(0x7f00000006c0)={{0x3, @netrom}, [@default, @remote, @rose, @bcast, @rose, @default, @rose]}, &(0x7f0000000240)=0x84) r4 = syz_genetlink_get_family_id$mptcp(&(0x7f0000000300), 0xffffffffffffffff) sendmsg$MPTCP_PM_CMD_ADD_ADDR(r2, &(0x7f00000001c0)={0x0, 0x0, &(0x7f0000000180)={&(0x7f0000000740)=ANY=[@ANYBLOB="300000007d6ec7b827381a27e980adf51d9db048aa62c830e075e7170dcef2ffee7f403d8519d137adfdf78365d5bf0ece21a8d119a64c5616e331c32f32bcf812a2889ea548dabcfe70bce5af616d6491a30ceb414ce4914f508460edb63304d668ccd07c51519e3e7eed23307b538e8c3796bb5ea0baf8f32f4f1a8107478ded06aeb3ef60ea2a80e0ae3a73c4862188d1bbbcc626d4df2206ac1831289a9a8da619a72186f3989da0b169320c590083e891db1d1ff4e27344dd3101eee1178f0b687799702f796697a5e84e618da0863deb7d31130441172b80c75a0f1f4295fe144e7160e6c39e9e12e7095dbb8850d3ef9d6ee1b5", @ANYRES16=r4, @ANYBLOB="8f1a0000000000000000010000001c0001800800060011000000080003007f0000010600010002000000"], 0x30}}, 0x0) ioctl$F2FS_IOC_MOVE_RANGE(r2, 0xc020f509, &(0x7f0000000040)={r1, 0xcf, 0x9, 0x6}) r5 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000280)='cgroup.controllers\x00', 0x275a, 0x0) write$cgroup_int(r5, &(0x7f0000000000), 0xffffff6a) ioctl$FIBMAP(r5, 0x1, &(0x7f00000000c0)) pipe(&(0x7f0000000080)={0xffffffffffffffff, 0xffffffffffffffff}) bpf$BPF_PROG_WITH_BTFID_LOAD(0x2, &(0x7f0000000000)=@bpf_lsm={0x1d, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, '\x00', 0x0, 0x1b, r6, 0x8, 0x0, 0x0, 0x10, 0x0, 0x0, 0xffffffffffffffff}, 0x80) write$cgroup_subtree(0xffffffffffffffff, &(0x7f0000000500)={[{0x2b, 'net'}, {0x2d, 'rdma'}, {0x2b, 'cpuset'}, {0x2d, 'io'}]}, 0x17) openat$cgroup_ro(0xffffffffffffffff, 0x0, 0x0, 0x0) r7 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000100)='cpuset.effective_cpus\x00', 0x275a, 0x0) write$cgroup_int(r7, &(0x7f0000000380), 0x101bf) ioctl$FS_IOC_RESVSP(r6, 0x40305828, &(0x7f0000000540)={0x0, 0x3, 0x6}) ioctl$EXT4_IOC_ALLOC_DA_BLKS(r7, 0x660c) ioctl$EXT4_IOC_MOVE_EXT(r7, 0xc028660f, &(0x7f0000000180)={0x2880008, r5}) epoll_ctl$EPOLL_CTL_ADD(0xffffffffffffffff, 0x1, 0xffffffffffffffff, 0x0) openat$cgroup_ro(0xffffffffffffffff, 0x0, 0x0, 0x0) ioctl$FS_IOC_RESVSP(r7, 0x40305828, &(0x7f0000000080)={0x0, 0x0, 0x100000000, 0xfff}) mmap(&(0x7f0000000000/0xb36000)=nil, 0xb36000, 0x2, 0x28011, r1, 0x0) [ 2829.892579][T21690] bond492: (slave bridge887): making interface the new active one [ 2829.901826][T21690] bridge887: entered promiscuous mode [ 2829.915944][T21690] bond492: (slave bridge887): Enslaving as an active interface with an up link [ 2829.926356][T21695] netlink: 'syz-executor.5': attribute type 1 has an invalid length. 
17:03:15 executing program 3: r0 = socket$netlink(0x10, 0x3, 0x0) socket(0x0, 0x0, 0x0) sendmsg$nl_route(0xffffffffffffffff, 0x0, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000100)=@newlink={0x48, 0x10, 0xffffff1f, 0x0, 0x0, {}, [@IFLA_LINKINFO={0x28, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x18, 0x2, 0x0, 0x1, [@IFLA_BR_MULTI_BOOLOPT={0xc, 0x2e, {0x0, 0x1}}, @IFLA_BR_MCAST_STATS_ENABLED={0x5}]}}}]}, 0x48}}, 0xf000) [ 2830.005724][T21695] bond1064: entered promiscuous mode [ 2830.012597][T21695] 8021q: adding VLAN 0 to HW filter on device bond1064 [ 2830.144923][T21697] bond1064: (slave bridge1091): making interface the new active one [ 2830.154772][T21697] bridge1091: entered promiscuous mode [ 2830.168162][T21697] bond1064: (slave bridge1091): Enslaving as an active interface with an up link 17:03:15 executing program 5: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x174, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) [ 2830.219651][T21699] netlink: 'syz-executor.2': attribute type 1 has an invalid length. [ 2830.344479][T21699] bond996: entered promiscuous mode [ 2830.350489][T21699] 8021q: adding VLAN 0 to HW filter on device bond996 17:03:15 executing program 0: ioctl$sock_SIOCGIFINDEX_802154(0xffffffffffffffff, 0x8933, &(0x7f00000005c0)={'wpan0\x00', 0x0}) sendmsg$NL802154_CMD_GET_SEC_DEVKEY(0xffffffffffffffff, &(0x7f0000000680)={&(0x7f0000000580)={0x10, 0x0, 0x0, 0x20}, 0xc, &(0x7f0000000640)={&(0x7f0000000600)={0x30, 0x0, 0x300, 0x70bd2a, 0x25dfdbfb, {}, [@NL802154_ATTR_IFINDEX={0x8, 0x3, r0}, @NL802154_ATTR_IFINDEX={0x8}, @NL802154_ATTR_WPAN_DEV={0xc, 0x6, 0x100000001}]}, 0x30}, 0x1, 0x0, 0x0, 0x40}, 0x40080) r1 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000140)='cgroup.controllers\x00', 0x275a, 0x0) r2 = socket$nl_generic(0x10, 0x3, 0x10) accept4$netrom(r1, 0x0, &(0x7f00000003c0), 0x80000) (async) r3 = accept4$netrom(r1, 0x0, &(0x7f00000003c0), 0x80000) getpeername$netrom(r3, &(0x7f00000006c0)={{0x3, @netrom}, [@default, @remote, @rose, @bcast, @rose, @default, @rose]}, &(0x7f0000000240)=0x84) (async) getpeername$netrom(r3, &(0x7f00000006c0)={{0x3, @netrom}, [@default, @remote, @rose, @bcast, @rose, @default, @rose]}, &(0x7f0000000240)=0x84) syz_genetlink_get_family_id$mptcp(&(0x7f0000000300), 0xffffffffffffffff) (async) r4 = syz_genetlink_get_family_id$mptcp(&(0x7f0000000300), 0xffffffffffffffff) sendmsg$MPTCP_PM_CMD_ADD_ADDR(r2, &(0x7f00000001c0)={0x0, 0x0, 
&(0x7f0000000180)={&(0x7f0000000740)=ANY=[@ANYBLOB="300000007d6ec7b827381a27e980adf51d9db048aa62c830e075e7170dcef2ffee7f403d8519d137adfdf78365d5bf0ece21a8d119a64c5616e331c32f32bcf812a2889ea548dabcfe70bce5af616d6491a30ceb414ce4914f508460edb63304d668ccd07c51519e3e7eed23307b538e8c3796bb5ea0baf8f32f4f1a8107478ded06aeb3ef60ea2a80e0ae3a73c4862188d1bbbcc626d4df2206ac1831289a9a8da619a72186f3989da0b169320c590083e891db1d1ff4e27344dd3101eee1178f0b687799702f796697a5e84e618da0863deb7d31130441172b80c75a0f1f4295fe144e7160e6c39e9e12e7095dbb8850d3ef9d6ee1b5", @ANYRES16=r4, @ANYBLOB="8f1a0000000000000000010000001c0001800800060011000000080003007f0000010600010002000000"], 0x30}}, 0x0) ioctl$F2FS_IOC_MOVE_RANGE(r2, 0xc020f509, &(0x7f0000000040)={r1, 0xcf, 0x9, 0x6}) r5 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000280)='cgroup.controllers\x00', 0x275a, 0x0) write$cgroup_int(r5, &(0x7f0000000000), 0xffffff6a) ioctl$FIBMAP(r5, 0x1, &(0x7f00000000c0)) (async) ioctl$FIBMAP(r5, 0x1, &(0x7f00000000c0)) pipe(&(0x7f0000000080)={0xffffffffffffffff, 0xffffffffffffffff}) bpf$BPF_PROG_WITH_BTFID_LOAD(0x2, &(0x7f0000000000)=@bpf_lsm={0x1d, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, '\x00', 0x0, 0x1b, r6, 0x8, 0x0, 0x0, 0x10, 0x0, 0x0, 0xffffffffffffffff}, 0x80) write$cgroup_subtree(0xffffffffffffffff, &(0x7f0000000500)={[{0x2b, 'net'}, {0x2d, 'rdma'}, {0x2b, 'cpuset'}, {0x2d, 'io'}]}, 0x17) openat$cgroup_ro(0xffffffffffffffff, 0x0, 0x0, 0x0) r7 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000100)='cpuset.effective_cpus\x00', 0x275a, 0x0) write$cgroup_int(r7, &(0x7f0000000380), 0x101bf) (async) write$cgroup_int(r7, &(0x7f0000000380), 0x101bf) ioctl$FS_IOC_RESVSP(r6, 0x40305828, &(0x7f0000000540)={0x0, 0x3, 0x6}) ioctl$EXT4_IOC_ALLOC_DA_BLKS(r7, 0x660c) ioctl$EXT4_IOC_MOVE_EXT(r7, 0xc028660f, &(0x7f0000000180)={0x2880008, r5}) (async) ioctl$EXT4_IOC_MOVE_EXT(r7, 0xc028660f, &(0x7f0000000180)={0x2880008, r5}) epoll_ctl$EPOLL_CTL_ADD(0xffffffffffffffff, 0x1, 0xffffffffffffffff, 0x0) (async) epoll_ctl$EPOLL_CTL_ADD(0xffffffffffffffff, 0x1, 0xffffffffffffffff, 0x0) openat$cgroup_ro(0xffffffffffffffff, 0x0, 0x0, 0x0) ioctl$FS_IOC_RESVSP(r7, 0x40305828, &(0x7f0000000080)={0x0, 0x0, 0x100000000, 0xfff}) (async) ioctl$FS_IOC_RESVSP(r7, 0x40305828, &(0x7f0000000080)={0x0, 0x0, 0x100000000, 0xfff}) mmap(&(0x7f0000000000/0xb36000)=nil, 0xb36000, 0x2, 0x28011, r1, 0x0) [ 2830.529000][T21701] bond996: (slave bridge1062): making interface the new active one [ 2830.537187][T21701] bridge1062: entered promiscuous mode 17:03:15 executing program 2: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0xf5ab1600, 0x0, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) [ 2830.568584][T21701] bond996: (slave bridge1062): Enslaving as an active interface with an up link [ 2830.605515][T21705] netlink: 'syz-executor.4': attribute type 1 has an 
invalid length. 17:03:15 executing program 4: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x28a, 0x0, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4, 0x5}}}, @IFLA_MASTER={0x8}]}, 0x3c}}, 0x0) [ 2830.634459][T21705] workqueue: Failed to create a rescuer kthread for wq "bond642": -EINTR [ 2830.749590][T21712] netlink: 'syz-executor.1': attribute type 1 has an invalid length. 17:03:15 executing program 0: ioctl$sock_SIOCGIFINDEX_802154(0xffffffffffffffff, 0x8933, &(0x7f00000005c0)={'wpan0\x00', 0x0}) sendmsg$NL802154_CMD_GET_SEC_DEVKEY(0xffffffffffffffff, &(0x7f0000000680)={&(0x7f0000000580)={0x10, 0x0, 0x0, 0x20}, 0xc, &(0x7f0000000640)={&(0x7f0000000600)={0x30, 0x0, 0x300, 0x70bd2a, 0x25dfdbfb, {}, [@NL802154_ATTR_IFINDEX={0x8, 0x3, r0}, @NL802154_ATTR_IFINDEX={0x8}, @NL802154_ATTR_WPAN_DEV={0xc, 0x6, 0x100000001}]}, 0x30}, 0x1, 0x0, 0x0, 0x40}, 0x40080) (async) r1 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000140)='cgroup.controllers\x00', 0x275a, 0x0) (async) r2 = socket$nl_generic(0x10, 0x3, 0x10) r3 = accept4$netrom(r1, 0x0, &(0x7f00000003c0), 0x80000) getpeername$netrom(r3, &(0x7f00000006c0)={{0x3, @netrom}, [@default, @remote, @rose, @bcast, @rose, @default, @rose]}, &(0x7f0000000240)=0x84) r4 = syz_genetlink_get_family_id$mptcp(&(0x7f0000000300), 0xffffffffffffffff) sendmsg$MPTCP_PM_CMD_ADD_ADDR(r2, &(0x7f00000001c0)={0x0, 0x0, &(0x7f0000000180)={&(0x7f0000000740)=ANY=[@ANYBLOB="300000007d6ec7b827381a27e980adf51d9db048aa62c830e075e7170dcef2ffee7f403d8519d137adfdf78365d5bf0ece21a8d119a64c5616e331c32f32bcf812a2889ea548dabcfe70bce5af616d6491a30ceb414ce4914f508460edb63304d668ccd07c51519e3e7eed23307b538e8c3796bb5ea0baf8f32f4f1a8107478ded06aeb3ef60ea2a80e0ae3a73c4862188d1bbbcc626d4df2206ac1831289a9a8da619a72186f3989da0b169320c590083e891db1d1ff4e27344dd3101eee1178f0b687799702f796697a5e84e618da0863deb7d31130441172b80c75a0f1f4295fe144e7160e6c39e9e12e7095dbb8850d3ef9d6ee1b5", @ANYRES16=r4, @ANYBLOB="8f1a0000000000000000010000001c0001800800060011000000080003007f0000010600010002000000"], 0x30}}, 0x0) (async) ioctl$F2FS_IOC_MOVE_RANGE(r2, 0xc020f509, &(0x7f0000000040)={r1, 0xcf, 0x9, 0x6}) (async) r5 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000280)='cgroup.controllers\x00', 0x275a, 0x0) write$cgroup_int(r5, &(0x7f0000000000), 0xffffff6a) (async) ioctl$FIBMAP(r5, 0x1, &(0x7f00000000c0)) (async) pipe(&(0x7f0000000080)={0xffffffffffffffff, 0xffffffffffffffff}) bpf$BPF_PROG_WITH_BTFID_LOAD(0x2, &(0x7f0000000000)=@bpf_lsm={0x1d, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, '\x00', 0x0, 0x1b, r6, 0x8, 0x0, 0x0, 0x10, 0x0, 0x0, 0xffffffffffffffff}, 0x80) write$cgroup_subtree(0xffffffffffffffff, &(0x7f0000000500)={[{0x2b, 'net'}, {0x2d, 'rdma'}, {0x2b, 'cpuset'}, {0x2d, 'io'}]}, 0x17) (async) openat$cgroup_ro(0xffffffffffffffff, 0x0, 0x0, 0x0) (async) r7 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000100)='cpuset.effective_cpus\x00', 
0x275a, 0x0) write$cgroup_int(r7, &(0x7f0000000380), 0x101bf) ioctl$FS_IOC_RESVSP(r6, 0x40305828, &(0x7f0000000540)={0x0, 0x3, 0x6}) (async) ioctl$EXT4_IOC_ALLOC_DA_BLKS(r7, 0x660c) ioctl$EXT4_IOC_MOVE_EXT(r7, 0xc028660f, &(0x7f0000000180)={0x2880008, r5}) (async) epoll_ctl$EPOLL_CTL_ADD(0xffffffffffffffff, 0x1, 0xffffffffffffffff, 0x0) (async) openat$cgroup_ro(0xffffffffffffffff, 0x0, 0x0, 0x0) (async) ioctl$FS_IOC_RESVSP(r7, 0x40305828, &(0x7f0000000080)={0x0, 0x0, 0x100000000, 0xfff}) (async) mmap(&(0x7f0000000000/0xb36000)=nil, 0xb36000, 0x2, 0x28011, r1, 0x0) 17:03:15 executing program 1: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x28a, 0xa, {0x0, 0x0, 0x0, 0x0, 0x0, 0xa}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8}]}, 0x3c}}, 0x0) 17:03:15 executing program 3: r0 = socket$netlink(0x10, 0x3, 0x0) socket(0x0, 0x0, 0x0) sendmsg$nl_route(0xffffffffffffffff, 0x0, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000100)=@newlink={0x48, 0x10, 0xffffff1f, 0x0, 0x0, {}, [@IFLA_LINKINFO={0x28, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x18, 0x2, 0x0, 0x1, [@IFLA_BR_MULTI_BOOLOPT={0xc, 0x2e, {0x0, 0x1}}, @IFLA_BR_MCAST_STATS_ENABLED={0x5}]}}}]}, 0x48}}, 0xf0ffff) [ 2830.776596][T21712] workqueue: Failed to create a rescuer kthread for wq "bond765": -EINTR [ 2830.916512][T21728] netlink: 'syz-executor.5': attribute type 1 has an invalid length. 
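Editor's note (illustrative sketch, not part of the captured log): each "executing program N" entry above that triggers the "bondNNN: entered promiscuous mode" / "bridgeNNN: ... Enslaving as an active interface" messages boils down to a 0x3c-byte RTM_NEWLINK request carrying IFLA_LINKINFO (kind "bridge") plus IFLA_MASTER. A minimal plain-C rendering of that message follows; it assumes conventional netlink flags instead of the fuzzed 0xffffff1f header values and omits the empty IFLA_INFO_DATA the reproducers append. Helper names are invented for the illustration.

#include <linux/if_link.h>
#include <linux/netlink.h>
#include <linux/rtnetlink.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

/* Append one rtattr at the current tail of the message (iproute2-style). */
static void add_attr(struct nlmsghdr *nlh, unsigned short type, const void *data, int len)
{
        struct rtattr *rta = (struct rtattr *)((char *)nlh + NLMSG_ALIGN(nlh->nlmsg_len));

        rta->rta_type = type;
        rta->rta_len = RTA_LENGTH(len);
        if (len)
                memcpy(RTA_DATA(rta), data, len);
        nlh->nlmsg_len = NLMSG_ALIGN(nlh->nlmsg_len) + RTA_ALIGN(rta->rta_len);
}

/* Sketch: RTM_NEWLINK creating a "bridge" device that is immediately
 * enslaved to master_ifindex (the index obtained via getsockname above). */
int newlink_bridge_slave(int master_ifindex)
{
        struct {
                struct nlmsghdr n;
                struct ifinfomsg i;
                char attrs[96];
        } req;
        struct rtattr *linkinfo;
        unsigned int linkinfo_off;

        memset(&req, 0, sizeof(req));
        req.n.nlmsg_len = NLMSG_LENGTH(sizeof(struct ifinfomsg));
        req.n.nlmsg_type = RTM_NEWLINK;
        req.n.nlmsg_flags = NLM_F_REQUEST | NLM_F_CREATE | NLM_F_EXCL; /* the dump fuzzes these flags */

        /* IFLA_LINKINFO { IFLA_INFO_KIND = "bridge" } --
         * RTA_LENGTH(sizeof("bridge")) == 0xb, the {0xb} visible in the dump. */
        linkinfo_off = NLMSG_ALIGN(req.n.nlmsg_len);
        linkinfo = (struct rtattr *)((char *)&req.n + linkinfo_off);
        add_attr(&req.n, IFLA_LINKINFO, NULL, 0);
        add_attr(&req.n, IFLA_INFO_KIND, "bridge", sizeof("bridge"));
        linkinfo->rta_len = req.n.nlmsg_len - linkinfo_off;

        /* IFLA_MASTER: enslave the new bridge under the existing bond. */
        add_attr(&req.n, IFLA_MASTER, &master_ifindex, sizeof(master_ifindex));

        int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);
        if (fd < 0)
                return -1;

        struct sockaddr_nl sa = { .nl_family = AF_NETLINK };
        int rc = sendto(fd, &req, req.n.nlmsg_len, 0, (struct sockaddr *)&sa, sizeof(sa));

        close(fd);
        return rc < 0 ? -1 : 0;
}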
17:03:16 executing program 3: r0 = socket$netlink(0x10, 0x3, 0x0) socket(0x0, 0x0, 0x0) sendmsg$nl_route(0xffffffffffffffff, 0x0, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000100)=@newlink={0x48, 0x10, 0xffffff1f, 0x0, 0x0, {}, [@IFLA_LINKINFO={0x28, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x18, 0x2, 0x0, 0x1, [@IFLA_BR_MULTI_BOOLOPT={0xc, 0x2e, {0x0, 0x1}}, @IFLA_BR_MCAST_STATS_ENABLED={0x5}]}}}]}, 0x48}}, 0x1000000) [ 2831.076686][T21728] bond1065: entered promiscuous mode [ 2831.082896][T21728] 8021q: adding VLAN 0 to HW filter on device bond1065 17:03:16 executing program 0: r0 = socket$nl_route(0x10, 0x3, 0x0) sendmsg$nl_route_sched(r0, &(0x7f00000001c0)={&(0x7f0000000080)={0x10, 0x0, 0x0, 0x42068088}, 0xc, &(0x7f0000000140)={&(0x7f0000000200)=ANY=[@ANYBLOB="4c001c00000000f4d58b8cb54f59d3694f744ab0a40000e800", @ANYRES32=0x0, @ANYBLOB="f2fff1ff00000800ffff000006000500ff4b000008000b00f7ffffff0a000100726f7574650000000400020008000b0006000000"], 0x4c}, 0x1, 0x0, 0x0, 0x20000040}, 0x40000) [ 2831.236050][T21729] bond1065: (slave bridge1092): making interface the new active one [ 2831.246266][T21729] bridge1092: entered promiscuous mode [ 2831.272371][T21729] bond1065: (slave bridge1092): Enslaving as an active interface with an up link 17:03:16 executing program 5: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x1da, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) 17:03:16 executing program 0: r0 = socket$nl_route(0x10, 0x3, 0x0) sendmsg$nl_route_sched(r0, &(0x7f00000001c0)={&(0x7f0000000080)={0x10, 0x0, 0x0, 0x42068088}, 0xc, &(0x7f0000000140)={&(0x7f0000000200)=ANY=[@ANYBLOB="4c001c00000000f4d58b8cb54f59d3694f744ab0a40000e800", @ANYRES32=0x0, @ANYBLOB="f2fff1ff00000800ffff000006000500ff4b000008000b00f7ffffff0a000100726f7574650000000400020008000b0006000000"], 0x4c}, 0x1, 0x0, 0x0, 0x20000040}, 0x40000) 17:03:16 executing program 0: r0 = socket$nl_route(0x10, 0x3, 0x0) sendmsg$nl_route_sched(r0, &(0x7f00000001c0)={&(0x7f0000000080)={0x10, 0x0, 0x0, 0x42068088}, 0xc, &(0x7f0000000140)={&(0x7f0000000200)=ANY=[@ANYBLOB="4c001c00000000f4d58b8cb54f59d3694f744ab0a40000e800", @ANYRES32=0x0, @ANYBLOB="f2fff1ff00000800ffff000006000500ff4b000008000b00f7ffffff0a000100726f7574650000000400020008000b0006000000"], 0x4c}, 0x1, 0x0, 0x0, 0x20000040}, 0x40000) [ 2831.402938][T21735] bond997: entered promiscuous mode [ 2831.409920][T21735] 8021q: adding VLAN 0 to HW filter on device bond997 17:03:16 executing program 0: sendmsg$nl_generic(0xffffffffffffffff, &(0x7f0000000100)={0x0, 0x0, &(0x7f0000000180)={&(0x7f00000001c0)={0x34, 0x38, 0x9, 0x0, 0x0, {0x1}, [@typed={0x4}, @nested={0xc, 0x1, 0x0, 0x1, [@typed={0x2e, 0x0, 0x0, 0x0, @str='\b\x00'}]}, @typed={0x8, 0x2, 0x0, 0x0, @pid=0xffffffffffffffff}, @typed={0x8, 0x9, 0x0, 0x0, @u32}]}, 0x34}}, 0x0) r0 = 
socket$nl_rdma(0x10, 0x3, 0x14) ioctl$ifreq_SIOCGIFINDEX_batadv_mesh(r0, 0x8933, &(0x7f0000000000)) sendmsg$RDMA_NLDEV_CMD_SYS_GET(r0, &(0x7f0000000100)={0x0, 0x0, &(0x7f00000000c0)={&(0x7f0000000040)={0x18, 0x1406, 0x401, 0xfffffffd, 0x0, "", [@RDMA_NLDEV_ATTR_DEV_INDEX={0x8}]}, 0x18}}, 0x0) [ 2831.518532][T21739] bond997: (slave bridge1063): making interface the new active one [ 2831.527907][T21739] bridge1063: entered promiscuous mode [ 2831.551391][T21739] bond997: (slave bridge1063): Enslaving as an active interface with an up link 17:03:16 executing program 0: sendmsg$nl_generic(0xffffffffffffffff, &(0x7f0000000100)={0x0, 0x0, &(0x7f0000000180)={&(0x7f00000001c0)={0x34, 0x38, 0x9, 0x0, 0x0, {0x1}, [@typed={0x4}, @nested={0xc, 0x1, 0x0, 0x1, [@typed={0x2e, 0x0, 0x0, 0x0, @str='\b\x00'}]}, @typed={0x8, 0x2, 0x0, 0x0, @pid=0xffffffffffffffff}, @typed={0x8, 0x9, 0x0, 0x0, @u32}]}, 0x34}}, 0x0) r0 = socket$nl_rdma(0x10, 0x3, 0x14) ioctl$ifreq_SIOCGIFINDEX_batadv_mesh(r0, 0x8933, &(0x7f0000000000)) sendmsg$RDMA_NLDEV_CMD_SYS_GET(r0, &(0x7f0000000100)={0x0, 0x0, &(0x7f00000000c0)={&(0x7f0000000040)={0x18, 0x1406, 0x401, 0xfffffffd, 0x0, "", [@RDMA_NLDEV_ATTR_DEV_INDEX={0x8}]}, 0x18}}, 0x0) sendmsg$nl_generic(0xffffffffffffffff, &(0x7f0000000100)={0x0, 0x0, &(0x7f0000000180)={&(0x7f00000001c0)={0x34, 0x38, 0x9, 0x0, 0x0, {0x1}, [@typed={0x4}, @nested={0xc, 0x1, 0x0, 0x1, [@typed={0x2e, 0x0, 0x0, 0x0, @str='\b\x00'}]}, @typed={0x8, 0x2, 0x0, 0x0, @pid=0xffffffffffffffff}, @typed={0x8, 0x9, 0x0, 0x0, @u32}]}, 0x34}}, 0x0) (async) socket$nl_rdma(0x10, 0x3, 0x14) (async) ioctl$ifreq_SIOCGIFINDEX_batadv_mesh(r0, 0x8933, &(0x7f0000000000)) (async) sendmsg$RDMA_NLDEV_CMD_SYS_GET(r0, &(0x7f0000000100)={0x0, 0x0, &(0x7f00000000c0)={&(0x7f0000000040)={0x18, 0x1406, 0x401, 0xfffffffd, 0x0, "", [@RDMA_NLDEV_ATTR_DEV_INDEX={0x8}]}, 0x18}}, 0x0) (async) 17:03:16 executing program 2: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0xf6030000, 0x0, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) [ 2831.652429][T21745] bond642: entered promiscuous mode [ 2831.658372][T21745] 8021q: adding VLAN 0 to HW filter on device bond642 17:03:16 executing program 4: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, 
&(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x28a, 0x0, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4, 0x9}}}, @IFLA_MASTER={0x8}]}, 0x3c}}, 0x0) 17:03:16 executing program 0: sendmsg$nl_generic(0xffffffffffffffff, &(0x7f0000000100)={0x0, 0x0, &(0x7f0000000180)={&(0x7f00000001c0)={0x34, 0x38, 0x9, 0x0, 0x0, {0x1}, [@typed={0x4}, @nested={0xc, 0x1, 0x0, 0x1, [@typed={0x2e, 0x0, 0x0, 0x0, @str='\b\x00'}]}, @typed={0x8, 0x2, 0x0, 0x0, @pid=0xffffffffffffffff}, @typed={0x8, 0x9, 0x0, 0x0, @u32}]}, 0x34}}, 0x0) r0 = socket$nl_rdma(0x10, 0x3, 0x14) ioctl$ifreq_SIOCGIFINDEX_batadv_mesh(r0, 0x8933, &(0x7f0000000000)) sendmsg$RDMA_NLDEV_CMD_SYS_GET(r0, &(0x7f0000000100)={0x0, 0x0, &(0x7f00000000c0)={&(0x7f0000000040)={0x18, 0x1406, 0x401, 0xfffffffd, 0x0, "", [@RDMA_NLDEV_ATTR_DEV_INDEX={0x8}]}, 0x18}}, 0x0) (async) sendmsg$RDMA_NLDEV_CMD_SYS_GET(r0, &(0x7f0000000100)={0x0, 0x0, &(0x7f00000000c0)={&(0x7f0000000040)={0x18, 0x1406, 0x401, 0xfffffffd, 0x0, "", [@RDMA_NLDEV_ATTR_DEV_INDEX={0x8}]}, 0x18}}, 0x0) [ 2831.794630][T21754] bond765: entered promiscuous mode [ 2831.805911][T21754] 8021q: adding VLAN 0 to HW filter on device bond765 17:03:16 executing program 1: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x28a, 0xa, {0x0, 0x0, 0x0, 0x0, 0x0, 0xc}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8}]}, 0x3c}}, 0x0) 17:03:16 executing program 0: mmap(&(0x7f00007f9000/0x4000)=nil, 0x4000, 0x2000008, 0x8010, 0xffffffffffffffff, 0xffffc000) openat$cgroup_ro(0xffffffffffffff9c, 0x0, 0xb00000000065808, 0x0) r0 = socket$alg(0x26, 0x5, 0x0) openat$cgroup_netprio_ifpriomap(0xffffffffffffffff, &(0x7f00000000c0), 0x2, 0x0) bind$alg(r0, &(0x7f0000001300)={0x26, 'hash\x00', 0x0, 0x0, 'sha3-384\x00'}, 0x58) mmap(&(0x7f0000ff9000/0x4000)=nil, 0x4000, 0x0, 0x10, 0xffffffffffffffff, 0x0) r1 = accept$alg(r0, 0x0, 0x0) setsockopt$ALG_SET_KEY(r0, 0x117, 0x1, &(0x7f0000000100)="8a7c1ce49cdc7dd3ccc5b610e1b5897b9a49c1aab60346932b345fc1b6dcf535a7b0d301d47c4f771a1ecc6e0941dd7465c494474a880fa4b3d3f7f08369cee59e82cf609d1e262b55f1da86f242f33a2840d975d182a257be2f780b1eae406d709a2e2c8a8380d958b6cf22e7949d8c722c3e1f8278c74cc2b928e2a5eb01b37ccc9d4cf25d92770f69521c2cbafe2ca41b3def28fb07307670b91b94a48b3b15a768e2bc6079922ac6548f12607e104120cbac939d3c03f31f62f7ad8893", 0xbf) sendmmsg$inet(r1, &(0x7f0000001400)=[{{0x0, 0x0, &(0x7f0000000040)=[{&(0x7f0000000000)="e8", 0x1}], 0x1}}], 0xfffffdef, 0x0) setsockopt$ALG_SET_KEY(r0, 0x117, 0x1, &(0x7f00000001c0)="2dce428aecb2f09a551f673bdd4cf8c9e4cd9fb9ef16e1b496d69b599dee8e75322fad2fe2d8f41ce5712afcdf36d5f39d2cd982c17218b9af0e05cecba97bbdc97a8967edb4153708e0a6977599f791ff539be16672227708cb", 0x5a) 17:03:16 executing program 3: r0 = socket$netlink(0x10, 0x3, 0x0) socket(0x0, 0x0, 0x0) sendmsg$nl_route(0xffffffffffffffff, 0x0, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 
0x0, &(0x7f0000000680)={&(0x7f0000000100)=@newlink={0x48, 0x10, 0xffffff1f, 0x0, 0x0, {}, [@IFLA_LINKINFO={0x28, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x18, 0x2, 0x0, 0x1, [@IFLA_BR_MULTI_BOOLOPT={0xc, 0x2e, {0x0, 0x1}}, @IFLA_BR_MCAST_STATS_ENABLED={0x5}]}}}]}, 0x48}}, 0x2000000) 17:03:16 executing program 0: mmap(&(0x7f00007f9000/0x4000)=nil, 0x4000, 0x2000008, 0x8010, 0xffffffffffffffff, 0xffffc000) openat$cgroup_ro(0xffffffffffffff9c, 0x0, 0xb00000000065808, 0x0) r0 = socket$alg(0x26, 0x5, 0x0) openat$cgroup_netprio_ifpriomap(0xffffffffffffffff, &(0x7f00000000c0), 0x2, 0x0) (async) bind$alg(r0, &(0x7f0000001300)={0x26, 'hash\x00', 0x0, 0x0, 'sha3-384\x00'}, 0x58) mmap(&(0x7f0000ff9000/0x4000)=nil, 0x4000, 0x0, 0x10, 0xffffffffffffffff, 0x0) (async) r1 = accept$alg(r0, 0x0, 0x0) setsockopt$ALG_SET_KEY(r0, 0x117, 0x1, &(0x7f0000000100)="8a7c1ce49cdc7dd3ccc5b610e1b5897b9a49c1aab60346932b345fc1b6dcf535a7b0d301d47c4f771a1ecc6e0941dd7465c494474a880fa4b3d3f7f08369cee59e82cf609d1e262b55f1da86f242f33a2840d975d182a257be2f780b1eae406d709a2e2c8a8380d958b6cf22e7949d8c722c3e1f8278c74cc2b928e2a5eb01b37ccc9d4cf25d92770f69521c2cbafe2ca41b3def28fb07307670b91b94a48b3b15a768e2bc6079922ac6548f12607e104120cbac939d3c03f31f62f7ad8893", 0xbf) (async) sendmmsg$inet(r1, &(0x7f0000001400)=[{{0x0, 0x0, &(0x7f0000000040)=[{&(0x7f0000000000)="e8", 0x1}], 0x1}}], 0xfffffdef, 0x0) (async) setsockopt$ALG_SET_KEY(r0, 0x117, 0x1, &(0x7f00000001c0)="2dce428aecb2f09a551f673bdd4cf8c9e4cd9fb9ef16e1b496d69b599dee8e75322fad2fe2d8f41ce5712afcdf36d5f39d2cd982c17218b9af0e05cecba97bbdc97a8967edb4153708e0a6977599f791ff539be16672227708cb", 0x5a) [ 2832.012850][T21777] bond1066: entered promiscuous mode [ 2832.024445][T21777] 8021q: adding VLAN 0 to HW filter on device bond1066 [ 2832.164568][T21778] bond1066: (slave bridge1093): making interface the new active one [ 2832.186876][T21778] bridge1093: entered promiscuous mode 17:03:17 executing program 5: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x1e2, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) 17:03:17 executing program 0: mmap(&(0x7f00007f9000/0x4000)=nil, 0x4000, 0x2000008, 0x8010, 0xffffffffffffffff, 0xffffc000) openat$cgroup_ro(0xffffffffffffff9c, 0x0, 0xb00000000065808, 0x0) r0 = socket$alg(0x26, 0x5, 0x0) openat$cgroup_netprio_ifpriomap(0xffffffffffffffff, &(0x7f00000000c0), 0x2, 0x0) (async) openat$cgroup_netprio_ifpriomap(0xffffffffffffffff, &(0x7f00000000c0), 0x2, 0x0) bind$alg(r0, &(0x7f0000001300)={0x26, 'hash\x00', 0x0, 0x0, 'sha3-384\x00'}, 0x58) (async) bind$alg(r0, &(0x7f0000001300)={0x26, 'hash\x00', 0x0, 0x0, 'sha3-384\x00'}, 0x58) mmap(&(0x7f0000ff9000/0x4000)=nil, 0x4000, 0x0, 0x10, 0xffffffffffffffff, 0x0) accept$alg(r0, 0x0, 0x0) (async) r1 = accept$alg(r0, 0x0, 0x0) setsockopt$ALG_SET_KEY(r0, 0x117, 0x1, 
&(0x7f0000000100)="8a7c1ce49cdc7dd3ccc5b610e1b5897b9a49c1aab60346932b345fc1b6dcf535a7b0d301d47c4f771a1ecc6e0941dd7465c494474a880fa4b3d3f7f08369cee59e82cf609d1e262b55f1da86f242f33a2840d975d182a257be2f780b1eae406d709a2e2c8a8380d958b6cf22e7949d8c722c3e1f8278c74cc2b928e2a5eb01b37ccc9d4cf25d92770f69521c2cbafe2ca41b3def28fb07307670b91b94a48b3b15a768e2bc6079922ac6548f12607e104120cbac939d3c03f31f62f7ad8893", 0xbf) sendmmsg$inet(r1, &(0x7f0000001400)=[{{0x0, 0x0, &(0x7f0000000040)=[{&(0x7f0000000000)="e8", 0x1}], 0x1}}], 0xfffffdef, 0x0) setsockopt$ALG_SET_KEY(r0, 0x117, 0x1, &(0x7f00000001c0)="2dce428aecb2f09a551f673bdd4cf8c9e4cd9fb9ef16e1b496d69b599dee8e75322fad2fe2d8f41ce5712afcdf36d5f39d2cd982c17218b9af0e05cecba97bbdc97a8967edb4153708e0a6977599f791ff539be16672227708cb", 0x5a) [ 2832.213217][T21778] bond1066: (slave bridge1093): Enslaving as an active interface with an up link [ 2832.276587][T21795] bond998: entered promiscuous mode [ 2832.285120][T21795] 8021q: adding VLAN 0 to HW filter on device bond998 17:03:17 executing program 0: r0 = socket(0x10, 0x3, 0x0) r1 = socket(0x10, 0x803, 0x0) syz_genetlink_get_family_id$nl80211(&(0x7f0000000140), r1) getsockname$packet(r1, &(0x7f0000000100)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000200)=0x14) sendmsg$nl_route(r0, &(0x7f0000000140)={0x0, 0x0, &(0x7f00000003c0)={&(0x7f0000000080)=@newlink={0x3c, 0x10, 0x401, 0x0, 0x0, {0x0, 0x0, 0x0, r2}, [@IFLA_LINKINFO={0x1c, 0x12, 0x0, 0x1, @bond={{0x9}, {0xc, 0x2, 0x0, 0x1, [@IFLA_BOND_MODE={0x5, 0x1, 0x5}]}}}]}, 0x3c}}, 0x0) r3 = socket(0x10, 0x3, 0x0) r4 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000140)='memory.events\x00', 0x7a05, 0x1700) r5 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) write$cgroup_int(r4, &(0x7f0000000200), 0xf000) sendfile(r4, r5, 0x0, 0xf03b0000) sendfile(r4, r4, &(0x7f0000000180), 0x0) r6 = socket$nl_route(0x10, 0x3, 0x0) r7 = socket(0x10, 0x3, 0x0) r8 = socket$nl_route(0x10, 0x3, 0x0) r9 = socket(0x10, 0x2, 0x0) sendmsg$nl_route_sched(r9, &(0x7f0000000180)={0x0, 0x0, &(0x7f0000000140)={0x0, 0x140}}, 0x0) getsockname$packet(r9, &(0x7f0000000080)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000100)=0xab) sendmsg$nl_route(r8, &(0x7f0000000000)={0x0, 0x0, &(0x7f0000000140)={&(0x7f0000000040)=ANY=[@ANYBLOB="3c0000001000010400eeffffffffffff00000000", @ANYRES32=r10, @ANYBLOB="01000000010000001c0012000c000100627269646765"], 0x3c}}, 0x0) sendmsg$nl_route_sched(r7, &(0x7f0000005840)={0x0, 0x0, &(0x7f0000000780)={&(0x7f0000000240)=ANY=[@ANYBLOB="4800000024000b0e00"/20, @ANYRES32=r10, @ANYBLOB="00000000ffffffff0000000008000100687462001c0002001800020003"], 0x48}}, 0x0) sendmsg$nl_route_sched(r6, &(0x7f00000000c0)={0x0, 0x0, &(0x7f0000000180)={&(0x7f00000005c0)=@newtfilter={0x44, 0x2c, 0xd27, 0x0, 0x0, {0x0, 0x0, 0x0, r10, {}, {}, {0xfff3}}, [@filter_kind_options=@f_u32={{0x8}, {0x18, 0x2, [@TCA_U32_SEL={0x14, 0x5, {0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80080}}]}}]}, 0x44}}, 0x0) ioctl$sock_inet6_SIOCADDRT(r4, 0x890b, &(0x7f0000000000)={@loopback, @ipv4={'\x00', '\xff\xff', @empty}, @private2, 0x5, 0x7, 0x3, 0x500, 0xfffffffffffffff9, 0x1080002, r10}) r11 = socket(0x10, 0x803, 0x9) syz_genetlink_get_family_id$nl80211(&(0x7f0000000140), r11) write$cgroup_int(0xffffffffffffffff, &(0x7f0000000200), 0xf000) sendfile(0xffffffffffffffff, 0xffffffffffffffff, 0x0, 0xf03b0000) r12 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) sendfile(r12, 0xffffffffffffffff, 
&(0x7f00000002c0)=0x335773c3, 0x8) write$binfmt_script(r12, &(0x7f0000000280)={'#! ', './file0', [{0x20, '(\'.D#'}, {0x20, '+#`'}], 0xa, "f6f894d1663ae050a316c024cecaa5597235e0bfb64507bc9f96e2e62bf7d395509ff0b1acdae6b83ac05c3c71790f24b1038c19e72c52ac51282b6a9e1a8726ee16670f0505edda531564180e31e3a7502f20a5d32c0871142565f5e0bce17c43b03be47e1a3d187f5e773590abb076b0c3c38af1ff9821ffe737b83d755f3070caa639f5bd2d9f87bc2711a42c8fb8124b04f1b81c031229c03805877bfd9e729b16ba26753591486a121af759322652dfdf5650c7f212ca8aec42cf"}, 0xd2) getsockname$packet(r11, &(0x7f0000000100)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000200)=0x14) sendmsg$nl_route(r3, &(0x7f0000000140)={0x0, 0x0, &(0x7f00000003c0)={&(0x7f0000000180)=@bridge_dellink={0x34, 0x11, 0x200, 0x70bd2c, 0x25dfdbff, {0x7, 0x0, 0x0, 0x0, 0x2848, 0x2000}, [@IFLA_BROADCAST={0xa}, @IFLA_NET_NS_FD={0x8}]}, 0x34}}, 0x0) [ 2832.395990][T21799] bond643: entered promiscuous mode [ 2832.403284][T21799] 8021q: adding VLAN 0 to HW filter on device bond643 [ 2832.485537][T21802] bond998: (slave bridge1064): making interface the new active one [ 2832.494740][T21802] bridge1064: entered promiscuous mode [ 2832.509735][T21802] bond998: (slave bridge1064): Enslaving as an active interface with an up link 17:03:17 executing program 2: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0xf6ab1600, 0x0, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) 17:03:17 executing program 4: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x28a, 0x0, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4, 0xa}}}, @IFLA_MASTER={0x8}]}, 0x3c}}, 0x0) [ 2832.700299][T21809] bond766: entered promiscuous mode [ 2832.706006][T21809] 8021q: adding VLAN 0 to HW filter on device bond766 17:03:17 executing program 1: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, 
&(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x28a, 0xa, {0x0, 0x0, 0x0, 0x0, 0x0, 0xe}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8}]}, 0x3c}}, 0x0) 17:03:17 executing program 3: r0 = socket$netlink(0x10, 0x3, 0x0) socket(0x0, 0x0, 0x0) sendmsg$nl_route(0xffffffffffffffff, 0x0, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000100)=@newlink={0x48, 0x10, 0xffffff1f, 0x0, 0x0, {}, [@IFLA_LINKINFO={0x28, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x18, 0x2, 0x0, 0x1, [@IFLA_BR_MULTI_BOOLOPT={0xc, 0x2e, {0x0, 0x1}}, @IFLA_BR_MCAST_STATS_ENABLED={0x5}]}}}]}, 0x48}}, 0x4000000) [ 2832.865612][T21829] bond1067: entered promiscuous mode [ 2832.872483][T21829] 8021q: adding VLAN 0 to HW filter on device bond1067 17:03:17 executing program 5: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x20a, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) [ 2832.933180][T21830] bond1067: (slave bridge1094): making interface the new active one [ 2832.943288][T21830] bridge1094: entered promiscuous mode [ 2832.956781][T21830] bond1067: (slave bridge1094): Enslaving as an active interface with an up link [ 2833.040452][T21834] netlink: 12 bytes leftover after parsing attributes in process `syz-executor.0'. 
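Editor's note (illustrative sketch, not part of the captured log): the "executing program 0" entries above also exercise AF_ALG: socket(AF_ALG, SOCK_SEQPACKET), bind to type "hash" / name "sha3-384", accept(), ALG_SET_KEY, then sendmmsg. The sketch below shows the same hash flow without the key step, which an unkeyed hash such as SHA3-384 is expected to reject anyway. Helper names are invented for the illustration.

#include <linux/if_alg.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

/* Sketch: compute a SHA3-384 digest through the kernel crypto API
 * using the AF_ALG socket interface exercised by the reproducer. */
int sha3_384_digest(const void *data, size_t len, unsigned char out[48])
{
        int tfm = socket(AF_ALG, SOCK_SEQPACKET, 0);
        if (tfm < 0)
                return -1;

        struct sockaddr_alg sa = { .salg_family = AF_ALG };
        strcpy((char *)sa.salg_type, "hash");
        strcpy((char *)sa.salg_name, "sha3-384");
        if (bind(tfm, (struct sockaddr *)&sa, sizeof(sa)) < 0) {
                close(tfm);
                return -1;
        }

        int op = accept(tfm, NULL, NULL);   /* per-request operation fd */
        int rc = -1;
        if (op >= 0 &&
            send(op, data, len, 0) == (ssize_t)len &&
            read(op, out, 48) == 48)        /* SHA3-384 digest is 48 bytes */
                rc = 0;

        if (op >= 0)
                close(op);
        close(tfm);
        return rc;
}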
17:03:18 executing program 0: r0 = socket(0x10, 0x3, 0x0) (async) r1 = socket(0x10, 0x803, 0x0) syz_genetlink_get_family_id$nl80211(&(0x7f0000000140), r1) (async) getsockname$packet(r1, &(0x7f0000000100)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000200)=0x14) sendmsg$nl_route(r0, &(0x7f0000000140)={0x0, 0x0, &(0x7f00000003c0)={&(0x7f0000000080)=@newlink={0x3c, 0x10, 0x401, 0x0, 0x0, {0x0, 0x0, 0x0, r2}, [@IFLA_LINKINFO={0x1c, 0x12, 0x0, 0x1, @bond={{0x9}, {0xc, 0x2, 0x0, 0x1, [@IFLA_BOND_MODE={0x5, 0x1, 0x5}]}}}]}, 0x3c}}, 0x0) (async) r3 = socket(0x10, 0x3, 0x0) (async) r4 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000140)='memory.events\x00', 0x7a05, 0x1700) (async) r5 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) write$cgroup_int(r4, &(0x7f0000000200), 0xf000) (async) sendfile(r4, r5, 0x0, 0xf03b0000) (async) sendfile(r4, r4, &(0x7f0000000180), 0x0) (async) r6 = socket$nl_route(0x10, 0x3, 0x0) r7 = socket(0x10, 0x3, 0x0) (async) r8 = socket$nl_route(0x10, 0x3, 0x0) r9 = socket(0x10, 0x2, 0x0) sendmsg$nl_route_sched(r9, &(0x7f0000000180)={0x0, 0x0, &(0x7f0000000140)={0x0, 0x140}}, 0x0) getsockname$packet(r9, &(0x7f0000000080)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000100)=0xab) sendmsg$nl_route(r8, &(0x7f0000000000)={0x0, 0x0, &(0x7f0000000140)={&(0x7f0000000040)=ANY=[@ANYBLOB="3c0000001000010400eeffffffffffff00000000", @ANYRES32=r10, @ANYBLOB="01000000010000001c0012000c000100627269646765"], 0x3c}}, 0x0) sendmsg$nl_route_sched(r7, &(0x7f0000005840)={0x0, 0x0, &(0x7f0000000780)={&(0x7f0000000240)=ANY=[@ANYBLOB="4800000024000b0e00"/20, @ANYRES32=r10, @ANYBLOB="00000000ffffffff0000000008000100687462001c0002001800020003"], 0x48}}, 0x0) sendmsg$nl_route_sched(r6, &(0x7f00000000c0)={0x0, 0x0, &(0x7f0000000180)={&(0x7f00000005c0)=@newtfilter={0x44, 0x2c, 0xd27, 0x0, 0x0, {0x0, 0x0, 0x0, r10, {}, {}, {0xfff3}}, [@filter_kind_options=@f_u32={{0x8}, {0x18, 0x2, [@TCA_U32_SEL={0x14, 0x5, {0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80080}}]}}]}, 0x44}}, 0x0) (async) ioctl$sock_inet6_SIOCADDRT(r4, 0x890b, &(0x7f0000000000)={@loopback, @ipv4={'\x00', '\xff\xff', @empty}, @private2, 0x5, 0x7, 0x3, 0x500, 0xfffffffffffffff9, 0x1080002, r10}) (async) r11 = socket(0x10, 0x803, 0x9) syz_genetlink_get_family_id$nl80211(&(0x7f0000000140), r11) (async) write$cgroup_int(0xffffffffffffffff, &(0x7f0000000200), 0xf000) (async) sendfile(0xffffffffffffffff, 0xffffffffffffffff, 0x0, 0xf03b0000) r12 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) sendfile(r12, 0xffffffffffffffff, &(0x7f00000002c0)=0x335773c3, 0x8) write$binfmt_script(r12, &(0x7f0000000280)={'#! 
', './file0', [{0x20, '(\'.D#'}, {0x20, '+#`'}], 0xa, "f6f894d1663ae050a316c024cecaa5597235e0bfb64507bc9f96e2e62bf7d395509ff0b1acdae6b83ac05c3c71790f24b1038c19e72c52ac51282b6a9e1a8726ee16670f0505edda531564180e31e3a7502f20a5d32c0871142565f5e0bce17c43b03be47e1a3d187f5e773590abb076b0c3c38af1ff9821ffe737b83d755f3070caa639f5bd2d9f87bc2711a42c8fb8124b04f1b81c031229c03805877bfd9e729b16ba26753591486a121af759322652dfdf5650c7f212ca8aec42cf"}, 0xd2) getsockname$packet(r11, &(0x7f0000000100)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000200)=0x14) (async) sendmsg$nl_route(r3, &(0x7f0000000140)={0x0, 0x0, &(0x7f00000003c0)={&(0x7f0000000180)=@bridge_dellink={0x34, 0x11, 0x200, 0x70bd2c, 0x25dfdbff, {0x7, 0x0, 0x0, 0x0, 0x2848, 0x2000}, [@IFLA_BROADCAST={0xa}, @IFLA_NET_NS_FD={0x8}]}, 0x34}}, 0x0) [ 2833.160114][T21839] bond999: entered promiscuous mode [ 2833.166419][T21839] 8021q: adding VLAN 0 to HW filter on device bond999 [ 2833.226537][T21841] bond644: entered promiscuous mode [ 2833.233567][T21841] 8021q: adding VLAN 0 to HW filter on device bond644 [ 2833.330936][T21842] bond999: (slave bridge1065): making interface the new active one [ 2833.349951][T21842] bridge1065: entered promiscuous mode [ 2833.376774][T21842] bond999: (slave bridge1065): Enslaving as an active interface with an up link 17:03:18 executing program 2: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0xf7ab1600, 0x0, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) 17:03:18 executing program 4: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x28a, 0x0, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4, 0x10}}}, @IFLA_MASTER={0x8}]}, 0x3c}}, 0x0) [ 2833.525107][T21847] bond767: entered promiscuous mode [ 2833.540779][T21847] 8021q: adding VLAN 0 to HW filter on device bond767 17:03:18 executing program 1: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, 
&(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x28a, 0xa, {0x0, 0x0, 0x0, 0x0, 0x0, 0x10}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8}]}, 0x3c}}, 0x0) 17:03:18 executing program 3: r0 = socket$netlink(0x10, 0x3, 0x0) socket(0x0, 0x0, 0x0) sendmsg$nl_route(0xffffffffffffffff, 0x0, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000100)=@newlink={0x48, 0x10, 0xffffff1f, 0x0, 0x0, {}, [@IFLA_LINKINFO={0x28, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x18, 0x2, 0x0, 0x1, [@IFLA_BR_MULTI_BOOLOPT={0xc, 0x2e, {0x0, 0x1}}, @IFLA_BR_MCAST_STATS_ENABLED={0x5}]}}}]}, 0x48}}, 0x6000000) [ 2833.736956][T21858] bond1068: entered promiscuous mode [ 2833.751287][T21858] 8021q: adding VLAN 0 to HW filter on device bond1068 17:03:18 executing program 0: r0 = socket(0x10, 0x3, 0x0) (async) r1 = socket(0x10, 0x803, 0x0) syz_genetlink_get_family_id$nl80211(&(0x7f0000000140), r1) getsockname$packet(r1, &(0x7f0000000100)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000200)=0x14) sendmsg$nl_route(r0, &(0x7f0000000140)={0x0, 0x0, &(0x7f00000003c0)={&(0x7f0000000080)=@newlink={0x3c, 0x10, 0x401, 0x0, 0x0, {0x0, 0x0, 0x0, r2}, [@IFLA_LINKINFO={0x1c, 0x12, 0x0, 0x1, @bond={{0x9}, {0xc, 0x2, 0x0, 0x1, [@IFLA_BOND_MODE={0x5, 0x1, 0x5}]}}}]}, 0x3c}}, 0x0) (async) r3 = socket(0x10, 0x3, 0x0) (async) r4 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000140)='memory.events\x00', 0x7a05, 0x1700) (async) r5 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) write$cgroup_int(r4, &(0x7f0000000200), 0xf000) (async) sendfile(r4, r5, 0x0, 0xf03b0000) sendfile(r4, r4, &(0x7f0000000180), 0x0) (async) r6 = socket$nl_route(0x10, 0x3, 0x0) r7 = socket(0x10, 0x3, 0x0) (async) r8 = socket$nl_route(0x10, 0x3, 0x0) (async) r9 = socket(0x10, 0x2, 0x0) sendmsg$nl_route_sched(r9, &(0x7f0000000180)={0x0, 0x0, &(0x7f0000000140)={0x0, 0x140}}, 0x0) (async) getsockname$packet(r9, &(0x7f0000000080)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000100)=0xab) sendmsg$nl_route(r8, &(0x7f0000000000)={0x0, 0x0, &(0x7f0000000140)={&(0x7f0000000040)=ANY=[@ANYBLOB="3c0000001000010400eeffffffffffff00000000", @ANYRES32=r10, @ANYBLOB="01000000010000001c0012000c000100627269646765"], 0x3c}}, 0x0) sendmsg$nl_route_sched(r7, &(0x7f0000005840)={0x0, 0x0, &(0x7f0000000780)={&(0x7f0000000240)=ANY=[@ANYBLOB="4800000024000b0e00"/20, @ANYRES32=r10, @ANYBLOB="00000000ffffffff0000000008000100687462001c0002001800020003"], 0x48}}, 0x0) sendmsg$nl_route_sched(r6, &(0x7f00000000c0)={0x0, 0x0, &(0x7f0000000180)={&(0x7f00000005c0)=@newtfilter={0x44, 0x2c, 0xd27, 0x0, 0x0, {0x0, 0x0, 0x0, r10, {}, {}, {0xfff3}}, [@filter_kind_options=@f_u32={{0x8}, {0x18, 0x2, [@TCA_U32_SEL={0x14, 0x5, {0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80080}}]}}]}, 0x44}}, 0x0) ioctl$sock_inet6_SIOCADDRT(r4, 0x890b, &(0x7f0000000000)={@loopback, @ipv4={'\x00', '\xff\xff', @empty}, @private2, 0x5, 0x7, 0x3, 0x500, 0xfffffffffffffff9, 0x1080002, r10}) (async) r11 = socket(0x10, 0x803, 0x9) syz_genetlink_get_family_id$nl80211(&(0x7f0000000140), r11) (async) write$cgroup_int(0xffffffffffffffff, &(0x7f0000000200), 0xf000) sendfile(0xffffffffffffffff, 0xffffffffffffffff, 0x0, 0xf03b0000) r12 = 
openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) sendfile(r12, 0xffffffffffffffff, &(0x7f00000002c0)=0x335773c3, 0x8) (async) write$binfmt_script(r12, &(0x7f0000000280)={'#! ', './file0', [{0x20, '(\'.D#'}, {0x20, '+#`'}], 0xa, "f6f894d1663ae050a316c024cecaa5597235e0bfb64507bc9f96e2e62bf7d395509ff0b1acdae6b83ac05c3c71790f24b1038c19e72c52ac51282b6a9e1a8726ee16670f0505edda531564180e31e3a7502f20a5d32c0871142565f5e0bce17c43b03be47e1a3d187f5e773590abb076b0c3c38af1ff9821ffe737b83d755f3070caa639f5bd2d9f87bc2711a42c8fb8124b04f1b81c031229c03805877bfd9e729b16ba26753591486a121af759322652dfdf5650c7f212ca8aec42cf"}, 0xd2) (async) getsockname$packet(r11, &(0x7f0000000100)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000200)=0x14) (async) sendmsg$nl_route(r3, &(0x7f0000000140)={0x0, 0x0, &(0x7f00000003c0)={&(0x7f0000000180)=@bridge_dellink={0x34, 0x11, 0x200, 0x70bd2c, 0x25dfdbff, {0x7, 0x0, 0x0, 0x0, 0x2848, 0x2000}, [@IFLA_BROADCAST={0xa}, @IFLA_NET_NS_FD={0x8}]}, 0x34}}, 0x0) [ 2833.855523][T21859] bond1068: (slave bridge1095): making interface the new active one [ 2833.866716][T21859] bridge1095: entered promiscuous mode [ 2833.881840][T21859] bond1068: (slave bridge1095): Enslaving as an active interface with an up link 17:03:18 executing program 5: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x22a, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) [ 2833.983048][T21871] bond645: entered promiscuous mode [ 2833.999242][T21871] 8021q: adding VLAN 0 to HW filter on device bond645 [ 2834.014559][T21873] validate_nla: 13 callbacks suppressed [ 2834.014585][T21873] netlink: 'syz-executor.2': attribute type 1 has an invalid length. 
[ 2834.085669][T21873] bond1000: entered promiscuous mode [ 2834.104637][T21873] 8021q: adding VLAN 0 to HW filter on device bond1000 17:03:19 executing program 4: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x28a, 0x0, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4, 0x14}}}, @IFLA_MASTER={0x8}]}, 0x3c}}, 0x0) [ 2834.232458][T21875] bond1000: (slave bridge1066): making interface the new active one [ 2834.242168][T21875] bridge1066: entered promiscuous mode [ 2834.257864][T21875] bond1000: (slave bridge1066): Enslaving as an active interface with an up link [ 2834.267827][T21879] netlink: 'syz-executor.1': attribute type 1 has an invalid length. 17:03:19 executing program 2: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0xf8ab1600, 0x0, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) [ 2834.345998][T21879] bond768: entered promiscuous mode [ 2834.364735][T21879] 8021q: adding VLAN 0 to HW filter on device bond768 17:03:19 executing program 0: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x28a, 0xa, {0x0, 0x0, 0x0, 0x0, 0x0, 0xc}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8}]}, 0x3c}}, 0x0) 17:03:19 executing program 1: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, 
&(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x28a, 0xa, {0x0, 0x0, 0x0, 0x0, 0x0, 0x16}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8}]}, 0x3c}}, 0x0) [ 2834.470787][T21892] netlink: 'syz-executor.5': attribute type 1 has an invalid length. 17:03:19 executing program 3: r0 = socket$netlink(0x10, 0x3, 0x0) socket(0x0, 0x0, 0x0) sendmsg$nl_route(0xffffffffffffffff, 0x0, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000100)=@newlink={0x48, 0x10, 0xffffff1f, 0x0, 0x0, {}, [@IFLA_LINKINFO={0x28, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x18, 0x2, 0x0, 0x1, [@IFLA_BR_MULTI_BOOLOPT={0xc, 0x2e, {0x0, 0x1}}, @IFLA_BR_MCAST_STATS_ENABLED={0x5}]}}}]}, 0x48}}, 0x8000000) [ 2834.549602][T21892] bond1069: entered promiscuous mode [ 2834.565705][T21892] 8021q: adding VLAN 0 to HW filter on device bond1069 [ 2834.692716][T21896] bond1069: (slave bridge1096): making interface the new active one [ 2834.704063][T21896] bridge1096: entered promiscuous mode [ 2834.720244][T21896] bond1069: (slave bridge1096): Enslaving as an active interface with an up link 17:03:19 executing program 5: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x242, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) [ 2834.770591][T21898] netlink: 'syz-executor.4': attribute type 1 has an invalid length. [ 2834.843862][T21898] bond646: entered promiscuous mode [ 2834.849832][T21898] 8021q: adding VLAN 0 to HW filter on device bond646 17:03:19 executing program 4: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x28a, 0x0, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4, 0x63}}}, @IFLA_MASTER={0x8}]}, 0x3c}}, 0x0) [ 2834.890728][T21902] netlink: 'syz-executor.2': attribute type 1 has an invalid length. 
[ 2834.966395][T21902] bond1001: entered promiscuous mode [ 2834.973498][T21902] 8021q: adding VLAN 0 to HW filter on device bond1001 [ 2835.075481][T21903] bond1001: (slave bridge1067): making interface the new active one [ 2835.086172][T21903] bridge1067: entered promiscuous mode [ 2835.103232][T21903] bond1001: (slave bridge1067): Enslaving as an active interface with an up link [ 2835.114840][T21905] netlink: 'syz-executor.0': attribute type 1 has an invalid length. 17:03:20 executing program 2: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0xf9ab1600, 0x0, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) [ 2835.182832][T21905] bond494: entered promiscuous mode [ 2835.189135][T21905] 8021q: adding VLAN 0 to HW filter on device bond494 [ 2835.224034][T21910] netlink: 'syz-executor.1': attribute type 1 has an invalid length. [ 2835.289624][T21910] bond769: entered promiscuous mode [ 2835.295700][T21910] 8021q: adding VLAN 0 to HW filter on device bond769 17:03:20 executing program 0: pipe(&(0x7f0000000780)={0xffffffffffffffff}) r1 = syz_genetlink_get_family_id$tipc2(&(0x7f0000000800), 0xffffffffffffffff) sendmsg$TIPC_NL_BEARER_SET(r0, &(0x7f0000000900)={&(0x7f00000007c0)={0x10, 0x0, 0x0, 0x800}, 0xc, &(0x7f00000008c0)={&(0x7f0000000840)={0x54, r1, 0x300, 0x70bd2b, 0x25dfdbfe, {}, [@TIPC_NLA_MEDIA={0x40, 0x5, 0x0, 0x1, [@TIPC_NLA_MEDIA_PROP={0x34, 0x2, 0x0, 0x1, [@TIPC_NLA_PROP_MTU={0x8, 0x4, 0x4}, @TIPC_NLA_PROP_PRIO={0x8, 0x1, 0x10}, @TIPC_NLA_PROP_WIN={0x8}, @TIPC_NLA_PROP_TOL={0x8, 0x2, 0xb3c7}, @TIPC_NLA_PROP_WIN={0x8, 0x3, 0x6}, @TIPC_NLA_PROP_PRIO={0x8, 0x1, 0x9}]}, @TIPC_NLA_MEDIA_NAME={0x8, 0x1, 'eth\x00'}]}]}, 0x54}}, 0x0) r2 = socket$nl_generic(0x10, 0x3, 0x10) r3 = syz_genetlink_get_family_id$SEG6(&(0x7f0000000480), 0xffffffffffffffff) sendmsg$SEG6_CMD_SETHMAC(r2, &(0x7f0000000100)={0x0, 0xe00, &(0x7f00000000c0)={&(0x7f0000000040)={0x34, r3, 0x1, 0x0, 0x0, {}, [@SEG6_ATTR_HMACKEYID={0x8, 0x3, 0x1f}, @SEG6_ATTR_SECRETLEN={0x5, 0x5, 0x4}, @SEG6_ATTR_SECRET={0x8, 0x4, [0x0]}, @SEG6_ATTR_ALGID={0x5}]}, 0x34}}, 0x0) r4 = socket$rxrpc(0x21, 0x2, 0xa) r5 = syz_genetlink_get_family_id$batadv(&(0x7f0000007580), 0xffffffffffffffff) sendmsg$BATADV_CMD_GET_GATEWAYS(0xffffffffffffffff, &(0x7f0000007680)={0x0, 0x0, &(0x7f0000007640)={&(0x7f0000000680)=ANY=[@ANYBLOB="46040000d448d30ad21fd12d11a1914f8292d562920554fe53e95a4103d50534775806ee4b4427285f62a50b6a1c0dafe75c834b6db8545ec47f3cf2bee4a7ec293ada7e6f9612375cb4a93dffd92827575429d0b6fb7ab831da88f39afdef016fa9ac9fdf2457ebc7207d0d332d4900b422f6e04d18d0fd956eca28495b70399fdd6f823892bd4b033b9a13693f58f841717edc1a705519421c2c3871f4b4d0867d1286dbbe613da4d67090c0335a3a6d8643f062dd260632dc534324e90686ce", @ANYRES16=r5, @ANYBLOB="ff830500000000000000", @ANYRES32=r4], 0x4}}, 0x0) r6 = socket$igmp(0x2, 0x3, 0x2) r7 = syz_init_net_socket$bt_hci(0x1f, 
0x3, 0x1) r8 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) r9 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000140)='memory.events\x00', 0x7a05, 0x1700) r10 = accept4$nfc_llcp(r0, &(0x7f0000000280), &(0x7f0000000940)=0x60, 0x800) ioctl$FS_IOC_RESVSP(0xffffffffffffffff, 0x40305828, &(0x7f00000009c0)={0x0, 0x1, 0x0, 0x200}) ioctl$F2FS_IOC_WRITE_CHECKPOINT(r10, 0xf507, 0x0) write$cgroup_int(r9, &(0x7f0000000200), 0xf000) r11 = syz_genetlink_get_family_id$batadv(&(0x7f0000000580), r2) sendmsg$BATADV_CMD_TP_METER(r8, &(0x7f0000000640)={&(0x7f0000000540), 0xc, &(0x7f0000000600)={&(0x7f00000005c0)={0x1c, r11, 0x100, 0x70bd2b, 0x25dfdbfc, {}, [@BATADV_ATTR_BRIDGE_LOOP_AVOIDANCE_ENABLED={0x5}]}, 0x1c}, 0x1, 0x0, 0x0, 0x4000080}, 0x4000000) sendfile(r9, r8, 0x0, 0x800000000000c) ioctl$sock_SIOCGIFINDEX_80211(r9, 0x8933, &(0x7f0000000500)={'wlan1\x00'}) r12 = gettid() r13 = socket$inet6(0xa, 0x1, 0x8010000000000084) bind$inet6(r13, &(0x7f00000000c0)={0xa, 0x4e21, 0x0, @empty}, 0x1c) connect$inet6(r13, &(0x7f0000000000)={0xa, 0x4e21, 0x0, @ipv4={'\x00', '\xff\xff', @dev={0xac, 0x14, 0x14, 0x39}}}, 0x1c) setsockopt$inet_sctp6_SCTP_PEER_ADDR_PARAMS(r13, 0x84, 0x9, &(0x7f0000000300)={0x0, @in6={{0xa, 0x0, 0x0, @empty}}, 0x0, 0x0, 0x300, 0x0, 0xb3550aa4ba878394}, 0x9c) write$binfmt_misc(r7, &(0x7f0000000180)=ANY=[@ANYRESHEX, @ANYRES32=0x0, @ANYRES64=r13, @ANYRES64=r12, @ANYRES32=r9, @ANYRESOCT], 0x4) sendmsg$DEVLINK_CMD_PORT_GET(0xffffffffffffffff, &(0x7f00000004c0)={&(0x7f0000000300)={0x10, 0x0, 0x0, 0x2000000}, 0xc, &(0x7f0000000440)={&(0x7f0000000340)={0xf4, 0x0, 0x800, 0x70bd29, 0x25dfdbfd, {}, [{{@pci={{0x8}, {0x11}}, {0x8}}}, {{@nsim={{0xe}, {0xf, 0x2, {'netdevsim', 0x0}}}, {0x8}}}, {{@pci={{0x8}, {0x11}}, {0x8, 0x3, 0x3}}}, {{@nsim={{0xe}, {0xf, 0x2, {'netdevsim', 0x0}}}, {0x8}}}, {{@pci={{0x8}, {0x11}}, {0x8, 0x3, 0x2}}}, {{@pci={{0x8}, {0x11}}, {0x8, 0x3, 0x1}}}]}, 0xf4}, 0x1, 0x0, 0x0, 0x10}, 0x0) setsockopt$IP_VS_SO_SET_ADD(r6, 0x0, 0x482, &(0x7f0000000080)={0x84, @rand_addr, 0x0, 0x0, 'wrr\x00', 0x0, 0x7fffffff, 0x36}, 0x2c) sendmsg$BATADV_CMD_GET_HARDIF(r2, &(0x7f0000000180)={&(0x7f0000000000)={0x10, 0x0, 0x0, 0x20}, 0xc, &(0x7f0000000140)={&(0x7f00000001c0)=ANY=[@ANYBLOB="5e06cd51664089fad1b9e65ee9928f9a6de36af22582223e699c22e45c93016f23b8bfc4d6cf6361099b1ff484e6e1a3ff1b82d02b215260a85ee5c5b9f8eff25ef0c1b6138f398a48a7acd63c382b73be0f6e947a826538e69109b4a2224dad1a84122ad73917d2ee481d53f88c85b2a489d86bbde28053d5941e47dc5e95", @ANYRES16=r5, @ANYBLOB="000826bd0800fbdbdf250500000005002a000100000005003700000000000500330002"], 0x34}}, 0x21) 17:03:20 executing program 3: r0 = socket$netlink(0x10, 0x3, 0x0) socket(0x0, 0x0, 0x0) sendmsg$nl_route(0xffffffffffffffff, 0x0, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000100)=@newlink={0x48, 0x10, 0xffffff1f, 0x0, 0x0, {}, [@IFLA_LINKINFO={0x28, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x18, 0x2, 0x0, 0x1, [@IFLA_BR_MULTI_BOOLOPT={0xc, 0x2e, {0x0, 0x1}}, @IFLA_BR_MCAST_STATS_ENABLED={0x5}]}}}]}, 0x48}}, 0xa000000) 17:03:20 executing program 1: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, 
&(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x28a, 0xa, {0x0, 0x0, 0x0, 0x0, 0x0, 0x60}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8}]}, 0x3c}}, 0x0) [ 2835.571329][T21919] netlink: 'syz-executor.5': attribute type 1 has an invalid length. [ 2835.651387][T21919] bond1070: entered promiscuous mode [ 2835.657821][T21919] 8021q: adding VLAN 0 to HW filter on device bond1070 [ 2835.793182][T21922] bond1070: (slave bridge1097): making interface the new active one [ 2835.803645][T21922] bridge1097: entered promiscuous mode [ 2835.822781][T21922] bond1070: (slave bridge1097): Enslaving as an active interface with an up link [ 2835.833455][T21923] netlink: 'syz-executor.4': attribute type 1 has an invalid length. 17:03:20 executing program 5: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x244, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) 17:03:20 executing program 4: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x28a, 0x0, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8}]}, 0x3c}}, 0x0) [ 2835.865284][T21923] workqueue: Failed to create a rescuer kthread for wq "bond647": -EINTR [ 2835.940482][T21928] netlink: 'syz-executor.2': attribute type 1 has an invalid length. 
[ 2836.020698][T21928] bond1002: entered promiscuous mode [ 2836.026880][T21928] 8021q: adding VLAN 0 to HW filter on device bond1002 17:03:21 executing program 2: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0xfa020000, 0x0, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) [ 2836.132696][T21929] bond1002: (slave bridge1068): making interface the new active one [ 2836.142119][T21929] bridge1068: entered promiscuous mode [ 2836.154575][T21929] bond1002: (slave bridge1068): Enslaving as an active interface with an up link [ 2836.215193][T21933] bond770: entered promiscuous mode [ 2836.221273][T21933] 8021q: adding VLAN 0 to HW filter on device bond770 17:03:21 executing program 0: pipe(&(0x7f0000000780)={0xffffffffffffffff}) (async) r1 = syz_genetlink_get_family_id$tipc2(&(0x7f0000000800), 0xffffffffffffffff) sendmsg$TIPC_NL_BEARER_SET(r0, &(0x7f0000000900)={&(0x7f00000007c0)={0x10, 0x0, 0x0, 0x800}, 0xc, &(0x7f00000008c0)={&(0x7f0000000840)={0x54, r1, 0x300, 0x70bd2b, 0x25dfdbfe, {}, [@TIPC_NLA_MEDIA={0x40, 0x5, 0x0, 0x1, [@TIPC_NLA_MEDIA_PROP={0x34, 0x2, 0x0, 0x1, [@TIPC_NLA_PROP_MTU={0x8, 0x4, 0x4}, @TIPC_NLA_PROP_PRIO={0x8, 0x1, 0x10}, @TIPC_NLA_PROP_WIN={0x8}, @TIPC_NLA_PROP_TOL={0x8, 0x2, 0xb3c7}, @TIPC_NLA_PROP_WIN={0x8, 0x3, 0x6}, @TIPC_NLA_PROP_PRIO={0x8, 0x1, 0x9}]}, @TIPC_NLA_MEDIA_NAME={0x8, 0x1, 'eth\x00'}]}]}, 0x54}}, 0x0) r2 = socket$nl_generic(0x10, 0x3, 0x10) (async) r3 = syz_genetlink_get_family_id$SEG6(&(0x7f0000000480), 0xffffffffffffffff) sendmsg$SEG6_CMD_SETHMAC(r2, &(0x7f0000000100)={0x0, 0xe00, &(0x7f00000000c0)={&(0x7f0000000040)={0x34, r3, 0x1, 0x0, 0x0, {}, [@SEG6_ATTR_HMACKEYID={0x8, 0x3, 0x1f}, @SEG6_ATTR_SECRETLEN={0x5, 0x5, 0x4}, @SEG6_ATTR_SECRET={0x8, 0x4, [0x0]}, @SEG6_ATTR_ALGID={0x5}]}, 0x34}}, 0x0) (async) r4 = socket$rxrpc(0x21, 0x2, 0xa) (async) r5 = syz_genetlink_get_family_id$batadv(&(0x7f0000007580), 0xffffffffffffffff) sendmsg$BATADV_CMD_GET_GATEWAYS(0xffffffffffffffff, &(0x7f0000007680)={0x0, 0x0, &(0x7f0000007640)={&(0x7f0000000680)=ANY=[@ANYBLOB="46040000d448d30ad21fd12d11a1914f8292d562920554fe53e95a4103d50534775806ee4b4427285f62a50b6a1c0dafe75c834b6db8545ec47f3cf2bee4a7ec293ada7e6f9612375cb4a93dffd92827575429d0b6fb7ab831da88f39afdef016fa9ac9fdf2457ebc7207d0d332d4900b422f6e04d18d0fd956eca28495b70399fdd6f823892bd4b033b9a13693f58f841717edc1a705519421c2c3871f4b4d0867d1286dbbe613da4d67090c0335a3a6d8643f062dd260632dc534324e90686ce", @ANYRES16=r5, @ANYBLOB="ff830500000000000000", @ANYRES32=r4], 0x4}}, 0x0) (async) r6 = socket$igmp(0x2, 0x3, 0x2) (async) r7 = syz_init_net_socket$bt_hci(0x1f, 0x3, 0x1) (async) r8 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) (async) r9 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000140)='memory.events\x00', 0x7a05, 0x1700) (async) r10 = accept4$nfc_llcp(r0, 
&(0x7f0000000280), &(0x7f0000000940)=0x60, 0x800) ioctl$FS_IOC_RESVSP(0xffffffffffffffff, 0x40305828, &(0x7f00000009c0)={0x0, 0x1, 0x0, 0x200}) (async) ioctl$F2FS_IOC_WRITE_CHECKPOINT(r10, 0xf507, 0x0) (async) write$cgroup_int(r9, &(0x7f0000000200), 0xf000) (async) r11 = syz_genetlink_get_family_id$batadv(&(0x7f0000000580), r2) sendmsg$BATADV_CMD_TP_METER(r8, &(0x7f0000000640)={&(0x7f0000000540), 0xc, &(0x7f0000000600)={&(0x7f00000005c0)={0x1c, r11, 0x100, 0x70bd2b, 0x25dfdbfc, {}, [@BATADV_ATTR_BRIDGE_LOOP_AVOIDANCE_ENABLED={0x5}]}, 0x1c}, 0x1, 0x0, 0x0, 0x4000080}, 0x4000000) sendfile(r9, r8, 0x0, 0x800000000000c) ioctl$sock_SIOCGIFINDEX_80211(r9, 0x8933, &(0x7f0000000500)={'wlan1\x00'}) (async) r12 = gettid() (async) r13 = socket$inet6(0xa, 0x1, 0x8010000000000084) bind$inet6(r13, &(0x7f00000000c0)={0xa, 0x4e21, 0x0, @empty}, 0x1c) (async) connect$inet6(r13, &(0x7f0000000000)={0xa, 0x4e21, 0x0, @ipv4={'\x00', '\xff\xff', @dev={0xac, 0x14, 0x14, 0x39}}}, 0x1c) setsockopt$inet_sctp6_SCTP_PEER_ADDR_PARAMS(r13, 0x84, 0x9, &(0x7f0000000300)={0x0, @in6={{0xa, 0x0, 0x0, @empty}}, 0x0, 0x0, 0x300, 0x0, 0xb3550aa4ba878394}, 0x9c) (async) write$binfmt_misc(r7, &(0x7f0000000180)=ANY=[@ANYRESHEX, @ANYRES32=0x0, @ANYRES64=r13, @ANYRES64=r12, @ANYRES32=r9, @ANYRESOCT], 0x4) (async) sendmsg$DEVLINK_CMD_PORT_GET(0xffffffffffffffff, &(0x7f00000004c0)={&(0x7f0000000300)={0x10, 0x0, 0x0, 0x2000000}, 0xc, &(0x7f0000000440)={&(0x7f0000000340)={0xf4, 0x0, 0x800, 0x70bd29, 0x25dfdbfd, {}, [{{@pci={{0x8}, {0x11}}, {0x8}}}, {{@nsim={{0xe}, {0xf, 0x2, {'netdevsim', 0x0}}}, {0x8}}}, {{@pci={{0x8}, {0x11}}, {0x8, 0x3, 0x3}}}, {{@nsim={{0xe}, {0xf, 0x2, {'netdevsim', 0x0}}}, {0x8}}}, {{@pci={{0x8}, {0x11}}, {0x8, 0x3, 0x2}}}, {{@pci={{0x8}, {0x11}}, {0x8, 0x3, 0x1}}}]}, 0xf4}, 0x1, 0x0, 0x0, 0x10}, 0x0) setsockopt$IP_VS_SO_SET_ADD(r6, 0x0, 0x482, &(0x7f0000000080)={0x84, @rand_addr, 0x0, 0x0, 'wrr\x00', 0x0, 0x7fffffff, 0x36}, 0x2c) sendmsg$BATADV_CMD_GET_HARDIF(r2, &(0x7f0000000180)={&(0x7f0000000000)={0x10, 0x0, 0x0, 0x20}, 0xc, &(0x7f0000000140)={&(0x7f00000001c0)=ANY=[@ANYBLOB="5e06cd51664089fad1b9e65ee9928f9a6de36af22582223e699c22e45c93016f23b8bfc4d6cf6361099b1ff484e6e1a3ff1b82d02b215260a85ee5c5b9f8eff25ef0c1b6138f398a48a7acd63c382b73be0f6e947a826538e69109b4a2224dad1a84122ad73917d2ee481d53f88c85b2a489d86bbde28053d5941e47dc5e95", @ANYRES16=r5, @ANYBLOB="000826bd0800fbdbdf250500000005002a000100000005003700000000000500330002"], 0x34}}, 0x21) 17:03:21 executing program 1: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x28a, 0xa, {0x0, 0x0, 0x0, 0x0, 0x0, 0xf0}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8}]}, 0x3c}}, 0x0) 17:03:21 executing program 3: r0 = socket$netlink(0x10, 0x3, 0x0) socket(0x0, 0x0, 0x0) sendmsg$nl_route(0xffffffffffffffff, 0x0, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, 
&(0x7f0000000680)={&(0x7f0000000100)=@newlink={0x48, 0x10, 0xffffff1f, 0x0, 0x0, {}, [@IFLA_LINKINFO={0x28, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x18, 0x2, 0x0, 0x1, [@IFLA_BR_MULTI_BOOLOPT={0xc, 0x2e, {0x0, 0x1}}, @IFLA_BR_MCAST_STATS_ENABLED={0x5}]}}}]}, 0x48}}, 0xc000000) [ 2836.470586][T21947] bond1071: entered promiscuous mode [ 2836.476370][T21947] 8021q: adding VLAN 0 to HW filter on device bond1071 17:03:21 executing program 0: pipe(&(0x7f0000000780)={0xffffffffffffffff}) r1 = syz_genetlink_get_family_id$tipc2(&(0x7f0000000800), 0xffffffffffffffff) sendmsg$TIPC_NL_BEARER_SET(r0, &(0x7f0000000900)={&(0x7f00000007c0)={0x10, 0x0, 0x0, 0x800}, 0xc, &(0x7f00000008c0)={&(0x7f0000000840)={0x54, r1, 0x300, 0x70bd2b, 0x25dfdbfe, {}, [@TIPC_NLA_MEDIA={0x40, 0x5, 0x0, 0x1, [@TIPC_NLA_MEDIA_PROP={0x34, 0x2, 0x0, 0x1, [@TIPC_NLA_PROP_MTU={0x8, 0x4, 0x4}, @TIPC_NLA_PROP_PRIO={0x8, 0x1, 0x10}, @TIPC_NLA_PROP_WIN={0x8}, @TIPC_NLA_PROP_TOL={0x8, 0x2, 0xb3c7}, @TIPC_NLA_PROP_WIN={0x8, 0x3, 0x6}, @TIPC_NLA_PROP_PRIO={0x8, 0x1, 0x9}]}, @TIPC_NLA_MEDIA_NAME={0x8, 0x1, 'eth\x00'}]}]}, 0x54}}, 0x0) (async) sendmsg$TIPC_NL_BEARER_SET(r0, &(0x7f0000000900)={&(0x7f00000007c0)={0x10, 0x0, 0x0, 0x800}, 0xc, &(0x7f00000008c0)={&(0x7f0000000840)={0x54, r1, 0x300, 0x70bd2b, 0x25dfdbfe, {}, [@TIPC_NLA_MEDIA={0x40, 0x5, 0x0, 0x1, [@TIPC_NLA_MEDIA_PROP={0x34, 0x2, 0x0, 0x1, [@TIPC_NLA_PROP_MTU={0x8, 0x4, 0x4}, @TIPC_NLA_PROP_PRIO={0x8, 0x1, 0x10}, @TIPC_NLA_PROP_WIN={0x8}, @TIPC_NLA_PROP_TOL={0x8, 0x2, 0xb3c7}, @TIPC_NLA_PROP_WIN={0x8, 0x3, 0x6}, @TIPC_NLA_PROP_PRIO={0x8, 0x1, 0x9}]}, @TIPC_NLA_MEDIA_NAME={0x8, 0x1, 'eth\x00'}]}]}, 0x54}}, 0x0) socket$nl_generic(0x10, 0x3, 0x10) (async) r2 = socket$nl_generic(0x10, 0x3, 0x10) r3 = syz_genetlink_get_family_id$SEG6(&(0x7f0000000480), 0xffffffffffffffff) sendmsg$SEG6_CMD_SETHMAC(r2, &(0x7f0000000100)={0x0, 0xe00, &(0x7f00000000c0)={&(0x7f0000000040)={0x34, r3, 0x1, 0x0, 0x0, {}, [@SEG6_ATTR_HMACKEYID={0x8, 0x3, 0x1f}, @SEG6_ATTR_SECRETLEN={0x5, 0x5, 0x4}, @SEG6_ATTR_SECRET={0x8, 0x4, [0x0]}, @SEG6_ATTR_ALGID={0x5}]}, 0x34}}, 0x0) socket$rxrpc(0x21, 0x2, 0xa) (async) r4 = socket$rxrpc(0x21, 0x2, 0xa) syz_genetlink_get_family_id$batadv(&(0x7f0000007580), 0xffffffffffffffff) (async) r5 = syz_genetlink_get_family_id$batadv(&(0x7f0000007580), 0xffffffffffffffff) sendmsg$BATADV_CMD_GET_GATEWAYS(0xffffffffffffffff, &(0x7f0000007680)={0x0, 0x0, &(0x7f0000007640)={&(0x7f0000000680)=ANY=[@ANYBLOB="46040000d448d30ad21fd12d11a1914f8292d562920554fe53e95a4103d50534775806ee4b4427285f62a50b6a1c0dafe75c834b6db8545ec47f3cf2bee4a7ec293ada7e6f9612375cb4a93dffd92827575429d0b6fb7ab831da88f39afdef016fa9ac9fdf2457ebc7207d0d332d4900b422f6e04d18d0fd956eca28495b70399fdd6f823892bd4b033b9a13693f58f841717edc1a705519421c2c3871f4b4d0867d1286dbbe613da4d67090c0335a3a6d8643f062dd260632dc534324e90686ce", @ANYRES16=r5, @ANYBLOB="ff830500000000000000", @ANYRES32=r4], 0x4}}, 0x0) (async) sendmsg$BATADV_CMD_GET_GATEWAYS(0xffffffffffffffff, &(0x7f0000007680)={0x0, 0x0, &(0x7f0000007640)={&(0x7f0000000680)=ANY=[@ANYBLOB="46040000d448d30ad21fd12d11a1914f8292d562920554fe53e95a4103d50534775806ee4b4427285f62a50b6a1c0dafe75c834b6db8545ec47f3cf2bee4a7ec293ada7e6f9612375cb4a93dffd92827575429d0b6fb7ab831da88f39afdef016fa9ac9fdf2457ebc7207d0d332d4900b422f6e04d18d0fd956eca28495b70399fdd6f823892bd4b033b9a13693f58f841717edc1a705519421c2c3871f4b4d0867d1286dbbe613da4d67090c0335a3a6d8643f062dd260632dc534324e90686ce", @ANYRES16=r5, @ANYBLOB="ff830500000000000000", @ANYRES32=r4], 0x4}}, 0x0) r6 = socket$igmp(0x2, 0x3, 0x2) 
syz_init_net_socket$bt_hci(0x1f, 0x3, 0x1) (async) r7 = syz_init_net_socket$bt_hci(0x1f, 0x3, 0x1) r8 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) r9 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000140)='memory.events\x00', 0x7a05, 0x1700) r10 = accept4$nfc_llcp(r0, &(0x7f0000000280), &(0x7f0000000940)=0x60, 0x800) ioctl$FS_IOC_RESVSP(0xffffffffffffffff, 0x40305828, &(0x7f00000009c0)={0x0, 0x1, 0x0, 0x200}) ioctl$F2FS_IOC_WRITE_CHECKPOINT(r10, 0xf507, 0x0) write$cgroup_int(r9, &(0x7f0000000200), 0xf000) r11 = syz_genetlink_get_family_id$batadv(&(0x7f0000000580), r2) sendmsg$BATADV_CMD_TP_METER(r8, &(0x7f0000000640)={&(0x7f0000000540), 0xc, &(0x7f0000000600)={&(0x7f00000005c0)={0x1c, r11, 0x100, 0x70bd2b, 0x25dfdbfc, {}, [@BATADV_ATTR_BRIDGE_LOOP_AVOIDANCE_ENABLED={0x5}]}, 0x1c}, 0x1, 0x0, 0x0, 0x4000080}, 0x4000000) (async) sendmsg$BATADV_CMD_TP_METER(r8, &(0x7f0000000640)={&(0x7f0000000540), 0xc, &(0x7f0000000600)={&(0x7f00000005c0)={0x1c, r11, 0x100, 0x70bd2b, 0x25dfdbfc, {}, [@BATADV_ATTR_BRIDGE_LOOP_AVOIDANCE_ENABLED={0x5}]}, 0x1c}, 0x1, 0x0, 0x0, 0x4000080}, 0x4000000) sendfile(r9, r8, 0x0, 0x800000000000c) ioctl$sock_SIOCGIFINDEX_80211(r9, 0x8933, &(0x7f0000000500)={'wlan1\x00'}) (async) ioctl$sock_SIOCGIFINDEX_80211(r9, 0x8933, &(0x7f0000000500)={'wlan1\x00'}) r12 = gettid() r13 = socket$inet6(0xa, 0x1, 0x8010000000000084) bind$inet6(r13, &(0x7f00000000c0)={0xa, 0x4e21, 0x0, @empty}, 0x1c) connect$inet6(r13, &(0x7f0000000000)={0xa, 0x4e21, 0x0, @ipv4={'\x00', '\xff\xff', @dev={0xac, 0x14, 0x14, 0x39}}}, 0x1c) setsockopt$inet_sctp6_SCTP_PEER_ADDR_PARAMS(r13, 0x84, 0x9, &(0x7f0000000300)={0x0, @in6={{0xa, 0x0, 0x0, @empty}}, 0x0, 0x0, 0x300, 0x0, 0xb3550aa4ba878394}, 0x9c) (async) setsockopt$inet_sctp6_SCTP_PEER_ADDR_PARAMS(r13, 0x84, 0x9, &(0x7f0000000300)={0x0, @in6={{0xa, 0x0, 0x0, @empty}}, 0x0, 0x0, 0x300, 0x0, 0xb3550aa4ba878394}, 0x9c) write$binfmt_misc(r7, &(0x7f0000000180)=ANY=[@ANYRESHEX, @ANYRES32=0x0, @ANYRES64=r13, @ANYRES64=r12, @ANYRES32=r9, @ANYRESOCT], 0x4) (async) write$binfmt_misc(r7, &(0x7f0000000180)=ANY=[@ANYRESHEX, @ANYRES32=0x0, @ANYRES64=r13, @ANYRES64=r12, @ANYRES32=r9, @ANYRESOCT], 0x4) sendmsg$DEVLINK_CMD_PORT_GET(0xffffffffffffffff, &(0x7f00000004c0)={&(0x7f0000000300)={0x10, 0x0, 0x0, 0x2000000}, 0xc, &(0x7f0000000440)={&(0x7f0000000340)={0xf4, 0x0, 0x800, 0x70bd29, 0x25dfdbfd, {}, [{{@pci={{0x8}, {0x11}}, {0x8}}}, {{@nsim={{0xe}, {0xf, 0x2, {'netdevsim', 0x0}}}, {0x8}}}, {{@pci={{0x8}, {0x11}}, {0x8, 0x3, 0x3}}}, {{@nsim={{0xe}, {0xf, 0x2, {'netdevsim', 0x0}}}, {0x8}}}, {{@pci={{0x8}, {0x11}}, {0x8, 0x3, 0x2}}}, {{@pci={{0x8}, {0x11}}, {0x8, 0x3, 0x1}}}]}, 0xf4}, 0x1, 0x0, 0x0, 0x10}, 0x0) setsockopt$IP_VS_SO_SET_ADD(r6, 0x0, 0x482, &(0x7f0000000080)={0x84, @rand_addr, 0x0, 0x0, 'wrr\x00', 0x0, 0x7fffffff, 0x36}, 0x2c) sendmsg$BATADV_CMD_GET_HARDIF(r2, &(0x7f0000000180)={&(0x7f0000000000)={0x10, 0x0, 0x0, 0x20}, 0xc, &(0x7f0000000140)={&(0x7f00000001c0)=ANY=[@ANYBLOB="5e06cd51664089fad1b9e65ee9928f9a6de36af22582223e699c22e45c93016f23b8bfc4d6cf6361099b1ff484e6e1a3ff1b82d02b215260a85ee5c5b9f8eff25ef0c1b6138f398a48a7acd63c382b73be0f6e947a826538e69109b4a2224dad1a84122ad73917d2ee481d53f88c85b2a489d86bbde28053d5941e47dc5e95", @ANYRES16=r5, @ANYBLOB="000826bd0800fbdbdf250500000005002a000100000005003700000000000500330002"], 0x34}}, 0x21) [ 2836.697727][T21949] bond1071: (slave bridge1098): making interface the new active one [ 2836.727923][T21949] bridge1098: entered promiscuous mode [ 2836.758508][T21949] 
bond1071: (slave bridge1098): Enslaving as an active interface with an up link 17:03:21 executing program 0: pipe(&(0x7f0000000780)={0xffffffffffffffff}) r1 = syz_genetlink_get_family_id$tipc2(&(0x7f0000000800), 0xffffffffffffffff) sendmsg$TIPC_NL_BEARER_SET(r0, &(0x7f0000000900)={&(0x7f00000007c0)={0x10, 0x0, 0x0, 0x800}, 0xc, &(0x7f00000008c0)={&(0x7f0000000840)={0x54, r1, 0x300, 0x70bd2b, 0x25dfdbfe, {}, [@TIPC_NLA_MEDIA={0x40, 0x5, 0x0, 0x1, [@TIPC_NLA_MEDIA_PROP={0x34, 0x2, 0x0, 0x1, [@TIPC_NLA_PROP_MTU={0x8, 0x4, 0x4}, @TIPC_NLA_PROP_PRIO={0x8, 0x1, 0x10}, @TIPC_NLA_PROP_WIN={0x8}, @TIPC_NLA_PROP_TOL={0x8, 0x2, 0xb3c7}, @TIPC_NLA_PROP_WIN={0x8, 0x3, 0x6}, @TIPC_NLA_PROP_PRIO={0x8, 0x1, 0x9}]}, @TIPC_NLA_MEDIA_NAME={0x8, 0x1, 'eth\x00'}]}]}, 0x54}}, 0x0) r2 = socket$nl_generic(0x10, 0x3, 0x10) r3 = syz_genetlink_get_family_id$SEG6(&(0x7f0000000480), 0xffffffffffffffff) sendmsg$SEG6_CMD_SETHMAC(r2, &(0x7f0000000100)={0x0, 0xe00, &(0x7f00000000c0)={&(0x7f0000000040)={0x34, r3, 0x1, 0x0, 0x0, {}, [@SEG6_ATTR_HMACKEYID={0x8, 0x3, 0x1f}, @SEG6_ATTR_SECRETLEN={0x5, 0x5, 0x4}, @SEG6_ATTR_SECRET={0x8, 0x4, [0x0]}, @SEG6_ATTR_ALGID={0x5}]}, 0x34}}, 0x0) r4 = socket$rxrpc(0x21, 0x2, 0xa) r5 = syz_genetlink_get_family_id$batadv(&(0x7f0000007580), 0xffffffffffffffff) sendmsg$BATADV_CMD_GET_GATEWAYS(0xffffffffffffffff, &(0x7f0000007680)={0x0, 0x0, &(0x7f0000007640)={&(0x7f0000000680)=ANY=[@ANYBLOB="46040000d448d30ad21fd12d11a1914f8292d562920554fe53e95a4103d50534775806ee4b4427285f62a50b6a1c0dafe75c834b6db8545ec47f3cf2bee4a7ec293ada7e6f9612375cb4a93dffd92827575429d0b6fb7ab831da88f39afdef016fa9ac9fdf2457ebc7207d0d332d4900b422f6e04d18d0fd956eca28495b70399fdd6f823892bd4b033b9a13693f58f841717edc1a705519421c2c3871f4b4d0867d1286dbbe613da4d67090c0335a3a6d8643f062dd260632dc534324e90686ce", @ANYRES16=r5, @ANYBLOB="ff830500000000000000", @ANYRES32=r4], 0x4}}, 0x0) r6 = socket$igmp(0x2, 0x3, 0x2) r7 = syz_init_net_socket$bt_hci(0x1f, 0x3, 0x1) r8 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) r9 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000140)='memory.events\x00', 0x7a05, 0x1700) r10 = accept4$nfc_llcp(r0, &(0x7f0000000280), &(0x7f0000000940)=0x60, 0x800) ioctl$FS_IOC_RESVSP(0xffffffffffffffff, 0x40305828, &(0x7f00000009c0)={0x0, 0x1, 0x0, 0x200}) ioctl$F2FS_IOC_WRITE_CHECKPOINT(r10, 0xf507, 0x0) write$cgroup_int(r9, &(0x7f0000000200), 0xf000) r11 = syz_genetlink_get_family_id$batadv(&(0x7f0000000580), r2) sendmsg$BATADV_CMD_TP_METER(r8, &(0x7f0000000640)={&(0x7f0000000540), 0xc, &(0x7f0000000600)={&(0x7f00000005c0)={0x1c, r11, 0x100, 0x70bd2b, 0x25dfdbfc, {}, [@BATADV_ATTR_BRIDGE_LOOP_AVOIDANCE_ENABLED={0x5}]}, 0x1c}, 0x1, 0x0, 0x0, 0x4000080}, 0x4000000) sendfile(r9, r8, 0x0, 0x800000000000c) ioctl$sock_SIOCGIFINDEX_80211(r9, 0x8933, &(0x7f0000000500)={'wlan1\x00'}) r12 = gettid() r13 = socket$inet6(0xa, 0x1, 0x8010000000000084) bind$inet6(r13, &(0x7f00000000c0)={0xa, 0x4e21, 0x0, @empty}, 0x1c) connect$inet6(r13, &(0x7f0000000000)={0xa, 0x4e21, 0x0, @ipv4={'\x00', '\xff\xff', @dev={0xac, 0x14, 0x14, 0x39}}}, 0x1c) setsockopt$inet_sctp6_SCTP_PEER_ADDR_PARAMS(r13, 0x84, 0x9, &(0x7f0000000300)={0x0, @in6={{0xa, 0x0, 0x0, @empty}}, 0x0, 0x0, 0x300, 0x0, 0xb3550aa4ba878394}, 0x9c) write$binfmt_misc(r7, &(0x7f0000000180)=ANY=[@ANYRESHEX, @ANYRES32=0x0, @ANYRES64=r13, @ANYRES64=r12, @ANYRES32=r9, @ANYRESOCT], 0x4) sendmsg$DEVLINK_CMD_PORT_GET(0xffffffffffffffff, &(0x7f00000004c0)={&(0x7f0000000300)={0x10, 0x0, 0x0, 0x2000000}, 0xc, 
&(0x7f0000000440)={&(0x7f0000000340)={0xf4, 0x0, 0x800, 0x70bd29, 0x25dfdbfd, {}, [{{@pci={{0x8}, {0x11}}, {0x8}}}, {{@nsim={{0xe}, {0xf, 0x2, {'netdevsim', 0x0}}}, {0x8}}}, {{@pci={{0x8}, {0x11}}, {0x8, 0x3, 0x3}}}, {{@nsim={{0xe}, {0xf, 0x2, {'netdevsim', 0x0}}}, {0x8}}}, {{@pci={{0x8}, {0x11}}, {0x8, 0x3, 0x2}}}, {{@pci={{0x8}, {0x11}}, {0x8, 0x3, 0x1}}}]}, 0xf4}, 0x1, 0x0, 0x0, 0x10}, 0x0) setsockopt$IP_VS_SO_SET_ADD(r6, 0x0, 0x482, &(0x7f0000000080)={0x84, @rand_addr, 0x0, 0x0, 'wrr\x00', 0x0, 0x7fffffff, 0x36}, 0x2c) sendmsg$BATADV_CMD_GET_HARDIF(r2, &(0x7f0000000180)={&(0x7f0000000000)={0x10, 0x0, 0x0, 0x20}, 0xc, &(0x7f0000000140)={&(0x7f00000001c0)=ANY=[@ANYBLOB="5e06cd51664089fad1b9e65ee9928f9a6de36af22582223e699c22e45c93016f23b8bfc4d6cf6361099b1ff484e6e1a3ff1b82d02b215260a85ee5c5b9f8eff25ef0c1b6138f398a48a7acd63c382b73be0f6e947a826538e69109b4a2224dad1a84122ad73917d2ee481d53f88c85b2a489d86bbde28053d5941e47dc5e95", @ANYRES16=r5, @ANYBLOB="000826bd0800fbdbdf250500000005002a000100000005003700000000000500330002"], 0x34}}, 0x21) 17:03:21 executing program 5: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x252, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) [ 2836.876556][T21951] bond647: entered promiscuous mode [ 2836.906795][T21951] 8021q: adding VLAN 0 to HW filter on device bond647 17:03:21 executing program 4: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x28a, 0x0, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x3}]}, 0x3c}}, 0x0) [ 2837.062884][T21956] bond1003: entered promiscuous mode [ 2837.070699][T21956] 8021q: adding VLAN 0 to HW filter on device bond1003 [ 2837.131729][T21959] bond771: entered promiscuous mode [ 2837.137372][T21959] 8021q: adding VLAN 0 to HW filter on device bond771 [ 2837.291560][T21961] bond1003: (slave bridge1069): making interface the new active one [ 2837.302400][T21961] bridge1069: entered promiscuous mode [ 2837.323968][T21961] bond1003: (slave bridge1069): Enslaving as an active interface with an up link 17:03:22 executing program 2: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) 
sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0xfa030000, 0x0, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) 17:03:22 executing program 1: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x28a, 0xa, {0x0, 0x0, 0x0, 0x0, 0x0, 0x600}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8}]}, 0x3c}}, 0x0) 17:03:22 executing program 3: r0 = socket$netlink(0x10, 0x3, 0x0) socket(0x0, 0x0, 0x0) sendmsg$nl_route(0xffffffffffffffff, 0x0, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000100)=@newlink={0x48, 0x10, 0xffffff1f, 0x0, 0x0, {}, [@IFLA_LINKINFO={0x28, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x18, 0x2, 0x0, 0x1, [@IFLA_BR_MULTI_BOOLOPT={0xc, 0x2e, {0x0, 0x1}}, @IFLA_BR_MCAST_STATS_ENABLED={0x5}]}}}]}, 0x48}}, 0xe000000) [ 2837.505515][T21983] bond1072: entered promiscuous mode [ 2837.526115][T21983] 8021q: adding VLAN 0 to HW filter on device bond1072 [ 2837.693738][T21985] bond1072: (slave bridge1099): making interface the new active one [ 2837.703572][T21985] bridge1099: entered promiscuous mode [ 2837.716615][T21985] bond1072: (slave bridge1099): Enslaving as an active interface with an up link 17:03:22 executing program 5: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x261, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) [ 2837.760584][T21993] bond648: entered promiscuous mode [ 2837.766312][T21993] 8021q: adding VLAN 0 to HW filter on device bond648 [ 2837.778244][T21996] netlink: 8 bytes leftover after parsing attributes in process `syz-executor.4'. 
17:03:22 executing program 4: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x28a, 0x0, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x5}]}, 0x3c}}, 0x0) 17:03:22 executing program 0: pipe(&(0x7f0000000780)={0xffffffffffffffff}) r1 = syz_genetlink_get_family_id$tipc2(&(0x7f0000000800), 0xffffffffffffffff) sendmsg$TIPC_NL_BEARER_SET(r0, &(0x7f0000000900)={&(0x7f00000007c0)={0x10, 0x0, 0x0, 0x800}, 0xc, &(0x7f00000008c0)={&(0x7f0000000840)={0x54, r1, 0x300, 0x70bd2b, 0x25dfdbfe, {}, [@TIPC_NLA_MEDIA={0x40, 0x5, 0x0, 0x1, [@TIPC_NLA_MEDIA_PROP={0x34, 0x2, 0x0, 0x1, [@TIPC_NLA_PROP_MTU={0x8, 0x4, 0x4}, @TIPC_NLA_PROP_PRIO={0x8, 0x1, 0x10}, @TIPC_NLA_PROP_WIN={0x8}, @TIPC_NLA_PROP_TOL={0x8, 0x2, 0xb3c7}, @TIPC_NLA_PROP_WIN={0x8, 0x3, 0x6}, @TIPC_NLA_PROP_PRIO={0x8, 0x1, 0x9}]}, @TIPC_NLA_MEDIA_NAME={0x8, 0x1, 'eth\x00'}]}]}, 0x54}}, 0x0) r2 = socket$nl_generic(0x10, 0x3, 0x10) r3 = syz_genetlink_get_family_id$SEG6(&(0x7f0000000480), 0xffffffffffffffff) sendmsg$SEG6_CMD_SETHMAC(r2, &(0x7f0000000100)={0x0, 0xe00, &(0x7f00000000c0)={&(0x7f0000000040)={0x34, r3, 0x1, 0x0, 0x0, {}, [@SEG6_ATTR_HMACKEYID={0x8, 0x3, 0x1f}, @SEG6_ATTR_SECRETLEN={0x5, 0x5, 0x4}, @SEG6_ATTR_SECRET={0x8, 0x4, [0x0]}, @SEG6_ATTR_ALGID={0x5}]}, 0x34}}, 0x0) r4 = socket$rxrpc(0x21, 0x2, 0xa) r5 = syz_genetlink_get_family_id$batadv(&(0x7f0000007580), 0xffffffffffffffff) sendmsg$BATADV_CMD_GET_GATEWAYS(0xffffffffffffffff, &(0x7f0000007680)={0x0, 0x0, &(0x7f0000007640)={&(0x7f0000000680)=ANY=[@ANYBLOB="46040000d448d30ad21fd12d11a1914f8292d562920554fe53e95a4103d50534775806ee4b4427285f62a50b6a1c0dafe75c834b6db8545ec47f3cf2bee4a7ec293ada7e6f9612375cb4a93dffd92827575429d0b6fb7ab831da88f39afdef016fa9ac9fdf2457ebc7207d0d332d4900b422f6e04d18d0fd956eca28495b70399fdd6f823892bd4b033b9a13693f58f841717edc1a705519421c2c3871f4b4d0867d1286dbbe613da4d67090c0335a3a6d8643f062dd260632dc534324e90686ce", @ANYRES16=r5, @ANYBLOB="ff830500000000000000", @ANYRES32=r4], 0x4}}, 0x0) r6 = socket$igmp(0x2, 0x3, 0x2) r7 = syz_init_net_socket$bt_hci(0x1f, 0x3, 0x1) r8 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) r9 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000140)='memory.events\x00', 0x7a05, 0x1700) r10 = accept4$nfc_llcp(r0, &(0x7f0000000280), &(0x7f0000000940)=0x60, 0x800) ioctl$FS_IOC_RESVSP(0xffffffffffffffff, 0x40305828, &(0x7f00000009c0)={0x0, 0x1, 0x0, 0x200}) ioctl$F2FS_IOC_WRITE_CHECKPOINT(r10, 0xf507, 0x0) write$cgroup_int(r9, &(0x7f0000000200), 0xf000) r11 = syz_genetlink_get_family_id$batadv(&(0x7f0000000580), r2) sendmsg$BATADV_CMD_TP_METER(r8, &(0x7f0000000640)={&(0x7f0000000540), 0xc, &(0x7f0000000600)={&(0x7f00000005c0)={0x1c, r11, 0x100, 0x70bd2b, 0x25dfdbfc, {}, [@BATADV_ATTR_BRIDGE_LOOP_AVOIDANCE_ENABLED={0x5}]}, 0x1c}, 0x1, 0x0, 0x0, 0x4000080}, 0x4000000) sendfile(r9, r8, 0x0, 0x800000000000c) 
ioctl$sock_SIOCGIFINDEX_80211(r9, 0x8933, &(0x7f0000000500)={'wlan1\x00'}) r12 = gettid() r13 = socket$inet6(0xa, 0x1, 0x8010000000000084) bind$inet6(r13, &(0x7f00000000c0)={0xa, 0x4e21, 0x0, @empty}, 0x1c) connect$inet6(r13, &(0x7f0000000000)={0xa, 0x4e21, 0x0, @ipv4={'\x00', '\xff\xff', @dev={0xac, 0x14, 0x14, 0x39}}}, 0x1c) setsockopt$inet_sctp6_SCTP_PEER_ADDR_PARAMS(r13, 0x84, 0x9, &(0x7f0000000300)={0x0, @in6={{0xa, 0x0, 0x0, @empty}}, 0x0, 0x0, 0x300, 0x0, 0xb3550aa4ba878394}, 0x9c) write$binfmt_misc(r7, &(0x7f0000000180)=ANY=[@ANYRESHEX, @ANYRES32=0x0, @ANYRES64=r13, @ANYRES64=r12, @ANYRES32=r9, @ANYRESOCT], 0x4) sendmsg$DEVLINK_CMD_PORT_GET(0xffffffffffffffff, &(0x7f00000004c0)={&(0x7f0000000300)={0x10, 0x0, 0x0, 0x2000000}, 0xc, &(0x7f0000000440)={&(0x7f0000000340)={0xf4, 0x0, 0x800, 0x70bd29, 0x25dfdbfd, {}, [{{@pci={{0x8}, {0x11}}, {0x8}}}, {{@nsim={{0xe}, {0xf, 0x2, {'netdevsim', 0x0}}}, {0x8}}}, {{@pci={{0x8}, {0x11}}, {0x8, 0x3, 0x3}}}, {{@nsim={{0xe}, {0xf, 0x2, {'netdevsim', 0x0}}}, {0x8}}}, {{@pci={{0x8}, {0x11}}, {0x8, 0x3, 0x2}}}, {{@pci={{0x8}, {0x11}}, {0x8, 0x3, 0x1}}}]}, 0xf4}, 0x1, 0x0, 0x0, 0x10}, 0x0) setsockopt$IP_VS_SO_SET_ADD(r6, 0x0, 0x482, &(0x7f0000000080)={0x84, @rand_addr, 0x0, 0x0, 'wrr\x00', 0x0, 0x7fffffff, 0x36}, 0x2c) sendmsg$BATADV_CMD_GET_HARDIF(r2, &(0x7f0000000180)={&(0x7f0000000000)={0x10, 0x0, 0x0, 0x20}, 0xc, &(0x7f0000000140)={&(0x7f00000001c0)=ANY=[@ANYBLOB="5e06cd51664089fad1b9e65ee9928f9a6de36af22582223e699c22e45c93016f23b8bfc4d6cf6361099b1ff484e6e1a3ff1b82d02b215260a85ee5c5b9f8eff25ef0c1b6138f398a48a7acd63c382b73be0f6e947a826538e69109b4a2224dad1a84122ad73917d2ee481d53f88c85b2a489d86bbde28053d5941e47dc5e95", @ANYRES16=r5, @ANYBLOB="000826bd0800fbdbdf250500000005002a000100000005003700000000000500330002"], 0x34}}, 0x21) [ 2837.973465][T21999] bond1004: entered promiscuous mode [ 2837.981220][T21999] 8021q: adding VLAN 0 to HW filter on device bond1004 [ 2838.040447][T22004] bond1004: (slave bridge1070): making interface the new active one [ 2838.048533][T22004] bridge1070: entered promiscuous mode [ 2838.064274][T22004] bond1004: (slave bridge1070): Enslaving as an active interface with an up link 17:03:23 executing program 2: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0xfaab1600, 0x0, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) 17:03:23 executing program 3: r0 = socket$netlink(0x10, 0x3, 0x0) socket(0x0, 0x0, 0x0) sendmsg$nl_route(0xffffffffffffffff, 0x0, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000100)=@newlink={0x48, 0x10, 0xffffff1f, 0x0, 0x0, {}, [@IFLA_LINKINFO={0x28, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x18, 0x2, 0x0, 0x1, [@IFLA_BR_MULTI_BOOLOPT={0xc, 0x2e, {0x0, 0x1}}, @IFLA_BR_MCAST_STATS_ENABLED={0x5}]}}}]}, 0x48}}, 0x10000000) [ 2838.209725][T22007] bond772: entered promiscuous mode [ 
2838.233378][T22007] 8021q: adding VLAN 0 to HW filter on device bond772 17:03:23 executing program 1: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x28a, 0xa, {0x0, 0x0, 0x0, 0x0, 0x0, 0xa00}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8}]}, 0x3c}}, 0x0) 17:03:23 executing program 4: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x28a, 0x0, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x6}]}, 0x3c}}, 0x0) [ 2838.494255][T22013] bond649: entered promiscuous mode [ 2838.501420][T22013] 8021q: adding VLAN 0 to HW filter on device bond649 [ 2838.587056][T22019] bond1073: entered promiscuous mode [ 2838.601910][T22019] 8021q: adding VLAN 0 to HW filter on device bond1073 17:03:23 executing program 0: unshare(0x4c060000) unshare(0x4020580) r0 = bpf$BPF_PROG_RAW_TRACEPOINT_LOAD(0x5, &(0x7f0000000200)={0x11, 0x3, &(0x7f00000000c0)=ANY=[@ANYBLOB="18000000160100000000000000080eff95"], &(0x7f0000000040)='GPL\x00', 0x0, 0x0, 0x0, 0x0, 0x0, '\x00', 0x0, 0x2, 0xffffffffffffffff, 0x8, 0x0, 0x0, 0x10, 0x0}, 0x80) bpf$BPF_RAW_TRACEPOINT_OPEN(0x11, &(0x7f00000003c0)={&(0x7f0000000380)='rcu_utilization\x00', r0}, 0x10) r1 = socket$alg(0x26, 0x5, 0x0) bind$alg(r1, &(0x7f0000001300)={0x26, 'skcipher\x00', 0x0, 0x0, 'ecb(arc4)\x00'}, 0x58) unshare(0x2000000) r2 = accept$alg(r1, 0x0, 0x0) socket$packet(0x11, 0x0, 0x300) socket(0x0, 0x803, 0x0) r3 = socket(0x2, 0xa, 0x2) setsockopt$ALG_SET_KEY(r3, 0x117, 0x1, &(0x7f0000000000), 0x0) sendto$packet(0xffffffffffffffff, 0x0, 0x0, 0x0, 0x0, 0x0) r4 = socket$unix(0x1, 0x2, 0x0) ioctl$ifreq_SIOCGIFINDEX_vcan(r4, 0x8933, &(0x7f0000000100)={'vcan0\x00'}) openat$cgroup_ro(0xffffffffffffffff, &(0x7f0000000340)='cpuacct.usage_all\x00', 0x0, 0x0) setsockopt$ALG_SET_KEY(r1, 0x117, 0x1, &(0x7f0000000100)="8b", 0x1) syz_genetlink_get_family_id$nl80211(&(0x7f0000000500), r2) recvmmsg(r2, &(0x7f0000006100), 0x400000000000682, 0x0, 0x0) bpf$BPF_RAW_TRACEPOINT_OPEN(0x11, &(0x7f00000000c0)={&(0x7f0000000080)='sched_switch\x00'}, 0x10) r5 = socket$nl_route(0x10, 0x3, 0x0) r6 = socket(0x10, 0x3, 0x0) r7 = socket$nl_route(0x10, 0x3, 0x0) r8 = socket(0x10, 0x2, 0x0) sendmsg$nl_route_sched(r8, &(0x7f0000000180)={0x0, 0x0, 
&(0x7f0000000140)={0x0, 0x140}}, 0x0) getsockname$packet(r8, &(0x7f0000000080)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000100)=0xab) sendmsg$nl_route(r7, &(0x7f0000000000)={0x0, 0x0, &(0x7f0000000140)={&(0x7f0000000040)=ANY=[@ANYBLOB="3c0000001000010400eeffffffffffff00000000", @ANYRES32=r9, @ANYBLOB="01000000010000001c0012000c000100627269646765"], 0x3c}}, 0x0) sendmsg$nl_route_sched(r6, &(0x7f0000005840)={0x0, 0x0, &(0x7f0000000780)={&(0x7f0000000240)=ANY=[@ANYBLOB="4800000024000b0e00"/20, @ANYRES32=r9, @ANYBLOB="00000000ffffffff0000000008000100687462001c0002001800020003"], 0x48}}, 0x0) sendmsg$nl_route_sched(r5, &(0x7f00000000c0)={0x0, 0x0, &(0x7f0000000180)={&(0x7f00000005c0)=@newtfilter={0x44, 0x2c, 0xd27, 0x0, 0x0, {0x0, 0x0, 0x0, r9, {}, {}, {0xfff3}}, [@filter_kind_options=@f_u32={{0x8}, {0x18, 0x2, [@TCA_U32_SEL={0x14, 0x5, {0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80080}}]}}]}, 0x44}}, 0x0) sendmmsg$inet(r1, &(0x7f0000000ac0)=[{{&(0x7f0000000000)={0x2, 0x4e23, @multicast1}, 0x10, &(0x7f00000006c0)=[{&(0x7f0000000140)="bb37bf39d354cc138cbc532ac328ec5b409b159418aac6ec7a1cd80facb3b7e63c0c55d036031a802be8f090fc0902733a5acf9ee514855758911d4fa3a1376f03445b23c3868956c2786c5f27f00656578636343b", 0x55}, {&(0x7f0000000280)="61a28ce68834b0cc9274e21058393e9c64ba131b9a0c8f2d134cc6c78e9986cb1a9fdb90b2e18597651bf4090b918ba9a11af5b6cc5f7fa2678edc71a5811c620f9d8eeba8aff2c5464665ad683a0bd9690fac4d748d20de34599088a67d3fc0bd01d0b4ddccf731f14d53d8ebaaef40e2bf2c73fb407b3f26bbacedf962", 0x7e}, {&(0x7f0000000400)="4d2a467716325b50558d0ef340cad4f339c16115ce3a4570ca5a7cbe0ee4178467dd26241b6b20bfb0b1114c098b43f36037ccbe79d338a9d09bd84805377463bbddf9feef315e7066000b74d48c432f2b57bd8ec6f1d0659a838a60e1b39131eeac7b82fca12473229287dc5a74b6c72337564461ced7f6", 0x78}, {&(0x7f0000000480)="c7ef1541680d7d5e1743f4187f18bf3cbbb6baaaf1fcb8460dfd6a4215fc86fd08b26b65ee4399696b61fea1f79d380896809191087581a24ce707e0fabf3d39aa7c33fe6c6c73c1de2938e03c4e57f65c343023be1a543db9da8b4e9f02b1cebe47e71a4a902d6493fe3156", 0x6c}, 
{&(0x7f0000001380)="0fb11281c27bdc3abc3d19c089e856b7ceaee56bfb629d06da0555e8b1244adfa224986e8ec3966ecd2d35857f397f5ee1b3660f5571249ec0b1f1bc12c9ad2ca33e63115b38f80d3e3d2c04ee2b37d05f6dcff652db95f43d67adfc772472fa33388c0a4fbeeb7cae249da0f009972f4802b3772614df944b111cfcfbcea5e7195f53b418e23e2b9c94f66a29a91acf808cf95fbe8684dbab3ca1b68a0eb5fa3fc19403a2febc374087049f35b394acd7e682a335c6c90c65fac4e4a64e6acdbd002333964edfd396736ed539fcc73c1d1d1cd8ea2225c4254484570b2556e201bf33d2f677d9cb9ab1e47f8f67728393b467b391c54a01dc716aa71ec382120a07fde44bd45dacc770222b44a25d7c57a837de9c61566dec9dc2b025a4dc60c2603a8e0d165853d33985f7503f01bc852440fb7e2b5182167c1ffaccc59e2cc71cbe4078641e72134dd2cff06c5a36d3207e24ce5c09acea0b355ef63c7e10bb095c10d8110709ef7ee3ec2233d160f4963f2a829de831f3c457f8f14597a52388cc876b5fd541a91a4c616cc62921a627446b3980c06450b7ff1729da5b2b1c8a173c531ebe509a250fda5f5aaa8d4245f908665ddbb63530b8620f9bb49f20ac73c5d3601e879b2cec954941d2f52d9eb4b56485fa5b56931492485b8e4f187892d2a00d5a7c0986125b4dcc91c2aaf1d5753b03f438e3dea6fcfea827ae3f4a6f12a48e7e910ed9dcacbd1b4a7f1abd7262d28e6ec717296fc423e2db6613d73ac9e612e3f9bad4a5a3aae4e2150b3f9439fb79891931fc7266339f97f718b95464b9987f8e3bf1086e9ace776dea0e03d88a09918205117e2866a53569e7e5a2409fab97c6e903cbbf464df34dbfa07558727518e20f446794106d1273848c04cded190cb55e2849d63270a47858899721f990fd95f39ad65f2b19f3cc5f0a399fa4318dfea27cbced4261aaba99ec806917e912fcb8472187fed255ddc6cf4f48c752d98adbd3d1a96ca7cafca1d42499dab3f56a99b728e763ef29eb548153cc98a2b096ef4cfd3e3c622b0fdb11d8505925e906b8ab3c9c6903c7d0843ef47774f73b35bfe77edbb09354fe95c2eea4d96e9ae38bd3bd83fb27e807803df008b0b162053a86ffa8c0ea1ff98330b024eb0bd7083d3e2bb1c900c7e3d53f118ab1d54524cb167b02cda6232bac806ce629039a5b00a59926311ce08274e7f24eace8b559f3283dc3cbc4228c121fc33fa2f6188364acb4f16422adb5aefa2e826ecbc9eb1c943cb8d411299797c48883f93f9c23327e23159cd19d6b9417ca1086170210e3c56ac6cb8550c6eddd2e08fcaddd7809d8dab229c17265c4f5fc597b1ff2027a9b3344bad0324a7405fad2e6b083cc4e32ccc3ef034ded856e5b8ff8b0f59e85e61895ae8d11cf1399ed8c4c5807014eef632718978e489af24d2c6f1875842bc8859cf714900a157d475bd550ad7166784a8d77643275ab0887c105f7f5360f776807c5ce1c16a635f2548eab0db8984f1847044f393772160350324687e08e5099787e5ac371d4abb44c8ea60d280371137aaaf45562751b921174a894a835dd5256427b9cfb3fe64ac5dd1bb0cd8362ed61a6a72a36644ce8aa5185f0d35809505ed64638b9f543332afbf8e4fbe78efa70576c094250ec28411c78ced8153aedbde3133e887a98ef55191899082261eed06c01ee37ac8f1ecdb39373c77cd59721ff1d0f3105394e6fcf93020247e22f43b1cc44b6d679d3e8ecf265e2f59809fe34937ca6771d905420f646e8395b4544afa15a71d94c3891beda56aa7d0adca688c25227341a5c651582142ecb4d50846e4df257da65c60eadaeda89443821b6344161d449cfa16275fa628fed385fccf6eeb97197f9018af73f41fc05958d54aa305e15484b665c751c4c408bcbed037b452b48b0c9232944c9dbcc10814dc40dfff3ea2464c06b7f0f8c207cf2604d7e2b831d3930effb773f20eb53820bae4bc5a0ae661470d3b5288e2e1183bb195f9d2a0ef806cde5ff401db0116391c8167b97b5c3bdd25245b6d4cf7eae611fdac496b2706de69c3dcf728c316025b9958f8d015670cec0c4cc17d77430c6fb44add2bd2c34bcbc3a987f1e96839a0455db64dc7b5ffccc288815986118aea5d0d1fe11bd412d176b230729aea32211c9ff68474e0854343e9b53608258bc0667fa27161d3073d78508848fc9ff920a54a097665dd438adbf317b28bb10fb11ba822fe643a45f9a126a99e77506cd0e5907becd0a84864ee3ff408feb69bca6ec3978f8079677d675eba078091068e4170c5041c5f724f879f23a42445a0ec42940749ff3ac26c56375459b98e5fa251b9b4129522c847da684f8b83a6cca10fd85fc13c46965a3193d091d7eabfc3974230efebaad535b8b0d69202125dd3cbd6bdd0dd056ce47fbda5f09acd005a49b77c0d599ba32c4b8ee9cf5ef1ec1cc60bfc5f24d3e1a2046b49f6
fe1c48f9c427c66c700605fb7b51f9367ad91f4f2705e1eb5c8a4a01845f6602ad24ed692b90f5979aaad96472f052c2a8b9c748f12b83614d66dd608c8b263cf36197abc23772c2d506c9f426b870acb71f94343d798576edf31bd249f241470c1c50f2fa43728a704d3f980915852d243bb4a7cb0b378971a431644283aa9be9ac80f45683e61ac1364fb8d5889196bc65995d73209642aa78c06d2a6537b7f7a661043068a14bd6e950e9bc0c60433d3f7b2f113d50cd92254b77cbce781fd4ec8c175405b9a455e2bc6dabf667e94e96bae787f9f6bbfc7ddd6982567d979a16fe9eb3bb156027190c24e5687fbdbfc0c1e83cf1e95f90588b5aa03d8d9ef78fbcd0d91016d2c06bd12d049d4e2b0f39cdeef3a6c8b5fcece455ff8d2cfa5ca3e5a0b6d0814e1729bf6e6ce19c5dfd9346eb1564fc1c9e69149d416a1925d8ab155091ea0b98d85a52d9ad4df89d700fc15be69f63459620c29dadbeaa5a14f0bfd981f3b75d2cddbfde5adb5bc75896f6be000a349e5e832eb65a39882007665cc147b18fbc7a06f7855e07c9a2b1757cf774090557edd873c8195dc19e26a5863e978ca70c2ffdf39c503227c37907677cbc535715862ce2f200559ae7939a1060cf8d2a3175ea9a29a7bfd776c660a6eadaeb854a2dbd6013bc65fde87a55150c685cfe031b6ecd54f8a60a91ed4b034122baf504aae975626c39c2393c1f6313a1f1047e7258a055b2efc91135c430bca938e1532956c612c0d52f2615befd55213496b450c222ad06c62f3f1caa87a18774a15e53840e2f2c02e2372afdfd8092916255b83185502c2b297827c4c7db6c9e49206cb9fc6744e744f6a9c8f99573bd0be35b9c1c4def96f51523343fe6ef73df03d766ef442dc0de08fc45f0125ef5a1cf3927f791a6acfc20d9debf5dfcdb12dd23576784007444bd0449114f2ee6cbda6ea7ccaa7089fa88bdbae9b3b82cf384ff3bc03d9ea17923dfca0a61c02eedc762c5b4ec179582cf134831db6c0716abe5b74eecbd424bb1234764fd6ec10656706d76359c5e0cba4e03a2983d0e9f987bebdbe7ca305df2485e5f43a9d406cbedfbd3217ecc8cff78b73e7450f2ce96a3bdc06c243affadfdb1db94b262dd4735d5c4720928be6dbcc0a6855f17e083222e3ba4f3746dc9460b394b6c9cd4c8775fa8f3979bb0a1f7ece63a727be102be339c359da9ca2107d495a0fb8371099c4dd6f0f49312542077b7ed3c3c62f8d03fe20bdb1c8151c918d4f6b760b1e63d86d54c25ca2ecb9c432115874b1230384dc548fbcfd2d4dbef3f51380863eb37862104c20911c4bcd594f7de923c620b21357a592ae40f897d946ba77f7a2062bf1a2b08ea78d1bac6880a7c04bd4c5a817c0f159fb59a257019898cfbb390a2dd3c9ce476f50d36903ee9066428e42ef4258f836dfc236fc4424a14d40abf966f783c19e1f3d709bfdb0b96f0e98cf8d0fde38709b420998917fe017b3434713de7682bfbb1c60ea4acb67047ad0af4a5e980d8070138c2d1dd0ed24d2b0bf1ff83345b0e165b04494c6bf651917fb1ecc2052d59c4fcb16b00882b86ea535b5e895f1556cafff548813ed9ecd69a605260a7d613b928132ddcd921c2e0c3cb02dcf8d1fa4b9dfc50ca5c6b58cf057a4ba5f15f17f149c999c3b3a6e9a0ca71bd9df1742547fa99a3ca9bef7138d0a523ed75fc16ffbb8db66b18a4ea7587fe277f25015e301743c95473df8683518741f337fbe6f37517bae8d6f01dd3f664521312bc11c09f68e4ce1654afc8d6e80a514d112933f94647df6ddfc24e879bfb9ea6198de6a9ee49880a671858f9fb56ad16ed9315d3a104119b2a84c84c42adbd057919d8d778db5473f5281719fbc01a94ba77bce51d73a20b7776ae2d687adb85eed4b15f4c756a0a1d61e596e7cdd75d64b9005ea9538ce6300d2d8f7b851e30d1ba594da3ffef617aea1d9e111bcabd9a14f4b8b1c0dd43a1a7241c7024d469c0948175f8b9a32bf1124612e206b7d06d3fb391a9359c26e930213665133f4d9d1fb9f936afc4463f87ec6efe0df32b9b3e564fcf3f44b62f608ac52e8c24d2090e6b103345a3a74517e355c7e0334a4192a0092018b00bd4917cb0d86c9e19021ab29fb848c06022125e0c34399b1ee28ef6ac7e9836062ec89af05d191f234622ede0ceeb4fbdf00c3459b8d165e7c0fd9d601782b53f79e138cba90581b6016664d0aa290e8d9fe7c122cdbebcd2a53ea40e479254e16fa8c8b9f7286d0f1baf74e3755c5ca87fa06acd119e0a6ee3db0821138ebf8ede18217c066009f14f68800a0e746f23e7298a16506968a54a85dae2c6bbedf0dcbf2e9edbf883f6b94bdb035d6e875d8b5eb228b269b5ac60d1cd4447c5c875691c1e3812753b02de739ac7eaf59192888abfe32337186c97620c6b543727efcd29eb47cd876d0882663e4b88407e1e92f60aeb278f23faf5c2367e3e002ec57a35dbbe5894d905448d30ac59f
a751b6493a6e70c8e6ded30f3648c297c0c6759c2dccfc9d22b1f442a89904049242587579940243a8f1b4a35ee7b973a03f4a875842271280677460104d1396bf694b60d32b00dac4d8dacf4db7c8b82f00eab73de3574e0d178f475ce0ce315a8a991388fb0c448657e384f98cb3b54ba2c7ace2806c8a04413ecda74f66aab99269737122cc2b386f784654c210e201f99b8bf65dcc40d7ee1bf135d43be2b3fe328f3c91a5ef69d4e6e6620e886ff6a0ed4398c1a938def9481222109c9b1dfd5e74eb30605e9d2afbbded2342b77ebd19fa60a3b784167fa16f0afa06b4398a2c9dab80c639844f72d871ef01c65941fc5c7027b1ffbf8f49f77382f37d13c1fd7b058cdb500754645081ee9d1f6116e5b95da0bfa3bcbfafceaec9b2bedc23bff4621daddbd0dc64daa7a48cf84ff5386022cc7dc591c9819a54611c14ae1590c8bdb5c02a4724809e60a34f9e977db2622be0b2c9bb54f986f8483c54be31210f970127f1fffb407eb124c95292bc7623c38dcc9d37bab91263067b67b12ba38af17e0e82e9805117a4a9c791fdb1307cff2cb225479885de935e65a1cbdbf7179d2339b80e024105321a4d6031c9f9be1bdb032bbf5a8edd359a634d397e9af4f069db89decd37f994344987930bdfe18cac9761fdb22184bd8e0b88a9bc2fb29cc5aa4e0e6a27ff1d3666c49347551e8798404f78efe42f88a7c5e93b31f7e77efc4e4c5a859c66ccb8ff4f056a53bf5f86b42f01343b9dfcff9df738899e91b78fd3", 0x1000}, {&(0x7f0000000540)="35cf95640d253cd8f65da8def623ab5802c7be602993d93924980ed5456f054025a50238421e0005b9ee83a23e0ebb9b6d54a82542f7ef4dc7edd19ba998a88e6330cebbe0fc1a33ce4fdc939e4aa2f43ee817b6285c94ea32f0b8c6d1758eeb5e17f91c7a1ba6fbb3fccc19918fea5ee06165b74086fb481104a8000677fd2267d2965b1694", 0x86}, {&(0x7f0000000600)="35faf47246cf9d499de5004ca8ebcbdd81b928208a82a2df8869e850aff0002b7c745cf4dfcb1d9adf48fbde031afd7f500359d8cfd076f87547ce700c814548250f6806813c2a5c5dd4581919c3c6d792b02d87a9235fd6473e1016bc8a28758fa927352d46a298bf7768531d4d63df3fefc0e55159fdf129c13fb61bb45184ff346f13eab0cbf540ca9b1947b06976a1de", 0x92}], 0x7, &(0x7f00000001c0)=[@ip_tos_u8={{0x11, 0x0, 0x1, 0x7f}}, @ip_ttl={{0x14, 0x0, 0x2, 0x5}}], 0x30}}, {{0x0, 0x0, &(0x7f0000000300)=[{&(0x7f0000000740)="389828f5b655dffd81be7a5ff345621fecee424644a30dd506248e903f6657575d12f98179bc20da28316d896e6e2706cee58da4ae7751cc4a54bd358b633e1086b3781631293037c470b18d43d8", 0x4e}], 0x1, &(0x7f00000007c0)=[@ip_pktinfo={{0x1c, 0x0, 0x8, {r9, @loopback, @private=0xa010102}}}, @ip_ttl={{0x14, 0x0, 0x2, 0xb11f}}], 0x38}}, {{0x0, 0x0, &(0x7f0000000a40)=[{&(0x7f0000000800)="f30e2411ea538b1d1a8ca18796e06f20a23aa732bdf1e8b7e3f9b6052cf82d27d425f7150fd30fa22f68bf71646832be85d136768e88f3606ecd5a0f325049aa72309b5d353d5fb20cf612c2bd663b97a4e21e99df53501f0acde9c470e59f8923213ae0125d3eab74f40bdb84084ae2e36db2c8d1440f5f817dc1a7b24bf73ecc0403cac3fd23d9bc9fd2004781afb9bb89574cc42a74805609840cf9d4507ea77f6b2c4850cbed2eada597fb8a5ea630de30cd4181dc58d1dd8ce2b74aaa7364e06c8760afa701f6c651e9163ada2e9f0a6ef8e8c3ec290212", 0xda}, {&(0x7f0000000900)="e90ec6165e978c486aa7f27ab9dd052249a2812f3b3253efdd4465bd4290263edddf142377b4dd2e3ea2a7fdb835e7bff47fe01382a7fc82311163e0ec5742e0d5adeb0b723bc3e81ecf22a3cb2a3d19c4a738efd5ff70153fca9ada3fda47f230f07151ee2754f9cd46622f145087fb5805d10b5a0f6c09a5cf6fe6d4c3aea526da5b51fb93961f9cc33eb617cedaf412b78befd60410eef9f21d01bc5b57533d76d3b6e938b038216d19fef50fde3604852fafdec414b11bba305acc2b7d89afb6a7d9d8d6230068ec61513934b1dbe9df29c0c8afcd0fbe66690689c47597127429998fa523466edeab17119871d9d4308578dd13a4b6d90cb26e4d9a", 0xfe}, {&(0x7f0000000a00)="60361e76bd584dc1c76a8c598161f72c42e050733fbf98560fa0cb7304", 0x1d}], 0x3, &(0x7f0000000a80)=[@ip_tos_int={{0x14}}, @ip_tos_u8={{0x11, 0x0, 0x1, 0x7f}}], 0x30}}], 0x3, 0x4000000) [ 2838.812241][T22022] bond1073: (slave bridge1100): making interface the new active one [ 2838.848387][T22022] bridge1100: 
entered promiscuous mode [ 2838.904340][T22022] bond1073: (slave bridge1100): Enslaving as an active interface with an up link 17:03:23 executing program 5: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x288, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) [ 2839.016690][T22028] bond1005: entered promiscuous mode [ 2839.047652][T22028] 8021q: adding VLAN 0 to HW filter on device bond1005 [ 2839.057938][T22051] "syz-executor.0" (22051) uses obsolete ecb(arc4) skcipher [ 2839.225357][T22031] bond1005: (slave bridge1071): making interface the new active one [ 2839.235386][T22031] bridge1071: entered promiscuous mode [ 2839.258157][T22031] bond1005: (slave bridge1071): Enslaving as an active interface with an up link 17:03:24 executing program 2: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0xfbab1600, 0x0, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) 17:03:24 executing program 3: r0 = socket$netlink(0x10, 0x3, 0x0) socket(0x0, 0x0, 0x0) sendmsg$nl_route(0xffffffffffffffff, 0x0, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000100)=@newlink={0x48, 0x10, 0xffffff1f, 0x0, 0x0, {}, [@IFLA_LINKINFO={0x28, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x18, 0x2, 0x0, 0x1, [@IFLA_BR_MULTI_BOOLOPT={0xc, 0x2e, {0x0, 0x1}}, @IFLA_BR_MCAST_STATS_ENABLED={0x5}]}}}]}, 0x48}}, 0x60000000) [ 2839.347173][T22038] validate_nla: 13 callbacks suppressed [ 2839.347203][T22038] netlink: 'syz-executor.1': attribute type 1 has an invalid length. 
17:03:24 executing program 1: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x28a, 0xa, {0x0, 0x0, 0x0, 0x0, 0x0, 0xc00}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8}]}, 0x3c}}, 0x0) [ 2839.438156][T22038] workqueue: Failed to create a rescuer kthread for wq "bond773": -EINTR [ 2839.534769][T22041] netlink: 'syz-executor.4': attribute type 1 has an invalid length. 17:03:24 executing program 4: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x28a, 0x0, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x9}]}, 0x3c}}, 0x0) [ 2839.588120][T22041] workqueue: Failed to create a rescuer kthread for wq "bond650": -EINTR [ 2839.643066][T22042] netlink: 'syz-executor.4': attribute type 10 has an invalid length. [ 2839.756776][T22050] netlink: 'syz-executor.5': attribute type 1 has an invalid length. [ 2839.843674][T22050] bond1074: entered promiscuous mode [ 2839.850189][T22050] 8021q: adding VLAN 0 to HW filter on device bond1074 17:03:24 executing program 5: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x28a, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) [ 2839.927015][T22053] bond1074: (slave bridge1101): making interface the new active one [ 2839.935956][T22053] bridge1101: entered promiscuous mode [ 2839.950284][T22053] bond1074: (slave bridge1101): Enslaving as an active interface with an up link [ 2839.964025][T22056] netlink: 12 bytes leftover after parsing attributes in process `syz-executor.0'. 
[ 2840.064875][T22063] netlink: 'syz-executor.2': attribute type 1 has an invalid length. [ 2840.121703][T22063] bond1006: entered promiscuous mode [ 2840.127624][T22063] 8021q: adding VLAN 0 to HW filter on device bond1006 [ 2840.196976][T22066] bond1006: (slave bridge1072): making interface the new active one [ 2840.206327][T22066] bridge1072: entered promiscuous mode [ 2840.225695][T22066] bond1006: (slave bridge1072): Enslaving as an active interface with an up link 17:03:25 executing program 2: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0xfc030000, 0x0, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) 17:03:25 executing program 3: r0 = socket$netlink(0x10, 0x3, 0x0) socket(0x0, 0x0, 0x0) sendmsg$nl_route(0xffffffffffffffff, 0x0, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000100)=@newlink={0x48, 0x10, 0xffffff1f, 0x0, 0x0, {}, [@IFLA_LINKINFO={0x28, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x18, 0x2, 0x0, 0x1, [@IFLA_BR_MULTI_BOOLOPT={0xc, 0x2e, {0x0, 0x1}}, @IFLA_BR_MCAST_STATS_ENABLED={0x5}]}}}]}, 0x48}}, 0x65580000) [ 2840.357668][T22071] netlink: 'syz-executor.1': attribute type 1 has an invalid length. [ 2840.452579][T22071] bond773: entered promiscuous mode [ 2840.468333][T22071] 8021q: adding VLAN 0 to HW filter on device bond773 [ 2840.502175][T22073] netlink: 'syz-executor.4': attribute type 1 has an invalid length. [ 2840.571014][T22073] bond650: entered promiscuous mode [ 2840.576826][T22073] 8021q: adding VLAN 0 to HW filter on device bond650 17:03:25 executing program 1: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x28a, 0xa, {0x0, 0x0, 0x0, 0x0, 0x0, 0xe00}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8}]}, 0x3c}}, 0x0) [ 2840.627783][T22077] netlink: 8 bytes leftover after parsing attributes in process `syz-executor.4'. 
17:03:25 executing program 4: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x28a, 0x0, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0xa}]}, 0x3c}}, 0x0) [ 2840.679910][T22079] netlink: 'syz-executor.5': attribute type 1 has an invalid length. [ 2840.824453][T22079] bond1075: entered promiscuous mode [ 2840.857920][T22079] 8021q: adding VLAN 0 to HW filter on device bond1075 17:03:26 executing program 5: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x292, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) [ 2841.047537][T22081] bond1075: (slave bridge1102): making interface the new active one [ 2841.057067][T22081] bridge1102: entered promiscuous mode [ 2841.070274][T22081] bond1075: (slave bridge1102): Enslaving as an active interface with an up link [ 2841.086365][T22083] netlink: 'syz-executor.2': attribute type 1 has an invalid length. [ 2841.174132][T22083] bond1007: entered promiscuous mode [ 2841.181548][T22083] 8021q: adding VLAN 0 to HW filter on device bond1007 [ 2841.252802][T22085] bond1007: (slave bridge1073): making interface the new active one [ 2841.261418][T22085] bridge1073: entered promiscuous mode [ 2841.275126][T22085] bond1007: (slave bridge1073): Enslaving as an active interface with an up link [ 2841.304143][T22096] netlink: 'syz-executor.4': attribute type 1 has an invalid length. [ 2841.374941][T22096] bond651: entered promiscuous mode [ 2841.383836][T22096] 8021q: adding VLAN 0 to HW filter on device bond651 [ 2841.425528][T22097] bond774: entered promiscuous mode [ 2841.431527][T22097] 8021q: adding VLAN 0 to HW filter on device bond774 [ 2841.445630][T22098] netlink: 8 bytes leftover after parsing attributes in process `syz-executor.4'. 
[ 2841.505883][ T1217] ieee802154 phy0 wpan0: encryption failed: -22 [ 2841.512353][ T1217] ieee802154 phy1 wpan1: encryption failed: -22 [ 2841.558356][T22047] lo speed is unknown, defaulting to 1000 [ 2841.642623][T22103] bond1076: entered promiscuous mode [ 2841.659153][T22103] 8021q: adding VLAN 0 to HW filter on device bond1076 [ 2841.832503][T22104] bond1076: (slave bridge1103): making interface the new active one [ 2841.842086][T22104] bridge1103: entered promiscuous mode [ 2841.853378][T22104] bond1076: (slave bridge1103): Enslaving as an active interface with an up link 17:03:29 executing program 0: unshare(0x4c060000) (async) unshare(0x4020580) (async) r0 = bpf$BPF_PROG_RAW_TRACEPOINT_LOAD(0x5, &(0x7f0000000200)={0x11, 0x3, &(0x7f00000000c0)=ANY=[@ANYBLOB="18000000160100000000000000080eff95"], &(0x7f0000000040)='GPL\x00', 0x0, 0x0, 0x0, 0x0, 0x0, '\x00', 0x0, 0x2, 0xffffffffffffffff, 0x8, 0x0, 0x0, 0x10, 0x0}, 0x80) bpf$BPF_RAW_TRACEPOINT_OPEN(0x11, &(0x7f00000003c0)={&(0x7f0000000380)='rcu_utilization\x00', r0}, 0x10) (async) r1 = socket$alg(0x26, 0x5, 0x0) bind$alg(r1, &(0x7f0000001300)={0x26, 'skcipher\x00', 0x0, 0x0, 'ecb(arc4)\x00'}, 0x58) (async, rerun: 64) unshare(0x2000000) (rerun: 64) r2 = accept$alg(r1, 0x0, 0x0) (async, rerun: 64) socket$packet(0x11, 0x0, 0x300) (async, rerun: 64) socket(0x0, 0x803, 0x0) (async) r3 = socket(0x2, 0xa, 0x2) setsockopt$ALG_SET_KEY(r3, 0x117, 0x1, &(0x7f0000000000), 0x0) (async) sendto$packet(0xffffffffffffffff, 0x0, 0x0, 0x0, 0x0, 0x0) (async) r4 = socket$unix(0x1, 0x2, 0x0) ioctl$ifreq_SIOCGIFINDEX_vcan(r4, 0x8933, &(0x7f0000000100)={'vcan0\x00'}) (async) openat$cgroup_ro(0xffffffffffffffff, &(0x7f0000000340)='cpuacct.usage_all\x00', 0x0, 0x0) (async, rerun: 64) setsockopt$ALG_SET_KEY(r1, 0x117, 0x1, &(0x7f0000000100)="8b", 0x1) (rerun: 64) syz_genetlink_get_family_id$nl80211(&(0x7f0000000500), r2) recvmmsg(r2, &(0x7f0000006100), 0x400000000000682, 0x0, 0x0) (async) bpf$BPF_RAW_TRACEPOINT_OPEN(0x11, &(0x7f00000000c0)={&(0x7f0000000080)='sched_switch\x00'}, 0x10) (async, rerun: 32) r5 = socket$nl_route(0x10, 0x3, 0x0) (async, rerun: 32) r6 = socket(0x10, 0x3, 0x0) (async, rerun: 64) r7 = socket$nl_route(0x10, 0x3, 0x0) (async, rerun: 64) r8 = socket(0x10, 0x2, 0x0) sendmsg$nl_route_sched(r8, &(0x7f0000000180)={0x0, 0x0, &(0x7f0000000140)={0x0, 0x140}}, 0x0) (async) getsockname$packet(r8, &(0x7f0000000080)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000100)=0xab) sendmsg$nl_route(r7, &(0x7f0000000000)={0x0, 0x0, &(0x7f0000000140)={&(0x7f0000000040)=ANY=[@ANYBLOB="3c0000001000010400eeffffffffffff00000000", @ANYRES32=r9, @ANYBLOB="01000000010000001c0012000c000100627269646765"], 0x3c}}, 0x0) (async) sendmsg$nl_route_sched(r6, &(0x7f0000005840)={0x0, 0x0, &(0x7f0000000780)={&(0x7f0000000240)=ANY=[@ANYBLOB="4800000024000b0e00"/20, @ANYRES32=r9, @ANYBLOB="00000000ffffffff0000000008000100687462001c0002001800020003"], 0x48}}, 0x0) (async, rerun: 32) sendmsg$nl_route_sched(r5, &(0x7f00000000c0)={0x0, 0x0, &(0x7f0000000180)={&(0x7f00000005c0)=@newtfilter={0x44, 0x2c, 0xd27, 0x0, 0x0, {0x0, 0x0, 0x0, r9, {}, {}, {0xfff3}}, [@filter_kind_options=@f_u32={{0x8}, {0x18, 0x2, [@TCA_U32_SEL={0x14, 0x5, {0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80080}}]}}]}, 0x44}}, 0x0) (async, rerun: 32) sendmmsg$inet(r1, &(0x7f0000000ac0)=[{{&(0x7f0000000000)={0x2, 0x4e23, @multicast1}, 0x10, 
&(0x7f00000006c0)=[{&(0x7f0000000140)="bb37bf39d354cc138cbc532ac328ec5b409b159418aac6ec7a1cd80facb3b7e63c0c55d036031a802be8f090fc0902733a5acf9ee514855758911d4fa3a1376f03445b23c3868956c2786c5f27f00656578636343b", 0x55}, {&(0x7f0000000280)="61a28ce68834b0cc9274e21058393e9c64ba131b9a0c8f2d134cc6c78e9986cb1a9fdb90b2e18597651bf4090b918ba9a11af5b6cc5f7fa2678edc71a5811c620f9d8eeba8aff2c5464665ad683a0bd9690fac4d748d20de34599088a67d3fc0bd01d0b4ddccf731f14d53d8ebaaef40e2bf2c73fb407b3f26bbacedf962", 0x7e}, {&(0x7f0000000400)="4d2a467716325b50558d0ef340cad4f339c16115ce3a4570ca5a7cbe0ee4178467dd26241b6b20bfb0b1114c098b43f36037ccbe79d338a9d09bd84805377463bbddf9feef315e7066000b74d48c432f2b57bd8ec6f1d0659a838a60e1b39131eeac7b82fca12473229287dc5a74b6c72337564461ced7f6", 0x78}, {&(0x7f0000000480)="c7ef1541680d7d5e1743f4187f18bf3cbbb6baaaf1fcb8460dfd6a4215fc86fd08b26b65ee4399696b61fea1f79d380896809191087581a24ce707e0fabf3d39aa7c33fe6c6c73c1de2938e03c4e57f65c343023be1a543db9da8b4e9f02b1cebe47e71a4a902d6493fe3156", 0x6c}, {&(0x7f0000001380)="0fb11281c27bdc3abc3d19c089e856b7ceaee56bfb629d06da0555e8b1244adfa224986e8ec3966ecd2d35857f397f5ee1b3660f5571249ec0b1f1bc12c9ad2ca33e63115b38f80d3e3d2c04ee2b37d05f6dcff652db95f43d67adfc772472fa33388c0a4fbeeb7cae249da0f009972f4802b3772614df944b111cfcfbcea5e7195f53b418e23e2b9c94f66a29a91acf808cf95fbe8684dbab3ca1b68a0eb5fa3fc19403a2febc374087049f35b394acd7e682a335c6c90c65fac4e4a64e6acdbd002333964edfd396736ed539fcc73c1d1d1cd8ea2225c4254484570b2556e201bf33d2f677d9cb9ab1e47f8f67728393b467b391c54a01dc716aa71ec382120a07fde44bd45dacc770222b44a25d7c57a837de9c61566dec9dc2b025a4dc60c2603a8e0d165853d33985f7503f01bc852440fb7e2b5182167c1ffaccc59e2cc71cbe4078641e72134dd2cff06c5a36d3207e24ce5c09acea0b355ef63c7e10bb095c10d8110709ef7ee3ec2233d160f4963f2a829de831f3c457f8f14597a52388cc876b5fd541a91a4c616cc62921a627446b3980c06450b7ff1729da5b2b1c8a173c531ebe509a250fda5f5aaa8d4245f908665ddbb63530b8620f9bb49f20ac73c5d3601e879b2cec954941d2f52d9eb4b56485fa5b56931492485b8e4f187892d2a00d5a7c0986125b4dcc91c2aaf1d5753b03f438e3dea6fcfea827ae3f4a6f12a48e7e910ed9dcacbd1b4a7f1abd7262d28e6ec717296fc423e2db6613d73ac9e612e3f9bad4a5a3aae4e2150b3f9439fb79891931fc7266339f97f718b95464b9987f8e3bf1086e9ace776dea0e03d88a09918205117e2866a53569e7e5a2409fab97c6e903cbbf464df34dbfa07558727518e20f446794106d1273848c04cded190cb55e2849d63270a47858899721f990fd95f39ad65f2b19f3cc5f0a399fa4318dfea27cbced4261aaba99ec806917e912fcb8472187fed255ddc6cf4f48c752d98adbd3d1a96ca7cafca1d42499dab3f56a99b728e763ef29eb548153cc98a2b096ef4cfd3e3c622b0fdb11d8505925e906b8ab3c9c6903c7d0843ef47774f73b35bfe77edbb09354fe95c2eea4d96e9ae38bd3bd83fb27e807803df008b0b162053a86ffa8c0ea1ff98330b024eb0bd7083d3e2bb1c900c7e3d53f118ab1d54524cb167b02cda6232bac806ce629039a5b00a59926311ce08274e7f24eace8b559f3283dc3cbc4228c121fc33fa2f6188364acb4f16422adb5aefa2e826ecbc9eb1c943cb8d411299797c48883f93f9c23327e23159cd19d6b9417ca1086170210e3c56ac6cb8550c6eddd2e08fcaddd7809d8dab229c17265c4f5fc597b1ff2027a9b3344bad0324a7405fad2e6b083cc4e32ccc3ef034ded856e5b8ff8b0f59e85e61895ae8d11cf1399ed8c4c5807014eef632718978e489af24d2c6f1875842bc8859cf714900a157d475bd550ad7166784a8d77643275ab0887c105f7f5360f776807c5ce1c16a635f2548eab0db8984f1847044f393772160350324687e08e5099787e5ac371d4abb44c8ea60d280371137aaaf45562751b921174a894a835dd5256427b9cfb3fe64ac5dd1bb0cd8362ed61a6a72a36644ce8aa5185f0d35809505ed64638b9f543332afbf8e4fbe78efa70576c094250ec28411c78ced8153aedbde3133e887a98ef55191899082261eed06c01ee37ac8f1ecdb39373c77cd59721ff1d0f3105394e6fcf93020247e22f43b1cc44b6d679d3e8ecf265e2f59809
fe34937ca6771d905420f646e8395b4544afa15a71d94c3891beda56aa7d0adca688c25227341a5c651582142ecb4d50846e4df257da65c60eadaeda89443821b6344161d449cfa16275fa628fed385fccf6eeb97197f9018af73f41fc05958d54aa305e15484b665c751c4c408bcbed037b452b48b0c9232944c9dbcc10814dc40dfff3ea2464c06b7f0f8c207cf2604d7e2b831d3930effb773f20eb53820bae4bc5a0ae661470d3b5288e2e1183bb195f9d2a0ef806cde5ff401db0116391c8167b97b5c3bdd25245b6d4cf7eae611fdac496b2706de69c3dcf728c316025b9958f8d015670cec0c4cc17d77430c6fb44add2bd2c34bcbc3a987f1e96839a0455db64dc7b5ffccc288815986118aea5d0d1fe11bd412d176b230729aea32211c9ff68474e0854343e9b53608258bc0667fa27161d3073d78508848fc9ff920a54a097665dd438adbf317b28bb10fb11ba822fe643a45f9a126a99e77506cd0e5907becd0a84864ee3ff408feb69bca6ec3978f8079677d675eba078091068e4170c5041c5f724f879f23a42445a0ec42940749ff3ac26c56375459b98e5fa251b9b4129522c847da684f8b83a6cca10fd85fc13c46965a3193d091d7eabfc3974230efebaad535b8b0d69202125dd3cbd6bdd0dd056ce47fbda5f09acd005a49b77c0d599ba32c4b8ee9cf5ef1ec1cc60bfc5f24d3e1a2046b49f6fe1c48f9c427c66c700605fb7b51f9367ad91f4f2705e1eb5c8a4a01845f6602ad24ed692b90f5979aaad96472f052c2a8b9c748f12b83614d66dd608c8b263cf36197abc23772c2d506c9f426b870acb71f94343d798576edf31bd249f241470c1c50f2fa43728a704d3f980915852d243bb4a7cb0b378971a431644283aa9be9ac80f45683e61ac1364fb8d5889196bc65995d73209642aa78c06d2a6537b7f7a661043068a14bd6e950e9bc0c60433d3f7b2f113d50cd92254b77cbce781fd4ec8c175405b9a455e2bc6dabf667e94e96bae787f9f6bbfc7ddd6982567d979a16fe9eb3bb156027190c24e5687fbdbfc0c1e83cf1e95f90588b5aa03d8d9ef78fbcd0d91016d2c06bd12d049d4e2b0f39cdeef3a6c8b5fcece455ff8d2cfa5ca3e5a0b6d0814e1729bf6e6ce19c5dfd9346eb1564fc1c9e69149d416a1925d8ab155091ea0b98d85a52d9ad4df89d700fc15be69f63459620c29dadbeaa5a14f0bfd981f3b75d2cddbfde5adb5bc75896f6be000a349e5e832eb65a39882007665cc147b18fbc7a06f7855e07c9a2b1757cf774090557edd873c8195dc19e26a5863e978ca70c2ffdf39c503227c37907677cbc535715862ce2f200559ae7939a1060cf8d2a3175ea9a29a7bfd776c660a6eadaeb854a2dbd6013bc65fde87a55150c685cfe031b6ecd54f8a60a91ed4b034122baf504aae975626c39c2393c1f6313a1f1047e7258a055b2efc91135c430bca938e1532956c612c0d52f2615befd55213496b450c222ad06c62f3f1caa87a18774a15e53840e2f2c02e2372afdfd8092916255b83185502c2b297827c4c7db6c9e49206cb9fc6744e744f6a9c8f99573bd0be35b9c1c4def96f51523343fe6ef73df03d766ef442dc0de08fc45f0125ef5a1cf3927f791a6acfc20d9debf5dfcdb12dd23576784007444bd0449114f2ee6cbda6ea7ccaa7089fa88bdbae9b3b82cf384ff3bc03d9ea17923dfca0a61c02eedc762c5b4ec179582cf134831db6c0716abe5b74eecbd424bb1234764fd6ec10656706d76359c5e0cba4e03a2983d0e9f987bebdbe7ca305df2485e5f43a9d406cbedfbd3217ecc8cff78b73e7450f2ce96a3bdc06c243affadfdb1db94b262dd4735d5c4720928be6dbcc0a6855f17e083222e3ba4f3746dc9460b394b6c9cd4c8775fa8f3979bb0a1f7ece63a727be102be339c359da9ca2107d495a0fb8371099c4dd6f0f49312542077b7ed3c3c62f8d03fe20bdb1c8151c918d4f6b760b1e63d86d54c25ca2ecb9c432115874b1230384dc548fbcfd2d4dbef3f51380863eb37862104c20911c4bcd594f7de923c620b21357a592ae40f897d946ba77f7a2062bf1a2b08ea78d1bac6880a7c04bd4c5a817c0f159fb59a257019898cfbb390a2dd3c9ce476f50d36903ee9066428e42ef4258f836dfc236fc4424a14d40abf966f783c19e1f3d709bfdb0b96f0e98cf8d0fde38709b420998917fe017b3434713de7682bfbb1c60ea4acb67047ad0af4a5e980d8070138c2d1dd0ed24d2b0bf1ff83345b0e165b04494c6bf651917fb1ecc2052d59c4fcb16b00882b86ea535b5e895f1556cafff548813ed9ecd69a605260a7d613b928132ddcd921c2e0c3cb02dcf8d1fa4b9dfc50ca5c6b58cf057a4ba5f15f17f149c999c3b3a6e9a0ca71bd9df1742547fa99a3ca9bef7138d0a523ed75fc16ffbb8db66b18a4ea7587fe277f25015e301743c95473df8683518741f337fbe6f37517bae8d6f01dd3f664521312bc11c09f68e4ce1654
afc8d6e80a514d112933f94647df6ddfc24e879bfb9ea6198de6a9ee49880a671858f9fb56ad16ed9315d3a104119b2a84c84c42adbd057919d8d778db5473f5281719fbc01a94ba77bce51d73a20b7776ae2d687adb85eed4b15f4c756a0a1d61e596e7cdd75d64b9005ea9538ce6300d2d8f7b851e30d1ba594da3ffef617aea1d9e111bcabd9a14f4b8b1c0dd43a1a7241c7024d469c0948175f8b9a32bf1124612e206b7d06d3fb391a9359c26e930213665133f4d9d1fb9f936afc4463f87ec6efe0df32b9b3e564fcf3f44b62f608ac52e8c24d2090e6b103345a3a74517e355c7e0334a4192a0092018b00bd4917cb0d86c9e19021ab29fb848c06022125e0c34399b1ee28ef6ac7e9836062ec89af05d191f234622ede0ceeb4fbdf00c3459b8d165e7c0fd9d601782b53f79e138cba90581b6016664d0aa290e8d9fe7c122cdbebcd2a53ea40e479254e16fa8c8b9f7286d0f1baf74e3755c5ca87fa06acd119e0a6ee3db0821138ebf8ede18217c066009f14f68800a0e746f23e7298a16506968a54a85dae2c6bbedf0dcbf2e9edbf883f6b94bdb035d6e875d8b5eb228b269b5ac60d1cd4447c5c875691c1e3812753b02de739ac7eaf59192888abfe32337186c97620c6b543727efcd29eb47cd876d0882663e4b88407e1e92f60aeb278f23faf5c2367e3e002ec57a35dbbe5894d905448d30ac59fa751b6493a6e70c8e6ded30f3648c297c0c6759c2dccfc9d22b1f442a89904049242587579940243a8f1b4a35ee7b973a03f4a875842271280677460104d1396bf694b60d32b00dac4d8dacf4db7c8b82f00eab73de3574e0d178f475ce0ce315a8a991388fb0c448657e384f98cb3b54ba2c7ace2806c8a04413ecda74f66aab99269737122cc2b386f784654c210e201f99b8bf65dcc40d7ee1bf135d43be2b3fe328f3c91a5ef69d4e6e6620e886ff6a0ed4398c1a938def9481222109c9b1dfd5e74eb30605e9d2afbbded2342b77ebd19fa60a3b784167fa16f0afa06b4398a2c9dab80c639844f72d871ef01c65941fc5c7027b1ffbf8f49f77382f37d13c1fd7b058cdb500754645081ee9d1f6116e5b95da0bfa3bcbfafceaec9b2bedc23bff4621daddbd0dc64daa7a48cf84ff5386022cc7dc591c9819a54611c14ae1590c8bdb5c02a4724809e60a34f9e977db2622be0b2c9bb54f986f8483c54be31210f970127f1fffb407eb124c95292bc7623c38dcc9d37bab91263067b67b12ba38af17e0e82e9805117a4a9c791fdb1307cff2cb225479885de935e65a1cbdbf7179d2339b80e024105321a4d6031c9f9be1bdb032bbf5a8edd359a634d397e9af4f069db89decd37f994344987930bdfe18cac9761fdb22184bd8e0b88a9bc2fb29cc5aa4e0e6a27ff1d3666c49347551e8798404f78efe42f88a7c5e93b31f7e77efc4e4c5a859c66ccb8ff4f056a53bf5f86b42f01343b9dfcff9df738899e91b78fd3", 0x1000}, {&(0x7f0000000540)="35cf95640d253cd8f65da8def623ab5802c7be602993d93924980ed5456f054025a50238421e0005b9ee83a23e0ebb9b6d54a82542f7ef4dc7edd19ba998a88e6330cebbe0fc1a33ce4fdc939e4aa2f43ee817b6285c94ea32f0b8c6d1758eeb5e17f91c7a1ba6fbb3fccc19918fea5ee06165b74086fb481104a8000677fd2267d2965b1694", 0x86}, {&(0x7f0000000600)="35faf47246cf9d499de5004ca8ebcbdd81b928208a82a2df8869e850aff0002b7c745cf4dfcb1d9adf48fbde031afd7f500359d8cfd076f87547ce700c814548250f6806813c2a5c5dd4581919c3c6d792b02d87a9235fd6473e1016bc8a28758fa927352d46a298bf7768531d4d63df3fefc0e55159fdf129c13fb61bb45184ff346f13eab0cbf540ca9b1947b06976a1de", 0x92}], 0x7, &(0x7f00000001c0)=[@ip_tos_u8={{0x11, 0x0, 0x1, 0x7f}}, @ip_ttl={{0x14, 0x0, 0x2, 0x5}}], 0x30}}, {{0x0, 0x0, &(0x7f0000000300)=[{&(0x7f0000000740)="389828f5b655dffd81be7a5ff345621fecee424644a30dd506248e903f6657575d12f98179bc20da28316d896e6e2706cee58da4ae7751cc4a54bd358b633e1086b3781631293037c470b18d43d8", 0x4e}], 0x1, &(0x7f00000007c0)=[@ip_pktinfo={{0x1c, 0x0, 0x8, {r9, @loopback, @private=0xa010102}}}, @ip_ttl={{0x14, 0x0, 0x2, 0xb11f}}], 0x38}}, {{0x0, 0x0, 
&(0x7f0000000a40)=[{&(0x7f0000000800)="f30e2411ea538b1d1a8ca18796e06f20a23aa732bdf1e8b7e3f9b6052cf82d27d425f7150fd30fa22f68bf71646832be85d136768e88f3606ecd5a0f325049aa72309b5d353d5fb20cf612c2bd663b97a4e21e99df53501f0acde9c470e59f8923213ae0125d3eab74f40bdb84084ae2e36db2c8d1440f5f817dc1a7b24bf73ecc0403cac3fd23d9bc9fd2004781afb9bb89574cc42a74805609840cf9d4507ea77f6b2c4850cbed2eada597fb8a5ea630de30cd4181dc58d1dd8ce2b74aaa7364e06c8760afa701f6c651e9163ada2e9f0a6ef8e8c3ec290212", 0xda}, {&(0x7f0000000900)="e90ec6165e978c486aa7f27ab9dd052249a2812f3b3253efdd4465bd4290263edddf142377b4dd2e3ea2a7fdb835e7bff47fe01382a7fc82311163e0ec5742e0d5adeb0b723bc3e81ecf22a3cb2a3d19c4a738efd5ff70153fca9ada3fda47f230f07151ee2754f9cd46622f145087fb5805d10b5a0f6c09a5cf6fe6d4c3aea526da5b51fb93961f9cc33eb617cedaf412b78befd60410eef9f21d01bc5b57533d76d3b6e938b038216d19fef50fde3604852fafdec414b11bba305acc2b7d89afb6a7d9d8d6230068ec61513934b1dbe9df29c0c8afcd0fbe66690689c47597127429998fa523466edeab17119871d9d4308578dd13a4b6d90cb26e4d9a", 0xfe}, {&(0x7f0000000a00)="60361e76bd584dc1c76a8c598161f72c42e050733fbf98560fa0cb7304", 0x1d}], 0x3, &(0x7f0000000a80)=[@ip_tos_int={{0x14}}, @ip_tos_u8={{0x11, 0x0, 0x1, 0x7f}}], 0x30}}], 0x3, 0x4000000) 17:03:29 executing program 2: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0xfcab1600, 0x0, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) 17:03:29 executing program 3: r0 = socket$netlink(0x10, 0x3, 0x0) socket(0x0, 0x0, 0x0) sendmsg$nl_route(0xffffffffffffffff, 0x0, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000100)=@newlink={0x48, 0x10, 0xffffff1f, 0x0, 0x0, {}, [@IFLA_LINKINFO={0x28, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x18, 0x2, 0x0, 0x1, [@IFLA_BR_MULTI_BOOLOPT={0xc, 0x2e, {0x0, 0x1}}, @IFLA_BR_MCAST_STATS_ENABLED={0x5}]}}}]}, 0x48}}, 0x81000000) 17:03:29 executing program 4: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x28a, 0x0, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0xb}]}, 0x3c}}, 0x0) 17:03:29 executing program 1: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, 
&(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x28a, 0xa, {0x0, 0x0, 0x0, 0x0, 0x0, 0x1600}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8}]}, 0x3c}}, 0x0) 17:03:29 executing program 5: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x2a2, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) [ 2844.400936][T22127] "syz-executor.0" (22127) uses obsolete ecb(arc4) skcipher [ 2844.426130][T22118] bond775: entered promiscuous mode [ 2844.447363][T22127] "syz-executor.0" (22127) uses obsolete ecb(arc4) skcipher 17:03:29 executing program 3: r0 = socket$netlink(0x10, 0x3, 0x0) socket(0x0, 0x0, 0x0) sendmsg$nl_route(0xffffffffffffffff, 0x0, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000100)=@newlink={0x48, 0x10, 0xffffff1f, 0x0, 0x0, {}, [@IFLA_LINKINFO={0x28, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x18, 0x2, 0x0, 0x1, [@IFLA_BR_MULTI_BOOLOPT={0xc, 0x2e, {0x0, 0x1}}, @IFLA_BR_MCAST_STATS_ENABLED={0x5}]}}}]}, 0x48}}, 0x88a8ffff) [ 2844.456966][T22118] 8021q: adding VLAN 0 to HW filter on device bond775 [ 2844.472079][T22114] validate_nla: 3 callbacks suppressed [ 2844.472102][T22114] netlink: 'syz-executor.4': attribute type 1 has an invalid length. [ 2844.495632][T22127] "syz-executor.0" (22127) uses obsolete ecb(arc4) skcipher [ 2844.525204][T22114] bond652: entered promiscuous mode [ 2844.532146][T22114] 8021q: adding VLAN 0 to HW filter on device bond652 [ 2844.546677][T22117] netlink: 'syz-executor.5': attribute type 1 has an invalid length. 17:03:29 executing program 3: r0 = socket$netlink(0x10, 0x3, 0x0) socket(0x0, 0x0, 0x0) sendmsg$nl_route(0xffffffffffffffff, 0x0, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000100)=@newlink={0x48, 0x10, 0xffffff1f, 0x0, 0x0, {}, [@IFLA_LINKINFO={0x28, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x18, 0x2, 0x0, 0x1, [@IFLA_BR_MULTI_BOOLOPT={0xc, 0x2e, {0x0, 0x1}}, @IFLA_BR_MCAST_STATS_ENABLED={0x5}]}}}]}, 0x48}}, 0x9effffff) [ 2844.626672][T22117] bond1077: entered promiscuous mode [ 2844.641586][T22117] 8021q: adding VLAN 0 to HW filter on device bond1077 [ 2844.665329][T22122] netlink: 'syz-executor.2': attribute type 1 has an invalid length. 
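The "executing program 3" runs exercise a different corner: a bridge is created with bridge options nested under IFLA_INFO_DATA, specifically IFLA_BR_MULTI_BOOLOPT (a struct br_boolopt_multi of { optval, optmask }) followed by IFLA_BR_MCAST_STATS_ENABLED, while the final sendmsg() flags argument is varied across runs (0x60000000, 0xc80e0000, ...). A well-formed C approximation of that netlink payload is sketched below with a fixed struct so the attribute layout is visible; the option values are illustrative, and the repro's odd lengths and fuzzed flags are not reproduced.

/* Illustrative sketch: RTM_NEWLINK creating a bridge with nested bridge
 * options, approximating the "program 3" payload. */
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/netlink.h>
#include <linux/rtnetlink.h>
#include <linux/if_link.h>
#include <linux/if_bridge.h>

int main(void)
{
        struct {
                struct nlmsghdr nlh;        /* 16 bytes                     */
                struct ifinfomsg ifi;       /* 16 bytes                     */
                struct rtattr linkinfo;     /* IFLA_LINKINFO, len 40        */
                struct rtattr kind;         /* IFLA_INFO_KIND, len 11       */
                char kind_str[8];           /* "bridge" + padding           */
                struct rtattr data;         /* IFLA_INFO_DATA, len 24       */
                struct rtattr boolopt;      /* IFLA_BR_MULTI_BOOLOPT, len 12 */
                struct br_boolopt_multi bm; /* { optval, optmask }          */
                struct rtattr mcast;        /* IFLA_BR_MCAST_STATS_ENABLED, len 5 */
                unsigned char mcast_val[4]; /* u8 value + padding           */
        } req;
        int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);

        if (fd < 0)
                return 1;
        memset(&req, 0, sizeof(req));
        req.nlh.nlmsg_len = sizeof(req);    /* 72 bytes total */
        req.nlh.nlmsg_type = RTM_NEWLINK;
        req.nlh.nlmsg_flags = NLM_F_REQUEST | NLM_F_CREATE | NLM_F_EXCL | NLM_F_ACK;
        req.ifi.ifi_family = AF_UNSPEC;

        req.linkinfo.rta_type = IFLA_LINKINFO;
        req.linkinfo.rta_len = 40;          /* hdr(4) + kind padded to 12 + data(24) */
        req.kind.rta_type = IFLA_INFO_KIND;
        req.kind.rta_len = RTA_LENGTH(7);
        strcpy(req.kind_str, "bridge");
        req.data.rta_type = IFLA_INFO_DATA;
        req.data.rta_len = 24;              /* hdr(4) + boolopt(12) + mcast(5, padded to 8) */
        req.boolopt.rta_type = IFLA_BR_MULTI_BOOLOPT;
        req.boolopt.rta_len = RTA_LENGTH(sizeof(req.bm));
        req.bm.optval = 0;
        req.bm.optmask = 0x1;               /* bit 0 (BR_BOOLOPT_NO_LL_LEARN), as in the repro's {0x0, 0x1} */
        req.mcast.rta_type = IFLA_BR_MCAST_STATS_ENABLED;
        req.mcast.rta_len = RTA_LENGTH(1);  /* one u8 of payload */
        req.mcast_val[0] = 0;

        send(fd, &req, req.nlh.nlmsg_len, 0);
        close(fd);
        return 0;
}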
[ 2844.733772][T22122] bond1008: entered promiscuous mode [ 2844.740729][T22122] 8021q: adding VLAN 0 to HW filter on device bond1008 [ 2844.756507][T22129] netlink: 8 bytes leftover after parsing attributes in process `syz-executor.4'. 17:03:29 executing program 3: r0 = socket$netlink(0x10, 0x3, 0x0) socket(0x0, 0x0, 0x0) sendmsg$nl_route(0xffffffffffffffff, 0x0, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000100)=@newlink={0x48, 0x10, 0xffffff1f, 0x0, 0x0, {}, [@IFLA_LINKINFO={0x28, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x18, 0x2, 0x0, 0x1, [@IFLA_BR_MULTI_BOOLOPT={0xc, 0x2e, {0x0, 0x1}}, @IFLA_BR_MCAST_STATS_ENABLED={0x5}]}}}]}, 0x48}}, 0xc80e0000) 17:03:29 executing program 4: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x28a, 0x0, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0xf}]}, 0x3c}}, 0x0) [ 2844.866640][T22131] bond1077: (slave bridge1104): making interface the new active one [ 2844.880064][T22131] bridge1104: entered promiscuous mode [ 2844.905757][T22131] bond1077: (slave bridge1104): Enslaving as an active interface with an up link 17:03:29 executing program 5: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x2aa, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) 17:03:30 executing program 1: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x28a, 0xa, {0x0, 0x0, 0x0, 0x0, 0x0, 0x5865}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8}]}, 0x3c}}, 0x0) [ 2845.116369][T22135] bond1008: (slave 
bridge1074): making interface the new active one [ 2845.137424][T22135] bridge1074: entered promiscuous mode [ 2845.156470][T22135] bond1008: (slave bridge1074): Enslaving as an active interface with an up link [ 2845.193776][T22159] netlink: 'syz-executor.4': attribute type 1 has an invalid length. [ 2845.246614][T22159] bond653: entered promiscuous mode [ 2845.253764][T22159] 8021q: adding VLAN 0 to HW filter on device bond653 [ 2845.265357][T22162] netlink: 8 bytes leftover after parsing attributes in process `syz-executor.4'. [ 2845.293366][T22164] netlink: 'syz-executor.5': attribute type 1 has an invalid length. [ 2845.359345][T22164] bond1078: entered promiscuous mode [ 2845.365098][T22164] 8021q: adding VLAN 0 to HW filter on device bond1078 [ 2845.381436][T22168] netlink: 'syz-executor.1': attribute type 1 has an invalid length. [ 2845.415382][T22168] bond776: entered promiscuous mode [ 2845.421221][T22168] 8021q: adding VLAN 0 to HW filter on device bond776 [ 2845.488114][T22169] bond1078: (slave bridge1105): making interface the new active one [ 2845.497947][T22169] bridge1105: entered promiscuous mode [ 2845.511019][T22169] bond1078: (slave bridge1105): Enslaving as an active interface with an up link [ 2845.740250][T22126] lo speed is unknown, defaulting to 1000 17:03:32 executing program 0: unshare(0x4c060000) (async) unshare(0x4020580) r0 = bpf$BPF_PROG_RAW_TRACEPOINT_LOAD(0x5, &(0x7f0000000200)={0x11, 0x3, &(0x7f00000000c0)=ANY=[@ANYBLOB="18000000160100000000000000080eff95"], &(0x7f0000000040)='GPL\x00', 0x0, 0x0, 0x0, 0x0, 0x0, '\x00', 0x0, 0x2, 0xffffffffffffffff, 0x8, 0x0, 0x0, 0x10, 0x0}, 0x80) bpf$BPF_RAW_TRACEPOINT_OPEN(0x11, &(0x7f00000003c0)={&(0x7f0000000380)='rcu_utilization\x00', r0}, 0x10) r1 = socket$alg(0x26, 0x5, 0x0) bind$alg(r1, &(0x7f0000001300)={0x26, 'skcipher\x00', 0x0, 0x0, 'ecb(arc4)\x00'}, 0x58) (async) unshare(0x2000000) (async) r2 = accept$alg(r1, 0x0, 0x0) socket$packet(0x11, 0x0, 0x300) (async) socket(0x0, 0x803, 0x0) r3 = socket(0x2, 0xa, 0x2) setsockopt$ALG_SET_KEY(r3, 0x117, 0x1, &(0x7f0000000000), 0x0) (async, rerun: 64) sendto$packet(0xffffffffffffffff, 0x0, 0x0, 0x0, 0x0, 0x0) (async, rerun: 64) r4 = socket$unix(0x1, 0x2, 0x0) ioctl$ifreq_SIOCGIFINDEX_vcan(r4, 0x8933, &(0x7f0000000100)={'vcan0\x00'}) openat$cgroup_ro(0xffffffffffffffff, &(0x7f0000000340)='cpuacct.usage_all\x00', 0x0, 0x0) setsockopt$ALG_SET_KEY(r1, 0x117, 0x1, &(0x7f0000000100)="8b", 0x1) (async, rerun: 64) syz_genetlink_get_family_id$nl80211(&(0x7f0000000500), r2) (rerun: 64) recvmmsg(r2, &(0x7f0000006100), 0x400000000000682, 0x0, 0x0) (async) bpf$BPF_RAW_TRACEPOINT_OPEN(0x11, &(0x7f00000000c0)={&(0x7f0000000080)='sched_switch\x00'}, 0x10) (async) r5 = socket$nl_route(0x10, 0x3, 0x0) r6 = socket(0x10, 0x3, 0x0) (async, rerun: 64) r7 = socket$nl_route(0x10, 0x3, 0x0) (async, rerun: 64) r8 = socket(0x10, 0x2, 0x0) sendmsg$nl_route_sched(r8, &(0x7f0000000180)={0x0, 0x0, &(0x7f0000000140)={0x0, 0x140}}, 0x0) getsockname$packet(r8, &(0x7f0000000080)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000100)=0xab) sendmsg$nl_route(r7, &(0x7f0000000000)={0x0, 0x0, &(0x7f0000000140)={&(0x7f0000000040)=ANY=[@ANYBLOB="3c0000001000010400eeffffffffffff00000000", @ANYRES32=r9, @ANYBLOB="01000000010000001c0012000c000100627269646765"], 0x3c}}, 0x0) (async) sendmsg$nl_route_sched(r6, &(0x7f0000005840)={0x0, 0x0, &(0x7f0000000780)={&(0x7f0000000240)=ANY=[@ANYBLOB="4800000024000b0e00"/20, @ANYRES32=r9, @ANYBLOB="00000000ffffffff0000000008000100687462001c0002001800020003"], 0x48}}, 0x0) 
(async) sendmsg$nl_route_sched(r5, &(0x7f00000000c0)={0x0, 0x0, &(0x7f0000000180)={&(0x7f00000005c0)=@newtfilter={0x44, 0x2c, 0xd27, 0x0, 0x0, {0x0, 0x0, 0x0, r9, {}, {}, {0xfff3}}, [@filter_kind_options=@f_u32={{0x8}, {0x18, 0x2, [@TCA_U32_SEL={0x14, 0x5, {0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80080}}]}}]}, 0x44}}, 0x0) (async) sendmmsg$inet(r1, &(0x7f0000000ac0)=[{{&(0x7f0000000000)={0x2, 0x4e23, @multicast1}, 0x10, &(0x7f00000006c0)=[{&(0x7f0000000140)="bb37bf39d354cc138cbc532ac328ec5b409b159418aac6ec7a1cd80facb3b7e63c0c55d036031a802be8f090fc0902733a5acf9ee514855758911d4fa3a1376f03445b23c3868956c2786c5f27f00656578636343b", 0x55}, {&(0x7f0000000280)="61a28ce68834b0cc9274e21058393e9c64ba131b9a0c8f2d134cc6c78e9986cb1a9fdb90b2e18597651bf4090b918ba9a11af5b6cc5f7fa2678edc71a5811c620f9d8eeba8aff2c5464665ad683a0bd9690fac4d748d20de34599088a67d3fc0bd01d0b4ddccf731f14d53d8ebaaef40e2bf2c73fb407b3f26bbacedf962", 0x7e}, {&(0x7f0000000400)="4d2a467716325b50558d0ef340cad4f339c16115ce3a4570ca5a7cbe0ee4178467dd26241b6b20bfb0b1114c098b43f36037ccbe79d338a9d09bd84805377463bbddf9feef315e7066000b74d48c432f2b57bd8ec6f1d0659a838a60e1b39131eeac7b82fca12473229287dc5a74b6c72337564461ced7f6", 0x78}, {&(0x7f0000000480)="c7ef1541680d7d5e1743f4187f18bf3cbbb6baaaf1fcb8460dfd6a4215fc86fd08b26b65ee4399696b61fea1f79d380896809191087581a24ce707e0fabf3d39aa7c33fe6c6c73c1de2938e03c4e57f65c343023be1a543db9da8b4e9f02b1cebe47e71a4a902d6493fe3156", 0x6c}, {&(0x7f0000001380)="0fb11281c27bdc3abc3d19c089e856b7ceaee56bfb629d06da0555e8b1244adfa224986e8ec3966ecd2d35857f397f5ee1b3660f5571249ec0b1f1bc12c9ad2ca33e63115b38f80d3e3d2c04ee2b37d05f6dcff652db95f43d67adfc772472fa33388c0a4fbeeb7cae249da0f009972f4802b3772614df944b111cfcfbcea5e7195f53b418e23e2b9c94f66a29a91acf808cf95fbe8684dbab3ca1b68a0eb5fa3fc19403a2febc374087049f35b394acd7e682a335c6c90c65fac4e4a64e6acdbd002333964edfd396736ed539fcc73c1d1d1cd8ea2225c4254484570b2556e201bf33d2f677d9cb9ab1e47f8f67728393b467b391c54a01dc716aa71ec382120a07fde44bd45dacc770222b44a25d7c57a837de9c61566dec9dc2b025a4dc60c2603a8e0d165853d33985f7503f01bc852440fb7e2b5182167c1ffaccc59e2cc71cbe4078641e72134dd2cff06c5a36d3207e24ce5c09acea0b355ef63c7e10bb095c10d8110709ef7ee3ec2233d160f4963f2a829de831f3c457f8f14597a52388cc876b5fd541a91a4c616cc62921a627446b3980c06450b7ff1729da5b2b1c8a173c531ebe509a250fda5f5aaa8d4245f908665ddbb63530b8620f9bb49f20ac73c5d3601e879b2cec954941d2f52d9eb4b56485fa5b56931492485b8e4f187892d2a00d5a7c0986125b4dcc91c2aaf1d5753b03f438e3dea6fcfea827ae3f4a6f12a48e7e910ed9dcacbd1b4a7f1abd7262d28e6ec717296fc423e2db6613d73ac9e612e3f9bad4a5a3aae4e2150b3f9439fb79891931fc7266339f97f718b95464b9987f8e3bf1086e9ace776dea0e03d88a09918205117e2866a53569e7e5a2409fab97c6e903cbbf464df34dbfa07558727518e20f446794106d1273848c04cded190cb55e2849d63270a47858899721f990fd95f39ad65f2b19f3cc5f0a399fa4318dfea27cbced4261aaba99ec806917e912fcb8472187fed255ddc6cf4f48c752d98adbd3d1a96ca7cafca1d42499dab3f56a99b728e763ef29eb548153cc98a2b096ef4cfd3e3c622b0fdb11d8505925e906b8ab3c9c6903c7d0843ef47774f73b35bfe77edbb09354fe95c2eea4d96e9ae38bd3bd83fb27e807803df008b0b162053a86ffa8c0ea1ff98330b024eb0bd7083d3e2bb1c900c7e3d53f118ab1d54524cb167b02cda6232bac806ce629039a5b00a59926311ce08274e7f24eace8b559f3283dc3cbc4228c121fc33fa2f6188364acb4f16422adb5aefa2e826ecbc9eb1c943cb8d411299797c48883f93f9c23327e23159cd19d6b9417ca1086170210e3c56ac6cb8550c6eddd2e08fcaddd7809d8dab229c17265c4f5fc597b1ff2027a9b3344bad0324a7405fad2e6b083cc4e32ccc3ef034ded856e5b8ff8b0f59e85e61895ae8d11cf1399ed8c4c5807014eef632718978e489af24d2c6f1875842bc8859cf714900a157d475bd550ad716
6784a8d77643275ab0887c105f7f5360f776807c5ce1c16a635f2548eab0db8984f1847044f393772160350324687e08e5099787e5ac371d4abb44c8ea60d280371137aaaf45562751b921174a894a835dd5256427b9cfb3fe64ac5dd1bb0cd8362ed61a6a72a36644ce8aa5185f0d35809505ed64638b9f543332afbf8e4fbe78efa70576c094250ec28411c78ced8153aedbde3133e887a98ef55191899082261eed06c01ee37ac8f1ecdb39373c77cd59721ff1d0f3105394e6fcf93020247e22f43b1cc44b6d679d3e8ecf265e2f59809fe34937ca6771d905420f646e8395b4544afa15a71d94c3891beda56aa7d0adca688c25227341a5c651582142ecb4d50846e4df257da65c60eadaeda89443821b6344161d449cfa16275fa628fed385fccf6eeb97197f9018af73f41fc05958d54aa305e15484b665c751c4c408bcbed037b452b48b0c9232944c9dbcc10814dc40dfff3ea2464c06b7f0f8c207cf2604d7e2b831d3930effb773f20eb53820bae4bc5a0ae661470d3b5288e2e1183bb195f9d2a0ef806cde5ff401db0116391c8167b97b5c3bdd25245b6d4cf7eae611fdac496b2706de69c3dcf728c316025b9958f8d015670cec0c4cc17d77430c6fb44add2bd2c34bcbc3a987f1e96839a0455db64dc7b5ffccc288815986118aea5d0d1fe11bd412d176b230729aea32211c9ff68474e0854343e9b53608258bc0667fa27161d3073d78508848fc9ff920a54a097665dd438adbf317b28bb10fb11ba822fe643a45f9a126a99e77506cd0e5907becd0a84864ee3ff408feb69bca6ec3978f8079677d675eba078091068e4170c5041c5f724f879f23a42445a0ec42940749ff3ac26c56375459b98e5fa251b9b4129522c847da684f8b83a6cca10fd85fc13c46965a3193d091d7eabfc3974230efebaad535b8b0d69202125dd3cbd6bdd0dd056ce47fbda5f09acd005a49b77c0d599ba32c4b8ee9cf5ef1ec1cc60bfc5f24d3e1a2046b49f6fe1c48f9c427c66c700605fb7b51f9367ad91f4f2705e1eb5c8a4a01845f6602ad24ed692b90f5979aaad96472f052c2a8b9c748f12b83614d66dd608c8b263cf36197abc23772c2d506c9f426b870acb71f94343d798576edf31bd249f241470c1c50f2fa43728a704d3f980915852d243bb4a7cb0b378971a431644283aa9be9ac80f45683e61ac1364fb8d5889196bc65995d73209642aa78c06d2a6537b7f7a661043068a14bd6e950e9bc0c60433d3f7b2f113d50cd92254b77cbce781fd4ec8c175405b9a455e2bc6dabf667e94e96bae787f9f6bbfc7ddd6982567d979a16fe9eb3bb156027190c24e5687fbdbfc0c1e83cf1e95f90588b5aa03d8d9ef78fbcd0d91016d2c06bd12d049d4e2b0f39cdeef3a6c8b5fcece455ff8d2cfa5ca3e5a0b6d0814e1729bf6e6ce19c5dfd9346eb1564fc1c9e69149d416a1925d8ab155091ea0b98d85a52d9ad4df89d700fc15be69f63459620c29dadbeaa5a14f0bfd981f3b75d2cddbfde5adb5bc75896f6be000a349e5e832eb65a39882007665cc147b18fbc7a06f7855e07c9a2b1757cf774090557edd873c8195dc19e26a5863e978ca70c2ffdf39c503227c37907677cbc535715862ce2f200559ae7939a1060cf8d2a3175ea9a29a7bfd776c660a6eadaeb854a2dbd6013bc65fde87a55150c685cfe031b6ecd54f8a60a91ed4b034122baf504aae975626c39c2393c1f6313a1f1047e7258a055b2efc91135c430bca938e1532956c612c0d52f2615befd55213496b450c222ad06c62f3f1caa87a18774a15e53840e2f2c02e2372afdfd8092916255b83185502c2b297827c4c7db6c9e49206cb9fc6744e744f6a9c8f99573bd0be35b9c1c4def96f51523343fe6ef73df03d766ef442dc0de08fc45f0125ef5a1cf3927f791a6acfc20d9debf5dfcdb12dd23576784007444bd0449114f2ee6cbda6ea7ccaa7089fa88bdbae9b3b82cf384ff3bc03d9ea17923dfca0a61c02eedc762c5b4ec179582cf134831db6c0716abe5b74eecbd424bb1234764fd6ec10656706d76359c5e0cba4e03a2983d0e9f987bebdbe7ca305df2485e5f43a9d406cbedfbd3217ecc8cff78b73e7450f2ce96a3bdc06c243affadfdb1db94b262dd4735d5c4720928be6dbcc0a6855f17e083222e3ba4f3746dc9460b394b6c9cd4c8775fa8f3979bb0a1f7ece63a727be102be339c359da9ca2107d495a0fb8371099c4dd6f0f49312542077b7ed3c3c62f8d03fe20bdb1c8151c918d4f6b760b1e63d86d54c25ca2ecb9c432115874b1230384dc548fbcfd2d4dbef3f51380863eb37862104c20911c4bcd594f7de923c620b21357a592ae40f897d946ba77f7a2062bf1a2b08ea78d1bac6880a7c04bd4c5a817c0f159fb59a257019898cfbb390a2dd3c9ce476f50d36903ee9066428e42ef4258f836dfc236fc4424a14d40abf966f783c19e1f3d709bfdb0b96f0e98cf8d0fde38709b42099891
7fe017b3434713de7682bfbb1c60ea4acb67047ad0af4a5e980d8070138c2d1dd0ed24d2b0bf1ff83345b0e165b04494c6bf651917fb1ecc2052d59c4fcb16b00882b86ea535b5e895f1556cafff548813ed9ecd69a605260a7d613b928132ddcd921c2e0c3cb02dcf8d1fa4b9dfc50ca5c6b58cf057a4ba5f15f17f149c999c3b3a6e9a0ca71bd9df1742547fa99a3ca9bef7138d0a523ed75fc16ffbb8db66b18a4ea7587fe277f25015e301743c95473df8683518741f337fbe6f37517bae8d6f01dd3f664521312bc11c09f68e4ce1654afc8d6e80a514d112933f94647df6ddfc24e879bfb9ea6198de6a9ee49880a671858f9fb56ad16ed9315d3a104119b2a84c84c42adbd057919d8d778db5473f5281719fbc01a94ba77bce51d73a20b7776ae2d687adb85eed4b15f4c756a0a1d61e596e7cdd75d64b9005ea9538ce6300d2d8f7b851e30d1ba594da3ffef617aea1d9e111bcabd9a14f4b8b1c0dd43a1a7241c7024d469c0948175f8b9a32bf1124612e206b7d06d3fb391a9359c26e930213665133f4d9d1fb9f936afc4463f87ec6efe0df32b9b3e564fcf3f44b62f608ac52e8c24d2090e6b103345a3a74517e355c7e0334a4192a0092018b00bd4917cb0d86c9e19021ab29fb848c06022125e0c34399b1ee28ef6ac7e9836062ec89af05d191f234622ede0ceeb4fbdf00c3459b8d165e7c0fd9d601782b53f79e138cba90581b6016664d0aa290e8d9fe7c122cdbebcd2a53ea40e479254e16fa8c8b9f7286d0f1baf74e3755c5ca87fa06acd119e0a6ee3db0821138ebf8ede18217c066009f14f68800a0e746f23e7298a16506968a54a85dae2c6bbedf0dcbf2e9edbf883f6b94bdb035d6e875d8b5eb228b269b5ac60d1cd4447c5c875691c1e3812753b02de739ac7eaf59192888abfe32337186c97620c6b543727efcd29eb47cd876d0882663e4b88407e1e92f60aeb278f23faf5c2367e3e002ec57a35dbbe5894d905448d30ac59fa751b6493a6e70c8e6ded30f3648c297c0c6759c2dccfc9d22b1f442a89904049242587579940243a8f1b4a35ee7b973a03f4a875842271280677460104d1396bf694b60d32b00dac4d8dacf4db7c8b82f00eab73de3574e0d178f475ce0ce315a8a991388fb0c448657e384f98cb3b54ba2c7ace2806c8a04413ecda74f66aab99269737122cc2b386f784654c210e201f99b8bf65dcc40d7ee1bf135d43be2b3fe328f3c91a5ef69d4e6e6620e886ff6a0ed4398c1a938def9481222109c9b1dfd5e74eb30605e9d2afbbded2342b77ebd19fa60a3b784167fa16f0afa06b4398a2c9dab80c639844f72d871ef01c65941fc5c7027b1ffbf8f49f77382f37d13c1fd7b058cdb500754645081ee9d1f6116e5b95da0bfa3bcbfafceaec9b2bedc23bff4621daddbd0dc64daa7a48cf84ff5386022cc7dc591c9819a54611c14ae1590c8bdb5c02a4724809e60a34f9e977db2622be0b2c9bb54f986f8483c54be31210f970127f1fffb407eb124c95292bc7623c38dcc9d37bab91263067b67b12ba38af17e0e82e9805117a4a9c791fdb1307cff2cb225479885de935e65a1cbdbf7179d2339b80e024105321a4d6031c9f9be1bdb032bbf5a8edd359a634d397e9af4f069db89decd37f994344987930bdfe18cac9761fdb22184bd8e0b88a9bc2fb29cc5aa4e0e6a27ff1d3666c49347551e8798404f78efe42f88a7c5e93b31f7e77efc4e4c5a859c66ccb8ff4f056a53bf5f86b42f01343b9dfcff9df738899e91b78fd3", 0x1000}, {&(0x7f0000000540)="35cf95640d253cd8f65da8def623ab5802c7be602993d93924980ed5456f054025a50238421e0005b9ee83a23e0ebb9b6d54a82542f7ef4dc7edd19ba998a88e6330cebbe0fc1a33ce4fdc939e4aa2f43ee817b6285c94ea32f0b8c6d1758eeb5e17f91c7a1ba6fbb3fccc19918fea5ee06165b74086fb481104a8000677fd2267d2965b1694", 0x86}, {&(0x7f0000000600)="35faf47246cf9d499de5004ca8ebcbdd81b928208a82a2df8869e850aff0002b7c745cf4dfcb1d9adf48fbde031afd7f500359d8cfd076f87547ce700c814548250f6806813c2a5c5dd4581919c3c6d792b02d87a9235fd6473e1016bc8a28758fa927352d46a298bf7768531d4d63df3fefc0e55159fdf129c13fb61bb45184ff346f13eab0cbf540ca9b1947b06976a1de", 0x92}], 0x7, &(0x7f00000001c0)=[@ip_tos_u8={{0x11, 0x0, 0x1, 0x7f}}, @ip_ttl={{0x14, 0x0, 0x2, 0x5}}], 0x30}}, {{0x0, 0x0, &(0x7f0000000300)=[{&(0x7f0000000740)="389828f5b655dffd81be7a5ff345621fecee424644a30dd506248e903f6657575d12f98179bc20da28316d896e6e2706cee58da4ae7751cc4a54bd358b633e1086b3781631293037c470b18d43d8", 0x4e}], 0x1, &(0x7f00000007c0)=[@ip_pktinfo={{0x1c, 0x0, 0x8, {r9, 
@loopback, @private=0xa010102}}}, @ip_ttl={{0x14, 0x0, 0x2, 0xb11f}}], 0x38}}, {{0x0, 0x0, &(0x7f0000000a40)=[{&(0x7f0000000800)="f30e2411ea538b1d1a8ca18796e06f20a23aa732bdf1e8b7e3f9b6052cf82d27d425f7150fd30fa22f68bf71646832be85d136768e88f3606ecd5a0f325049aa72309b5d353d5fb20cf612c2bd663b97a4e21e99df53501f0acde9c470e59f8923213ae0125d3eab74f40bdb84084ae2e36db2c8d1440f5f817dc1a7b24bf73ecc0403cac3fd23d9bc9fd2004781afb9bb89574cc42a74805609840cf9d4507ea77f6b2c4850cbed2eada597fb8a5ea630de30cd4181dc58d1dd8ce2b74aaa7364e06c8760afa701f6c651e9163ada2e9f0a6ef8e8c3ec290212", 0xda}, {&(0x7f0000000900)="e90ec6165e978c486aa7f27ab9dd052249a2812f3b3253efdd4465bd4290263edddf142377b4dd2e3ea2a7fdb835e7bff47fe01382a7fc82311163e0ec5742e0d5adeb0b723bc3e81ecf22a3cb2a3d19c4a738efd5ff70153fca9ada3fda47f230f07151ee2754f9cd46622f145087fb5805d10b5a0f6c09a5cf6fe6d4c3aea526da5b51fb93961f9cc33eb617cedaf412b78befd60410eef9f21d01bc5b57533d76d3b6e938b038216d19fef50fde3604852fafdec414b11bba305acc2b7d89afb6a7d9d8d6230068ec61513934b1dbe9df29c0c8afcd0fbe66690689c47597127429998fa523466edeab17119871d9d4308578dd13a4b6d90cb26e4d9a", 0xfe}, {&(0x7f0000000a00)="60361e76bd584dc1c76a8c598161f72c42e050733fbf98560fa0cb7304", 0x1d}], 0x3, &(0x7f0000000a80)=[@ip_tos_int={{0x14}}, @ip_tos_u8={{0x11, 0x0, 0x1, 0x7f}}], 0x30}}], 0x3, 0x4000000) 17:03:32 executing program 3: r0 = socket$netlink(0x10, 0x3, 0x0) socket(0x0, 0x0, 0x0) sendmsg$nl_route(0xffffffffffffffff, 0x0, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000100)=@newlink={0x48, 0x10, 0xffffff1f, 0x0, 0x0, {}, [@IFLA_LINKINFO={0x28, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x18, 0x2, 0x0, 0x1, [@IFLA_BR_MULTI_BOOLOPT={0xc, 0x2e, {0x0, 0x1}}, @IFLA_BR_MCAST_STATS_ENABLED={0x5}]}}}]}, 0x48}}, 0xcb270000) 17:03:32 executing program 2: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0xfdab1600, 0x0, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) 17:03:32 executing program 4: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x28a, 0x0, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x10}]}, 0x3c}}, 0x0) 17:03:32 executing program 5: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = 
socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x2b8, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) 17:03:32 executing program 1: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x28a, 0xa, {0x0, 0x0, 0x0, 0x0, 0x0, 0x6000}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8}]}, 0x3c}}, 0x0) [ 2847.980758][T22178] netlink: 'syz-executor.4': attribute type 1 has an invalid length. [ 2848.091283][T22195] "syz-executor.0" (22195) uses obsolete ecb(arc4) skcipher [ 2848.110894][T22178] bond654: entered promiscuous mode [ 2848.123136][T22178] 8021q: adding VLAN 0 to HW filter on device bond654 17:03:33 executing program 3: r0 = socket$netlink(0x10, 0x3, 0x0) socket(0x0, 0x0, 0x0) sendmsg$nl_route(0xffffffffffffffff, 0x0, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000100)=@newlink={0x48, 0x10, 0xffffff1f, 0x0, 0x0, {}, [@IFLA_LINKINFO={0x28, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x18, 0x2, 0x0, 0x1, [@IFLA_BR_MULTI_BOOLOPT={0xc, 0x2e, {0x0, 0x1}}, @IFLA_BR_MCAST_STATS_ENABLED={0x5}]}}}]}, 0x48}}, 0xf0ffffff) [ 2848.143047][T22183] netlink: 'syz-executor.2': attribute type 1 has an invalid length. [ 2848.222879][T22183] bond1009: entered promiscuous mode [ 2848.229608][T22183] 8021q: adding VLAN 0 to HW filter on device bond1009 [ 2848.251707][T22182] netlink: 'syz-executor.1': attribute type 1 has an invalid length. 17:03:33 executing program 3: r0 = socket$netlink(0x10, 0x3, 0x0) socket(0x0, 0x0, 0x0) sendmsg$nl_route(0xffffffffffffffff, 0x0, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000100)=@newlink={0x48, 0x10, 0xffffff1f, 0x0, 0x0, {}, [@IFLA_LINKINFO={0x28, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x18, 0x2, 0x0, 0x1, [@IFLA_BR_MULTI_BOOLOPT={0xc, 0x2e, {0x0, 0x1}}, @IFLA_BR_MCAST_STATS_ENABLED={0x5}]}}}]}, 0x48}}, 0xffffa888) [ 2848.327943][T22182] bond777: entered promiscuous mode [ 2848.334143][T22182] 8021q: adding VLAN 0 to HW filter on device bond777 [ 2848.351146][T22184] netlink: 'syz-executor.5': attribute type 1 has an invalid length. 
[ 2848.422965][T22184] bond1079: entered promiscuous mode [ 2848.429336][T22184] 8021q: adding VLAN 0 to HW filter on device bond1079 [ 2848.445652][T22189] netlink: 8 bytes leftover after parsing attributes in process `syz-executor.4'. 17:03:33 executing program 4: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x28a, 0x0, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x11}]}, 0x3c}}, 0x0) 17:03:33 executing program 1: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x28a, 0xa, {0x0, 0x0, 0x0, 0x0, 0x0, 0x6558}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8}]}, 0x3c}}, 0x0) 17:03:33 executing program 2: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0xfeab1600, 0x0, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) [ 2848.545892][T22192] bond1009: (slave bridge1075): making interface the new active one [ 2848.554458][T22192] bridge1075: entered promiscuous mode [ 2848.571009][T22192] bond1009: (slave bridge1075): Enslaving as an active interface with an up link 17:03:33 executing program 3: r0 = socket$netlink(0x10, 0x3, 0x0) socket(0x0, 0x0, 0x0) sendmsg$nl_route(0xffffffffffffffff, 0x0, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000100)=@newlink={0x48, 0x10, 0xffffff1f, 0x0, 0x0, {}, [@IFLA_LINKINFO={0x28, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x18, 0x2, 0x0, 0x1, [@IFLA_BR_MULTI_BOOLOPT={0xc, 0x2e, {0x0, 0x1}}, @IFLA_BR_MCAST_STATS_ENABLED={0x5}]}}}]}, 0x48}}, 0xfffff000) [ 2848.721615][T22197] bond1079: (slave 
bridge1106): making interface the new active one [ 2848.732007][T22197] bridge1106: entered promiscuous mode [ 2848.757146][T22197] bond1079: (slave bridge1106): Enslaving as an active interface with an up link [ 2848.882312][T22218] bond655: entered promiscuous mode [ 2848.888372][T22218] 8021q: adding VLAN 0 to HW filter on device bond655 [ 2848.944565][T22216] bond778: entered promiscuous mode [ 2848.954591][T22216] 8021q: adding VLAN 0 to HW filter on device bond778 [ 2849.004135][T22220] bond1010: entered promiscuous mode [ 2849.011524][T22220] 8021q: adding VLAN 0 to HW filter on device bond1010 [ 2849.026455][T22224] netlink: 8 bytes leftover after parsing attributes in process `syz-executor.4'. [ 2849.154126][T22226] bond1010: (slave bridge1076): making interface the new active one [ 2849.163185][T22226] bridge1076: entered promiscuous mode [ 2849.184628][T22226] bond1010: (slave bridge1076): Enslaving as an active interface with an up link [ 2849.372477][T22191] lo speed is unknown, defaulting to 1000 17:03:36 executing program 0: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x28a, 0x0, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0xf}]}, 0x3c}}, 0x0) 17:03:36 executing program 5: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x2ca, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) 17:03:36 executing program 3: r0 = socket$netlink(0x10, 0x3, 0x0) socket(0x0, 0x0, 0x0) sendmsg$nl_route(0xffffffffffffffff, 0x0, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000100)=@newlink={0x48, 0x10, 0xffffff1f, 0x0, 0x0, {}, [@IFLA_LINKINFO={0x28, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x18, 0x2, 0x0, 0x1, [@IFLA_BR_MULTI_BOOLOPT={0xc, 0x2e, {0x0, 0x1}}, @IFLA_BR_MCAST_STATS_ENABLED={0x5}]}}}]}, 0x48}}, 0xffffff7f) 17:03:36 executing program 4: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, 
&(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x28a, 0x0, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x13}]}, 0x3c}}, 0x0) 17:03:36 executing program 1: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x28a, 0xa, {0x0, 0x0, 0x0, 0x0, 0x0, 0x8100}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8}]}, 0x3c}}, 0x0) 17:03:36 executing program 2: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0xfeff0000, 0x0, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) [ 2851.546940][T22235] validate_nla: 3 callbacks suppressed [ 2851.546965][T22235] netlink: 'syz-executor.5': attribute type 1 has an invalid length. [ 2851.652570][T22235] bond1080: entered promiscuous mode [ 2851.659706][T22235] 8021q: adding VLAN 0 to HW filter on device bond1080 17:03:36 executing program 3: r0 = socket$netlink(0x10, 0x3, 0x0) socket(0x0, 0x0, 0x0) sendmsg$nl_route(0xffffffffffffffff, 0x0, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000100)=@newlink={0x48, 0x10, 0xffffff1f, 0x0, 0x0, {}, [@IFLA_LINKINFO={0x28, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x18, 0x2, 0x0, 0x1, [@IFLA_BR_MULTI_BOOLOPT={0xc, 0x2e, {0x0, 0x1}}, @IFLA_BR_MCAST_STATS_ENABLED={0x5}]}}}]}, 0x48}}, 0xffffff9e) [ 2851.696479][T22239] netlink: 'syz-executor.4': attribute type 1 has an invalid length. 
[ 2851.870565][T22239] bond656: entered promiscuous mode [ 2851.887523][T22239] 8021q: adding VLAN 0 to HW filter on device bond656 17:03:36 executing program 3: r0 = socket$netlink(0x10, 0x3, 0x0) socket(0x0, 0x0, 0x0) sendmsg$nl_route(0xffffffffffffffff, 0x0, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000100)=@newlink={0x48, 0x10, 0xffffff1f, 0x0, 0x0, {}, [@IFLA_LINKINFO={0x28, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x18, 0x2, 0x0, 0x1, [@IFLA_BR_MULTI_BOOLOPT={0xc, 0x2e, {0x0, 0x1}}, @IFLA_BR_MCAST_STATS_ENABLED={0x5}]}}}]}, 0x48}}, 0xfffffff0) [ 2851.933020][T22244] netlink: 'syz-executor.2': attribute type 1 has an invalid length. [ 2852.061652][T22244] bond1011: entered promiscuous mode [ 2852.083358][T22244] 8021q: adding VLAN 0 to HW filter on device bond1011 [ 2852.124963][T22247] netlink: 'syz-executor.0': attribute type 1 has an invalid length. 17:03:37 executing program 3: r0 = socket$netlink(0x10, 0x3, 0x0) socket(0x0, 0x0, 0x0) sendmsg$nl_route(0xffffffffffffffff, 0x0, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000100)=@newlink={0x48, 0x10, 0xffffff1f, 0x0, 0x0, {}, [@IFLA_LINKINFO={0x28, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x18, 0x2, 0x0, 0x1, [@IFLA_BR_MULTI_BOOLOPT={0xc, 0x2e, {0x0, 0x1}}, @IFLA_BR_MCAST_STATS_ENABLED={0x5}]}}}]}, 0x48}}, 0xf0ffffffffffff) [ 2852.256888][T22247] bond495: entered promiscuous mode [ 2852.280461][T22247] 8021q: adding VLAN 0 to HW filter on device bond495 [ 2852.313305][T22243] netlink: 'syz-executor.1': attribute type 1 has an invalid length. [ 2852.440019][T22243] bond779: entered promiscuous mode 17:03:37 executing program 3: r0 = socket$netlink(0x10, 0x3, 0x0) socket(0x0, 0x0, 0x0) sendmsg$nl_route(0xffffffffffffffff, 0x0, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000100)=@newlink={0x48, 0x10, 0xffffff1f, 0x0, 0x0, {}, [@IFLA_LINKINFO={0x28, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x18, 0x2, 0x0, 0x1, [@IFLA_BR_MULTI_BOOLOPT={0xc, 0x2e, {0x0, 0x1}}, @IFLA_BR_MCAST_STATS_ENABLED={0x5}]}}}]}, 0x48}}, 0x100000000000000) [ 2852.462353][T22243] 8021q: adding VLAN 0 to HW filter on device bond779 [ 2852.548072][T22248] bond1080: (slave bridge1107): making interface the new active one [ 2852.556317][T22248] bridge1107: entered promiscuous mode [ 2852.575952][T22248] bond1080: (slave bridge1107): Enslaving as an active interface with an up link 17:03:37 executing program 5: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x2cc, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) [ 2852.601918][T22250] netlink: 8 bytes leftover after parsing attributes in process `syz-executor.4'. 
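The two warnings that recur throughout this log come from the kernel's netlink attribute handling: "attribute type N has an invalid length" is the policy-validation complaint for a declared nla_len that does not match the size expected for that attribute type, and "N bytes leftover after parsing attributes" is printed when the tail of a message is too short or too inconsistent to form another attribute. The snippet below is an illustrative user-space rendition of that attribute walk with a made-up buffer, not the kernel code and not the exact repro blob.

/* Simplified user-space rendition of the netlink attribute walk, showing
 * where "N bytes leftover after parsing attributes" comes from: the walk
 * stops once the remaining bytes cannot hold a self-consistent attribute,
 * and whatever remains is the "leftover".  The buffer is made up. */
#include <stdio.h>
#include <stdint.h>

struct nlattr { uint16_t nla_len; uint16_t nla_type; };
#define NLA_ALIGN(len) (((len) + 3) & ~3)

static int nla_ok(const struct nlattr *nla, int rem)
{
    return rem >= (int)sizeof(struct nlattr) &&
           nla->nla_len >= sizeof(struct nlattr) &&
           nla->nla_len <= rem;
}

static const struct nlattr *nla_next(const struct nlattr *nla, int *rem)
{
    int totlen = NLA_ALIGN(nla->nla_len);
    *rem -= totlen;
    return (const struct nlattr *)((const char *)nla + totlen);
}

int main(void)
{
    /* One well-formed 8-byte attribute, then 5 stray bytes whose header
     * claims a length that no longer fits in the buffer. */
    unsigned char buf[] = {
        0x08, 0x00, 0x0a, 0x00, 0x02, 0x00, 0x00, 0x00,  /* type 10, len 8 */
        0x08, 0x00, 0x01, 0x00, 0x01,                    /* claims len 8, only 5 bytes left */
    };
    int rem = sizeof(buf);
    const struct nlattr *nla = (const struct nlattr *)buf;

    while (nla_ok(nla, rem)) {
        printf("attribute type %u, declared length %u\n",
               (unsigned)nla->nla_type, (unsigned)nla->nla_len);
        nla = nla_next(nla, &rem);
    }
    if (rem > 0)
        printf("%d bytes leftover after parsing attributes\n", rem);
    return 0;
}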
17:03:37 executing program 4: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x28a, 0x0, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x14}]}, 0x3c}}, 0x0) [ 2852.873929][T22252] bond1011: (slave bridge1077): making interface the new active one [ 2852.883780][T22252] bridge1077: entered promiscuous mode [ 2852.895392][T22252] bond1011: (slave bridge1077): Enslaving as an active interface with an up link [ 2852.906060][T22254] netlink: 8 bytes leftover after parsing attributes in process `syz-executor.0'. 17:03:37 executing program 0: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x28a, 0x0, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0xf}]}, 0x3c}}, 0x0) 17:03:37 executing program 2: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0xfeffffff, 0x0, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) 17:03:38 executing program 1: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, 
&(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x28a, 0xa, {0x0, 0x0, 0x0, 0x0, 0x0, 0xf000}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8}]}, 0x3c}}, 0x0) 17:03:38 executing program 3: r0 = socket$netlink(0x10, 0x3, 0x0) socket(0x0, 0x0, 0x0) sendmsg$nl_route(0xffffffffffffffff, 0x0, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000100)=@newlink={0x48, 0x10, 0xffffff1f, 0x0, 0x0, {}, [@IFLA_LINKINFO={0x28, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x18, 0x2, 0x0, 0x1, [@IFLA_BR_MULTI_BOOLOPT={0xc, 0x2e, {0x0, 0x1}}, @IFLA_BR_MCAST_STATS_ENABLED={0x5}]}}}]}, 0x48}}, 0x200000000000000) [ 2853.012603][T22282] netlink: 'syz-executor.5': attribute type 1 has an invalid length. [ 2853.146061][T22282] bond1081: entered promiscuous mode [ 2853.165799][T22282] 8021q: adding VLAN 0 to HW filter on device bond1081 [ 2853.307138][T22283] bond1081: (slave bridge1108): making interface the new active one [ 2853.321757][T22283] bridge1108: entered promiscuous mode 17:03:38 executing program 5: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x2d2, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) [ 2853.347410][T22283] bond1081: (slave bridge1108): Enslaving as an active interface with an up link [ 2853.379097][T22285] netlink: 'syz-executor.4': attribute type 1 has an invalid length. [ 2853.460524][T22285] bond657: entered promiscuous mode [ 2853.466405][T22285] 8021q: adding VLAN 0 to HW filter on device bond657 [ 2853.478829][T22286] netlink: 8 bytes leftover after parsing attributes in process `syz-executor.4'. 17:03:38 executing program 4: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x28a, 0x0, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0x2}]}, 0x3c}}, 0x0) [ 2853.523880][T22290] netlink: 'syz-executor.2': attribute type 1 has an invalid length. [ 2853.601690][T22290] bond1012: entered promiscuous mode [ 2853.607859][T22290] 8021q: adding VLAN 0 to HW filter on device bond1012 [ 2853.627806][T22292] netlink: 'syz-executor.1': attribute type 1 has an invalid length. 
[ 2853.668500][T22292] bond780: entered promiscuous mode [ 2853.674431][T22292] 8021q: adding VLAN 0 to HW filter on device bond780 [ 2853.698142][T22293] netlink: 'syz-executor.0': attribute type 1 has an invalid length. [ 2853.754499][T22293] bond496: entered promiscuous mode [ 2853.760296][T22293] 8021q: adding VLAN 0 to HW filter on device bond496 17:03:38 executing program 2: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0xff0f0000, 0x0, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) [ 2853.823913][T22297] bond1012: (slave bridge1078): making interface the new active one [ 2853.834052][T22297] bridge1078: entered promiscuous mode [ 2853.846311][T22297] bond1012: (slave bridge1078): Enslaving as an active interface with an up link 17:03:38 executing program 1: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x28a, 0xa, {0x0, 0x0, 0x0, 0x0, 0x0, 0x160000}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8}]}, 0x3c}}, 0x0) [ 2853.903795][T22299] netlink: 8 bytes leftover after parsing attributes in process `syz-executor.0'. 
17:03:38 executing program 0: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x28a, 0x0, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0xf}]}, 0x3c}}, 0x0) 17:03:39 executing program 3: r0 = socket$netlink(0x10, 0x3, 0x0) socket(0x0, 0x0, 0x0) sendmsg$nl_route(0xffffffffffffffff, 0x0, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000100)=@newlink={0x48, 0x10, 0xffffff1f, 0x0, 0x0, {}, [@IFLA_LINKINFO={0x28, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x18, 0x2, 0x0, 0x1, [@IFLA_BR_MULTI_BOOLOPT={0xc, 0x2e, {0x0, 0x1}}, @IFLA_BR_MCAST_STATS_ENABLED={0x5}]}}}]}, 0x48}}, 0x400000000000000) [ 2854.104459][T22303] bond1082: entered promiscuous mode [ 2854.114790][T22303] 8021q: adding VLAN 0 to HW filter on device bond1082 17:03:39 executing program 5: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x2e0, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) [ 2854.212572][T22305] bond1082: (slave bridge1109): making interface the new active one [ 2854.222873][T22305] bridge1109: entered promiscuous mode [ 2854.236622][T22305] bond1082: (slave bridge1109): Enslaving as an active interface with an up link [ 2854.332596][T22309] bond658: entered promiscuous mode [ 2854.338512][T22309] 8021q: adding VLAN 0 to HW filter on device bond658 17:03:39 executing program 4: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x28a, 0x0, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0x3}]}, 0x3c}}, 0x0) [ 2854.383292][T22314] bond1013: 
entered promiscuous mode [ 2854.390221][T22314] 8021q: adding VLAN 0 to HW filter on device bond1013 [ 2854.441576][T22317] bond781: entered promiscuous mode [ 2854.447274][T22317] 8021q: adding VLAN 0 to HW filter on device bond781 [ 2854.507910][T22318] bond1013: (slave bridge1079): making interface the new active one [ 2854.518138][T22318] bridge1079: entered promiscuous mode [ 2854.532111][T22318] bond1013: (slave bridge1079): Enslaving as an active interface with an up link 17:03:39 executing program 1: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x28a, 0xa, {0x0, 0x0, 0x0, 0x0, 0x0, 0xf0ffff}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8}]}, 0x3c}}, 0x0) 17:03:39 executing program 2: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0xff7f0000, 0x0, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) [ 2854.670353][T22322] bond497: entered promiscuous mode [ 2854.676136][T22322] 8021q: adding VLAN 0 to HW filter on device bond497 [ 2854.689944][T22326] netlink: 8 bytes leftover after parsing attributes in process `syz-executor.0'. 
17:03:39 executing program 0: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x28a, 0x0, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x13}]}, 0x3c}}, 0x0) 17:03:39 executing program 3: r0 = socket$netlink(0x10, 0x3, 0x0) socket(0x0, 0x0, 0x0) sendmsg$nl_route(0xffffffffffffffff, 0x0, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000100)=@newlink={0x48, 0x10, 0xffffff1f, 0x0, 0x0, {}, [@IFLA_LINKINFO={0x28, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x18, 0x2, 0x0, 0x1, [@IFLA_BR_MULTI_BOOLOPT={0xc, 0x2e, {0x0, 0x1}}, @IFLA_BR_MCAST_STATS_ENABLED={0x5}]}}}]}, 0x48}}, 0x600000000000000) [ 2854.889147][T22330] bond1083: entered promiscuous mode [ 2854.895498][T22330] 8021q: adding VLAN 0 to HW filter on device bond1083 [ 2855.015345][T22332] bond1083: (slave bridge1110): making interface the new active one [ 2855.029698][T22332] bridge1110: entered promiscuous mode [ 2855.044068][T22332] bond1083: (slave bridge1110): Enslaving as an active interface with an up link 17:03:40 executing program 5: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x2ea, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) [ 2855.110091][T22336] bond659: entered promiscuous mode [ 2855.124624][T22336] 8021q: adding VLAN 0 to HW filter on device bond659 17:03:40 executing program 4: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x28a, 0x0, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0x4}]}, 0x3c}}, 0x0) [ 2855.214876][T22343] bond782: 
entered promiscuous mode [ 2855.238088][T22343] 8021q: adding VLAN 0 to HW filter on device bond782 [ 2855.290908][T22344] bond1014: entered promiscuous mode [ 2855.296662][T22344] 8021q: adding VLAN 0 to HW filter on device bond1014 17:03:40 executing program 1: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x28a, 0xa, {0x0, 0x0, 0x0, 0x0, 0x0, 0x1000000}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8}]}, 0x3c}}, 0x0) [ 2855.468317][T22346] bond1014: (slave bridge1080): making interface the new active one [ 2855.488185][T22346] bridge1080: entered promiscuous mode 17:03:40 executing program 2: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0xffab1600, 0x0, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) [ 2855.520953][T22346] bond1014: (slave bridge1080): Enslaving as an active interface with an up link [ 2855.613588][T22349] bond498: entered promiscuous mode [ 2855.632774][T22349] 8021q: adding VLAN 0 to HW filter on device bond498 [ 2855.688392][T22352] netlink: 8 bytes leftover after parsing attributes in process `syz-executor.0'. 
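The bondN/bridgeM pairs that the "making interface the new active one" and "Enslaving as an active interface with an up link" lines describe can be checked after the fact through the bonding driver's sysfs interface: /sys/class/net/<bond>/bonding/slaves lists the enslaved devices and .../bonding/active_slave names the current active one. A minimal reader is sketched below; the bond name is a placeholder for one of the bondN devices created by the repros.

/* Minimal sysfs reader for the bonding state the log messages describe.
 * "bond0" is a placeholder device name. */
#include <stdio.h>

static void dump(const char *path)
{
    char line[256];
    FILE *f = fopen(path, "r");
    if (!f) { perror(path); return; }
    if (fgets(line, sizeof(line), f))
        printf("%s: %s", path, line);
    fclose(f);
}

int main(void)
{
    dump("/sys/class/net/bond0/bonding/slaves");        /* devices enslaved to the bond */
    dump("/sys/class/net/bond0/bonding/active_slave");  /* the currently active slave */
    return 0;
}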
17:03:40 executing program 0: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x28a, 0x0, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x13}]}, 0x3c}}, 0x0) 17:03:40 executing program 3: r0 = socket$netlink(0x10, 0x3, 0x0) socket(0x0, 0x0, 0x0) sendmsg$nl_route(0xffffffffffffffff, 0x0, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000100)=@newlink={0x48, 0x10, 0xffffff1f, 0x0, 0x0, {}, [@IFLA_LINKINFO={0x28, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x18, 0x2, 0x0, 0x1, [@IFLA_BR_MULTI_BOOLOPT={0xc, 0x2e, {0x0, 0x1}}, @IFLA_BR_MCAST_STATS_ENABLED={0x5}]}}}]}, 0x48}}, 0x800000000000000) [ 2855.933577][T22363] bond1084: entered promiscuous mode [ 2855.940050][T22363] 8021q: adding VLAN 0 to HW filter on device bond1084 17:03:41 executing program 5: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x2f2, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) [ 2855.995134][T22366] bond1084: (slave bridge1111): making interface the new active one [ 2856.004218][T22366] bridge1111: entered promiscuous mode [ 2856.017340][T22366] bond1084: (slave bridge1111): Enslaving as an active interface with an up link [ 2856.077763][T22367] bond660: entered promiscuous mode [ 2856.089905][T22367] 8021q: adding VLAN 0 to HW filter on device bond660 17:03:41 executing program 4: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x28a, 0x0, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0x5}]}, 0x3c}}, 0x0) [ 2856.224352][T22372] bond783: 
entered promiscuous mode [ 2856.235269][T22372] 8021q: adding VLAN 0 to HW filter on device bond783 17:03:41 executing program 1: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x28a, 0xa, {0x0, 0x0, 0x0, 0x0, 0x0, 0x2000000}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8}]}, 0x3c}}, 0x0) [ 2856.336074][T22376] bond1015: entered promiscuous mode [ 2856.351655][T22376] 8021q: adding VLAN 0 to HW filter on device bond1015 [ 2856.418455][T22377] bond1015: (slave bridge1081): making interface the new active one [ 2856.427182][T22377] bridge1081: entered promiscuous mode [ 2856.442033][T22377] bond1015: (slave bridge1081): Enslaving as an active interface with an up link 17:03:41 executing program 2: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0xffff0300, 0x0, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) [ 2856.558697][T22384] bond499: entered promiscuous mode [ 2856.564368][T22384] 8021q: adding VLAN 0 to HW filter on device bond499 17:03:41 executing program 3: r0 = socket$netlink(0x10, 0x3, 0x0) socket(0x0, 0x0, 0x0) sendmsg$nl_route(0xffffffffffffffff, 0x0, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000100)=@newlink={0x48, 0x10, 0xffffff1f, 0x0, 0x0, {}, [@IFLA_LINKINFO={0x28, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x18, 0x2, 0x0, 0x1, [@IFLA_BR_MULTI_BOOLOPT={0xc, 0x2e, {0x0, 0x1}}, @IFLA_BR_MCAST_STATS_ENABLED={0x5}]}}}]}, 0x48}}, 0xa00000000000000) [ 2856.641601][T22387] netlink: 8 bytes leftover after parsing attributes in process `syz-executor.0'. [ 2856.723457][T22391] validate_nla: 15 callbacks suppressed [ 2856.723481][T22391] netlink: 'syz-executor.5': attribute type 1 has an invalid length. 
17:03:41 executing program 0: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x28a, 0x0, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0xf}]}, 0x3c}}, 0x0) [ 2856.827344][T22391] bond1085: entered promiscuous mode [ 2856.848017][T22391] 8021q: adding VLAN 0 to HW filter on device bond1085 [ 2856.948029][T22393] bond1085: (slave bridge1112): making interface the new active one [ 2856.956521][T22393] bridge1112: entered promiscuous mode [ 2856.970822][T22393] bond1085: (slave bridge1112): Enslaving as an active interface with an up link [ 2856.986745][T22395] netlink: 'syz-executor.4': attribute type 1 has an invalid length. 17:03:41 executing program 5: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x2f4, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) [ 2857.085178][T22395] bond661: entered promiscuous mode [ 2857.096190][T22395] 8021q: adding VLAN 0 to HW filter on device bond661 17:03:42 executing program 4: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x28a, 0x0, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0x6}]}, 0x3c}}, 0x0) [ 2857.233211][T22401] netlink: 'syz-executor.1': attribute type 1 has an invalid length. 
17:03:42 executing program 1: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x28a, 0xa, {0x0, 0x0, 0x0, 0x0, 0x0, 0x4000000}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8}]}, 0x3c}}, 0x0) [ 2857.253059][T22401] workqueue: Failed to create a rescuer kthread for wq "bond784": -EINTR [ 2857.346665][T22405] netlink: 'syz-executor.2': attribute type 1 has an invalid length. [ 2857.453242][T22405] bond1016: entered promiscuous mode [ 2857.469044][T22405] 8021q: adding VLAN 0 to HW filter on device bond1016 [ 2857.564579][T22406] bond1016: (slave bridge1082): making interface the new active one [ 2857.577367][T22406] bridge1082: entered promiscuous mode [ 2857.597750][T22406] bond1016: (slave bridge1082): Enslaving as an active interface with an up link 17:03:42 executing program 2: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0xffffa888, 0x0, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) 17:03:42 executing program 3: r0 = socket$netlink(0x10, 0x3, 0x0) socket(0x0, 0x0, 0x0) sendmsg$nl_route(0xffffffffffffffff, 0x0, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000100)=@newlink={0x48, 0x10, 0xffffff1f, 0x0, 0x0, {}, [@IFLA_LINKINFO={0x28, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x18, 0x2, 0x0, 0x1, [@IFLA_BR_MULTI_BOOLOPT={0xc, 0x2e, {0x0, 0x1}}, @IFLA_BR_MCAST_STATS_ENABLED={0x5}]}}}]}, 0x48}}, 0xc00000000000000) [ 2857.635541][T22415] netlink: 'syz-executor.0': attribute type 1 has an invalid length. [ 2857.781895][T22415] bond500: entered promiscuous mode [ 2857.791005][T22415] 8021q: adding VLAN 0 to HW filter on device bond500 [ 2857.804083][T22416] netlink: 8 bytes leftover after parsing attributes in process `syz-executor.0'. 
17:03:42 executing program 0: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x28a, 0x0, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0xf}]}, 0x3c}}, 0x0) [ 2857.834010][T22420] netlink: 'syz-executor.5': attribute type 1 has an invalid length. 17:03:42 executing program 5: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x2fa, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) [ 2857.895437][T22420] bond1086: entered promiscuous mode [ 2857.902331][T22420] 8021q: adding VLAN 0 to HW filter on device bond1086 [ 2857.922320][T22424] netlink: 'syz-executor.4': attribute type 1 has an invalid length. [ 2858.026745][T22424] bond662: entered promiscuous mode [ 2858.034687][T22424] 8021q: adding VLAN 0 to HW filter on device bond662 17:03:43 executing program 4: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x28a, 0x0, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0x7}]}, 0x3c}}, 0x0) [ 2858.116568][T22430] netlink: 'syz-executor.1': attribute type 1 has an invalid length. 
[ 2858.209783][T22430] bond784: entered promiscuous mode [ 2858.216575][T22430] 8021q: adding VLAN 0 to HW filter on device bond784 17:03:43 executing program 1: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x28a, 0xa, {0x0, 0x0, 0x0, 0x0, 0x0, 0x6000000}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8}]}, 0x3c}}, 0x0) [ 2858.268135][T22435] netlink: 'syz-executor.2': attribute type 1 has an invalid length. [ 2858.357630][T22435] bond1017: entered promiscuous mode [ 2858.365486][T22435] 8021q: adding VLAN 0 to HW filter on device bond1017 [ 2858.457182][T22439] bond1017: (slave bridge1083): making interface the new active one [ 2858.469571][T22439] bridge1083: entered promiscuous mode [ 2858.491278][T22439] bond1017: (slave bridge1083): Enslaving as an active interface with an up link 17:03:43 executing program 2: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0xfffff000, 0x0, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) 17:03:43 executing program 3: r0 = socket$netlink(0x10, 0x3, 0x0) socket(0x0, 0x0, 0x0) sendmsg$nl_route(0xffffffffffffffff, 0x0, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000100)=@newlink={0x48, 0x10, 0xffffff1f, 0x0, 0x0, {}, [@IFLA_LINKINFO={0x28, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x18, 0x2, 0x0, 0x1, [@IFLA_BR_MULTI_BOOLOPT={0xc, 0x2e, {0x0, 0x1}}, @IFLA_BR_MCAST_STATS_ENABLED={0x5}]}}}]}, 0x48}}, 0xe00000000000000) [ 2858.541788][T22446] netlink: 'syz-executor.0': attribute type 1 has an invalid length. [ 2858.604837][T22446] bond501: entered promiscuous mode [ 2858.629178][T22446] 8021q: adding VLAN 0 to HW filter on device bond501 [ 2858.664005][T22449] netlink: 8 bytes leftover after parsing attributes in process `syz-executor.0'. 
[ 2858.843994][T22448] bond1087: entered promiscuous mode 17:03:43 executing program 0: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x28a, 0x0, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0xb}]}, 0x3c}}, 0x0) [ 2858.876496][T22448] 8021q: adding VLAN 0 to HW filter on device bond1087 [ 2859.003372][T22450] bond1087: (slave bridge1113): making interface the new active one [ 2859.026552][T22450] bridge1113: entered promiscuous mode 17:03:44 executing program 5: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x300, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) [ 2859.050898][T22450] bond1087: (slave bridge1113): Enslaving as an active interface with an up link [ 2859.103791][T22453] workqueue: Failed to create a rescuer kthread for wq "bond663": -EINTR [ 2859.190705][T22477] bond1088: entered promiscuous mode [ 2859.205190][T22477] 8021q: adding VLAN 0 to HW filter on device bond1088 17:03:44 executing program 4: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x28a, 0x0, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0x8}]}, 0x3c}}, 0x0) 17:03:44 executing program 1: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, 
&(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x28a, 0xa, {0x0, 0x0, 0x0, 0x0, 0x0, 0x8000000}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8}]}, 0x3c}}, 0x0) 17:03:44 executing program 3: r0 = socket$netlink(0x10, 0x3, 0x0) socket(0x0, 0x0, 0x0) sendmsg$nl_route(0xffffffffffffffff, 0x0, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000100)=@newlink={0x48, 0x10, 0xffffff1f, 0x0, 0x0, {}, [@IFLA_LINKINFO={0x28, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x18, 0x2, 0x0, 0x1, [@IFLA_BR_MULTI_BOOLOPT={0xc, 0x2e, {0x0, 0x1}}, @IFLA_BR_MCAST_STATS_ENABLED={0x5}]}}}]}, 0x48}}, 0x1000000000000000) [ 2859.241727][T22458] workqueue: Failed to create a rescuer kthread for wq "bond785": -EINTR [ 2859.511998][T22467] bond1018: entered promiscuous mode [ 2859.541009][T22467] 8021q: adding VLAN 0 to HW filter on device bond1018 [ 2859.643384][T22470] bond1018: (slave bridge1084): making interface the new active one [ 2859.652762][T22470] bridge1084: entered promiscuous mode [ 2859.672583][T22470] bond1018: (slave bridge1084): Enslaving as an active interface with an up link 17:03:44 executing program 2: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0xffffff7f, 0x0, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) [ 2859.747016][T22472] bond502: entered promiscuous mode [ 2859.773566][T22472] 8021q: adding VLAN 0 to HW filter on device bond502 17:03:44 executing program 0: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x28a, 0x0, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0xb}]}, 0x3c}}, 0x0) [ 2859.827805][T22473] netlink: 8 bytes leftover after parsing attributes in process `syz-executor.0'. 
17:03:45 executing program 5: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x312, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) [ 2859.984392][T22480] bond1088: (slave bridge1114): making interface the new active one [ 2859.993727][T22480] bridge1114: entered promiscuous mode [ 2860.008016][T22480] bond1088: (slave bridge1114): Enslaving as an active interface with an up link [ 2860.114808][T22484] bond663: entered promiscuous mode [ 2860.132008][T22484] 8021q: adding VLAN 0 to HW filter on device bond663 17:03:45 executing program 4: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x28a, 0x0, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0x9}]}, 0x3c}}, 0x0) [ 2860.257053][T22489] bond785: entered promiscuous mode [ 2860.265325][T22489] 8021q: adding VLAN 0 to HW filter on device bond785 17:03:45 executing program 3: r0 = socket$netlink(0x10, 0x3, 0x0) socket(0x0, 0x0, 0x0) sendmsg$nl_route(0xffffffffffffffff, 0x0, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000100)=@newlink={0x48, 0x10, 0xffffff1f, 0x0, 0x0, {}, [@IFLA_LINKINFO={0x28, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x18, 0x2, 0x0, 0x1, [@IFLA_BR_MULTI_BOOLOPT={0xc, 0x2e, {0x0, 0x1}}, @IFLA_BR_MCAST_STATS_ENABLED={0x5}]}}}]}, 0x48}}, 0x6000000000000000) 17:03:45 executing program 1: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x28a, 0xa, {0x0, 0x0, 0x0, 0x0, 0x0, 0xa000000}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8}]}, 0x3c}}, 0x0) [ 
2860.451403][T22499] bond1019: entered promiscuous mode [ 2860.457572][T22499] 8021q: adding VLAN 0 to HW filter on device bond1019 17:03:45 executing program 2: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0xffffff9e, 0x0, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) [ 2860.586345][T22501] bond1019: (slave bridge1085): making interface the new active one [ 2860.596076][T22501] bridge1085: entered promiscuous mode [ 2860.611805][T22501] bond1019: (slave bridge1085): Enslaving as an active interface with an up link [ 2860.702740][T22503] bond503: entered promiscuous mode [ 2860.712489][T22503] 8021q: adding VLAN 0 to HW filter on device bond503 [ 2860.725652][T22505] netlink: 8 bytes leftover after parsing attributes in process `syz-executor.0'. 17:03:45 executing program 0: sendmsg$NL80211_CMD_CONNECT(0xffffffffffffffff, &(0x7f00000001c0)={0x0, 0x0, 0x0}, 0x0) r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000001c0)='cgroup.controllers\x00', 0x275a, 0x0) r1 = socket$inet6_udplite(0xa, 0x2, 0x88) r2 = socket$nl_generic(0x10, 0x3, 0x10) r3 = syz_genetlink_get_family_id$nl80211(&(0x7f0000000080), 0xffffffffffffffff) sendmsg$NL80211_CMD_JOIN_MESH(r2, &(0x7f0000000440)={0x0, 0x0, &(0x7f0000000400)={&(0x7f0000000340)={0x20, r3, 0x31d, 0x0, 0x0, {{0x11}, {@void, @val={0xc, 0x117}}}}, 0x20}}, 0x0) sendmsg$NL80211_CMD_SET_INTERFACE(r0, &(0x7f00000002c0)={&(0x7f0000000180)={0x10, 0x0, 0x0, 0x2000000}, 0xc, &(0x7f0000000280)={&(0x7f0000000200)={0x28, r3, 0x8, 0x70bd25, 0x25dfdbff, {{}, {@void, @void}}, [@NL80211_ATTR_MESH_ID={0xa}, @NL80211_ATTR_IFTYPE={0x8, 0x5, 0x1}]}, 0x28}, 0x1, 0x0, 0x0, 0x4004050}, 0x8054) ioctl$sock_SIOCGIFINDEX_80211(r1, 0x8933, &(0x7f0000000240)={'wlan1\x00', 0x0}) r5 = syz_genetlink_get_family_id$nl80211(&(0x7f0000000140), 0xffffffffffffffff) socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_FRAME(0xffffffffffffffff, &(0x7f0000000100)={0x0, 0x0, &(0x7f0000000000)={&(0x7f0000000040)=ANY=[@ANYBLOB="48040000", @ANYRES16=r5, @ANYBLOB="01edff000000000004003b1c210008000300", @ANYRES32=r4, @ANYBLOB="2c0433005000de295b3acba52ee4080211000001"], 0x448}}, 0x0) r6 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000001c0)='memory.current\x00', 0x275a, 0x0) write$binfmt_script(r6, &(0x7f0000000000)=ANY=[], 0x208e24b) mmap(&(0x7f0000000000/0xb36000)=nil, 0xb36000, 0x2, 0x28011, r6, 0x0) preadv(r6, &(0x7f00000015c0)=[{&(0x7f0000000080)=""/124, 0xffffffff000}], 0x5, 0x0, 0x0) write$binfmt_script(r0, &(0x7f0000000040)=ANY=[], 0x208e24b) r7 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000080)='cgroup.controllers\x00', 0xb00000000065808, 0x0) ioctl$EXT4_IOC_CLEAR_ES_CACHE(0xffffffffffffffff, 0x6628) r8 = socket$nl_generic(0x10, 0x3, 0x10) sendfile(r8, r7, 0x0, 0x10000a006) [ 2860.831901][T22508] bond1089: entered promiscuous mode [ 2860.837604][T22508] 8021q: adding VLAN 0 to HW filter on 
device bond1089 17:03:45 executing program 5: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x314, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) [ 2860.896469][T22510] bond1089: (slave bridge1115): making interface the new active one [ 2860.905645][T22510] bridge1115: entered promiscuous mode [ 2860.920748][T22510] bond1089: (slave bridge1115): Enslaving as an active interface with an up link [ 2861.023513][T22512] bond664: entered promiscuous mode [ 2861.029456][T22512] 8021q: adding VLAN 0 to HW filter on device bond664 17:03:46 executing program 4: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x28a, 0x0, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xb}]}, 0x3c}}, 0x0) [ 2861.090205][ T27] audit: type=1804 audit(1688403826.054:421): pid=22535 uid=0 auid=4294967295 ses=4294967295 subj=unconfined op=invalid_pcr cause=open_writers comm="syz-executor.0" name="/root/syzkaller-testdir444109964/syzkaller.UHVosO/1977/cgroup.controllers" dev="sda1" ino=1971 res=1 errno=0 [ 2861.183253][T22520] bond786: entered promiscuous mode [ 2861.189783][T22520] 8021q: adding VLAN 0 to HW filter on device bond786 17:03:46 executing program 3: r0 = socket$netlink(0x10, 0x3, 0x0) socket(0x0, 0x0, 0x0) sendmsg$nl_route(0xffffffffffffffff, 0x0, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000100)=@newlink={0x48, 0x10, 0xffffff1f, 0x0, 0x0, {}, [@IFLA_LINKINFO={0x28, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x18, 0x2, 0x0, 0x1, [@IFLA_BR_MULTI_BOOLOPT={0xc, 0x2e, {0x0, 0x1}}, @IFLA_BR_MCAST_STATS_ENABLED={0x5}]}}}]}, 0x48}}, 0x6558000000000000) 17:03:46 executing program 1: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, 
@ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x28a, 0xa, {0x0, 0x0, 0x0, 0x0, 0x0, 0xc000000}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8}]}, 0x3c}}, 0x0) [ 2861.411822][T22528] bond1020: entered promiscuous mode [ 2861.420524][T22528] 8021q: adding VLAN 0 to HW filter on device bond1020 17:03:46 executing program 2: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0xffffffa1, 0x0, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) [ 2861.493178][T22529] bond1020: (slave bridge1086): making interface the new active one [ 2861.502326][T22529] bridge1086: entered promiscuous mode [ 2861.519750][T22529] bond1020: (slave bridge1086): Enslaving as an active interface with an up link [ 2861.590006][T22538] bond1090: entered promiscuous mode [ 2861.596070][T22538] 8021q: adding VLAN 0 to HW filter on device bond1090 [ 2861.663789][T22542] bond665: entered promiscuous mode [ 2861.671456][T22542] 8021q: adding VLAN 0 to HW filter on device bond665 [ 2861.746468][T22543] bond1090: (slave bridge1116): making interface the new active one [ 2861.764194][T22543] bridge1116: entered promiscuous mode [ 2861.778520][T22543] bond1090: (slave bridge1116): Enslaving as an active interface with an up link 17:03:46 executing program 5: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x31a, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) 17:03:46 executing program 4: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, 
@ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x28a, 0x0, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xc}]}, 0x3c}}, 0x0) [ 2861.836067][T22551] validate_nla: 16 callbacks suppressed [ 2861.836091][T22551] netlink: 'syz-executor.1': attribute type 1 has an invalid length. [ 2861.921589][T22551] bond787: entered promiscuous mode [ 2861.927292][T22551] 8021q: adding VLAN 0 to HW filter on device bond787 17:03:47 executing program 3: r0 = socket$netlink(0x10, 0x3, 0x0) socket(0x0, 0x0, 0x0) sendmsg$nl_route(0xffffffffffffffff, 0x0, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000100)=@newlink={0x48, 0x10, 0xffffff1f, 0x0, 0x0, {}, [@IFLA_LINKINFO={0x28, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x18, 0x2, 0x0, 0x1, [@IFLA_BR_MULTI_BOOLOPT={0xc, 0x2e, {0x0, 0x1}}, @IFLA_BR_MCAST_STATS_ENABLED={0x5}]}}}]}, 0x48}}, 0x8100000000000000) 17:03:47 executing program 1: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x28a, 0xa, {0x0, 0x0, 0x0, 0x0, 0x0, 0xe000000}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8}]}, 0x3c}}, 0x0) [ 2862.035357][T22558] netlink: 'syz-executor.2': attribute type 1 has an invalid length. 
[ 2862.144092][T22558] bond1021: entered promiscuous mode [ 2862.150991][T22558] 8021q: adding VLAN 0 to HW filter on device bond1021 17:03:47 executing program 0: sendmsg$NL80211_CMD_CONNECT(0xffffffffffffffff, &(0x7f00000001c0)={0x0, 0x0, 0x0}, 0x0) (async) r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000001c0)='cgroup.controllers\x00', 0x275a, 0x0) (async) r1 = socket$inet6_udplite(0xa, 0x2, 0x88) (async) r2 = socket$nl_generic(0x10, 0x3, 0x10) (async) r3 = syz_genetlink_get_family_id$nl80211(&(0x7f0000000080), 0xffffffffffffffff) sendmsg$NL80211_CMD_JOIN_MESH(r2, &(0x7f0000000440)={0x0, 0x0, &(0x7f0000000400)={&(0x7f0000000340)={0x20, r3, 0x31d, 0x0, 0x0, {{0x11}, {@void, @val={0xc, 0x117}}}}, 0x20}}, 0x0) (async) sendmsg$NL80211_CMD_SET_INTERFACE(r0, &(0x7f00000002c0)={&(0x7f0000000180)={0x10, 0x0, 0x0, 0x2000000}, 0xc, &(0x7f0000000280)={&(0x7f0000000200)={0x28, r3, 0x8, 0x70bd25, 0x25dfdbff, {{}, {@void, @void}}, [@NL80211_ATTR_MESH_ID={0xa}, @NL80211_ATTR_IFTYPE={0x8, 0x5, 0x1}]}, 0x28}, 0x1, 0x0, 0x0, 0x4004050}, 0x8054) (async) ioctl$sock_SIOCGIFINDEX_80211(r1, 0x8933, &(0x7f0000000240)={'wlan1\x00', 0x0}) (async) r5 = syz_genetlink_get_family_id$nl80211(&(0x7f0000000140), 0xffffffffffffffff) socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_FRAME(0xffffffffffffffff, &(0x7f0000000100)={0x0, 0x0, &(0x7f0000000000)={&(0x7f0000000040)=ANY=[@ANYBLOB="48040000", @ANYRES16=r5, @ANYBLOB="01edff000000000004003b1c210008000300", @ANYRES32=r4, @ANYBLOB="2c0433005000de295b3acba52ee4080211000001"], 0x448}}, 0x0) (async) r6 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000001c0)='memory.current\x00', 0x275a, 0x0) write$binfmt_script(r6, &(0x7f0000000000)=ANY=[], 0x208e24b) (async) mmap(&(0x7f0000000000/0xb36000)=nil, 0xb36000, 0x2, 0x28011, r6, 0x0) preadv(r6, &(0x7f00000015c0)=[{&(0x7f0000000080)=""/124, 0xffffffff000}], 0x5, 0x0, 0x0) (async) write$binfmt_script(r0, &(0x7f0000000040)=ANY=[], 0x208e24b) (async) r7 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000080)='cgroup.controllers\x00', 0xb00000000065808, 0x0) (async) ioctl$EXT4_IOC_CLEAR_ES_CACHE(0xffffffffffffffff, 0x6628) (async) r8 = socket$nl_generic(0x10, 0x3, 0x10) sendfile(r8, r7, 0x0, 0x10000a006) 17:03:47 executing program 0: sendmsg$NL80211_CMD_CONNECT(0xffffffffffffffff, &(0x7f00000001c0)={0x0, 0x0, 0x0}, 0x0) r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000001c0)='cgroup.controllers\x00', 0x275a, 0x0) r1 = socket$inet6_udplite(0xa, 0x2, 0x88) r2 = socket$nl_generic(0x10, 0x3, 0x10) r3 = syz_genetlink_get_family_id$nl80211(&(0x7f0000000080), 0xffffffffffffffff) sendmsg$NL80211_CMD_JOIN_MESH(r2, &(0x7f0000000440)={0x0, 0x0, &(0x7f0000000400)={&(0x7f0000000340)={0x20, r3, 0x31d, 0x0, 0x0, {{0x11}, {@void, @val={0xc, 0x117}}}}, 0x20}}, 0x0) sendmsg$NL80211_CMD_SET_INTERFACE(r0, &(0x7f00000002c0)={&(0x7f0000000180)={0x10, 0x0, 0x0, 0x2000000}, 0xc, &(0x7f0000000280)={&(0x7f0000000200)={0x28, r3, 0x8, 0x70bd25, 0x25dfdbff, {{}, {@void, @void}}, [@NL80211_ATTR_MESH_ID={0xa}, @NL80211_ATTR_IFTYPE={0x8, 0x5, 0x1}]}, 0x28}, 0x1, 0x0, 0x0, 0x4004050}, 0x8054) ioctl$sock_SIOCGIFINDEX_80211(r1, 0x8933, &(0x7f0000000240)={'wlan1\x00', 0x0}) r5 = syz_genetlink_get_family_id$nl80211(&(0x7f0000000140), 0xffffffffffffffff) socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_FRAME(0xffffffffffffffff, &(0x7f0000000100)={0x0, 0x0, &(0x7f0000000000)={&(0x7f0000000040)=ANY=[@ANYBLOB="48040000", @ANYRES16=r5, @ANYBLOB="01edff000000000004003b1c210008000300", @ANYRES32=r4, @ANYBLOB="2c0433005000de295b3acba52ee4080211000001"], 0x448}}, 0x0) r6 = 
openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000001c0)='memory.current\x00', 0x275a, 0x0) write$binfmt_script(r6, &(0x7f0000000000)=ANY=[], 0x208e24b) mmap(&(0x7f0000000000/0xb36000)=nil, 0xb36000, 0x2, 0x28011, r6, 0x0) preadv(r6, &(0x7f00000015c0)=[{&(0x7f0000000080)=""/124, 0xffffffff000}], 0x5, 0x0, 0x0) write$binfmt_script(r0, &(0x7f0000000040)=ANY=[], 0x208e24b) r7 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000080)='cgroup.controllers\x00', 0xb00000000065808, 0x0) ioctl$EXT4_IOC_CLEAR_ES_CACHE(0xffffffffffffffff, 0x6628) r8 = socket$nl_generic(0x10, 0x3, 0x10) sendfile(r8, r7, 0x0, 0x10000a006) sendmsg$NL80211_CMD_CONNECT(0xffffffffffffffff, &(0x7f00000001c0)={0x0, 0x0, 0x0}, 0x0) (async) openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000001c0)='cgroup.controllers\x00', 0x275a, 0x0) (async) socket$inet6_udplite(0xa, 0x2, 0x88) (async) socket$nl_generic(0x10, 0x3, 0x10) (async) syz_genetlink_get_family_id$nl80211(&(0x7f0000000080), 0xffffffffffffffff) (async) sendmsg$NL80211_CMD_JOIN_MESH(r2, &(0x7f0000000440)={0x0, 0x0, &(0x7f0000000400)={&(0x7f0000000340)={0x20, r3, 0x31d, 0x0, 0x0, {{0x11}, {@void, @val={0xc, 0x117}}}}, 0x20}}, 0x0) (async) sendmsg$NL80211_CMD_SET_INTERFACE(r0, &(0x7f00000002c0)={&(0x7f0000000180)={0x10, 0x0, 0x0, 0x2000000}, 0xc, &(0x7f0000000280)={&(0x7f0000000200)={0x28, r3, 0x8, 0x70bd25, 0x25dfdbff, {{}, {@void, @void}}, [@NL80211_ATTR_MESH_ID={0xa}, @NL80211_ATTR_IFTYPE={0x8, 0x5, 0x1}]}, 0x28}, 0x1, 0x0, 0x0, 0x4004050}, 0x8054) (async) ioctl$sock_SIOCGIFINDEX_80211(r1, 0x8933, &(0x7f0000000240)={'wlan1\x00'}) (async) syz_genetlink_get_family_id$nl80211(&(0x7f0000000140), 0xffffffffffffffff) (async) socket(0x10, 0x803, 0x0) (async) sendmsg$NL80211_CMD_FRAME(0xffffffffffffffff, &(0x7f0000000100)={0x0, 0x0, &(0x7f0000000000)={&(0x7f0000000040)=ANY=[@ANYBLOB="48040000", @ANYRES16=r5, @ANYBLOB="01edff000000000004003b1c210008000300", @ANYRES32=r4, @ANYBLOB="2c0433005000de295b3acba52ee4080211000001"], 0x448}}, 0x0) (async) openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000001c0)='memory.current\x00', 0x275a, 0x0) (async) write$binfmt_script(r6, &(0x7f0000000000)=ANY=[], 0x208e24b) (async) mmap(&(0x7f0000000000/0xb36000)=nil, 0xb36000, 0x2, 0x28011, r6, 0x0) (async) preadv(r6, &(0x7f00000015c0)=[{&(0x7f0000000080)=""/124, 0xffffffff000}], 0x5, 0x0, 0x0) (async) write$binfmt_script(r0, &(0x7f0000000040)=ANY=[], 0x208e24b) (async) openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000080)='cgroup.controllers\x00', 0xb00000000065808, 0x0) (async) ioctl$EXT4_IOC_CLEAR_ES_CACHE(0xffffffffffffffff, 0x6628) (async) socket$nl_generic(0x10, 0x3, 0x10) (async) sendfile(r8, r7, 0x0, 0x10000a006) (async) [ 2862.325718][T22559] bond1021: (slave bridge1087): making interface the new active one [ 2862.337015][T22559] bridge1087: entered promiscuous mode [ 2862.364494][T22559] bond1021: (slave bridge1087): Enslaving as an active interface with an up link 17:03:47 executing program 2: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, 
&(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0xffffffe4, 0x0, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) [ 2862.385676][T22563] netlink: 'syz-executor.5': attribute type 1 has an invalid length. [ 2862.451771][T22563] bond1091: entered promiscuous mode [ 2862.457752][T22563] 8021q: adding VLAN 0 to HW filter on device bond1091 [ 2862.540612][T22564] bond1091: (slave bridge1117): making interface the new active one [ 2862.549357][ T27] audit: type=1804 audit(1688403827.504:422): pid=22587 uid=0 auid=4294967295 ses=4294967295 subj=unconfined op=invalid_pcr cause=open_writers comm="syz-executor.0" name="/root/syzkaller-testdir444109964/syzkaller.UHVosO/1979/cgroup.controllers" dev="sda1" ino=1971 res=1 errno=0 [ 2862.560425][T22564] bridge1117: entered promiscuous mode 17:03:47 executing program 5: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x322, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) [ 2862.598876][ T27] audit: type=1804 audit(1688403827.554:423): pid=22594 uid=0 auid=4294967295 ses=4294967295 subj=unconfined op=invalid_pcr cause=ToMToU comm="syz-executor.0" name="/root/syzkaller-testdir444109964/syzkaller.UHVosO/1979/cgroup.controllers" dev="sda1" ino=1971 res=1 errno=0 [ 2862.611983][T22564] bond1091: (slave bridge1117): Enslaving as an active interface with an up link [ 2862.640748][T22566] netlink: 'syz-executor.4': attribute type 1 has an invalid length. 17:03:47 executing program 4: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x28a, 0x0, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xd}]}, 0x3c}}, 0x0) [ 2862.724829][T22566] bond666: entered promiscuous mode [ 2862.730631][T22566] 8021q: adding VLAN 0 to HW filter on device bond666 [ 2862.757063][T22574] netlink: 'syz-executor.1': attribute type 1 has an invalid length. 
[ 2862.841086][T22574] bond788: entered promiscuous mode [ 2862.847570][T22574] 8021q: adding VLAN 0 to HW filter on device bond788 17:03:47 executing program 3: r0 = socket$netlink(0x10, 0x3, 0x0) socket(0x0, 0x0, 0x0) sendmsg$nl_route(0xffffffffffffffff, 0x0, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000100)=@newlink={0x48, 0x10, 0xffffff1f, 0x0, 0x0, {}, [@IFLA_LINKINFO={0x28, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x18, 0x2, 0x0, 0x1, [@IFLA_BR_MULTI_BOOLOPT={0xc, 0x2e, {0x0, 0x1}}, @IFLA_BR_MCAST_STATS_ENABLED={0x5}]}}}]}, 0x48}}, 0x88a8ffff00000000) [ 2862.966140][T22590] netlink: 'syz-executor.2': attribute type 1 has an invalid length. 17:03:47 executing program 1: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x28a, 0xa, {0x0, 0x0, 0x0, 0x0, 0x0, 0x10000000}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8}]}, 0x3c}}, 0x0) [ 2863.065190][T22590] bond1022: entered promiscuous mode [ 2863.071601][T22590] 8021q: adding VLAN 0 to HW filter on device bond1022 17:03:48 executing program 0: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x28a, 0x0, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0x9}]}, 0x3c}}, 0x0) [ 2863.141470][T22591] bond1022: (slave bridge1088): making interface the new active one [ 2863.152274][T22591] bridge1088: entered promiscuous mode [ 2863.165847][T22591] bond1022: (slave bridge1088): Enslaving as an active interface with an up link 17:03:48 executing program 2: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0xfffffff0, 0x0, {}, 
[@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) [ 2863.247870][T22600] netlink: 'syz-executor.5': attribute type 1 has an invalid length. [ 2863.326256][T22600] bond1092: entered promiscuous mode [ 2863.332830][T22600] 8021q: adding VLAN 0 to HW filter on device bond1092 [ 2863.415023][T22601] bond1092: (slave bridge1118): making interface the new active one [ 2863.426030][T22601] bridge1118: entered promiscuous mode [ 2863.446851][T22601] bond1092: (slave bridge1118): Enslaving as an active interface with an up link 17:03:48 executing program 5: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x32c, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) [ 2863.472370][T22605] netlink: 'syz-executor.4': attribute type 1 has an invalid length. [ 2863.532352][T22605] bond667: entered promiscuous mode [ 2863.538092][T22605] 8021q: adding VLAN 0 to HW filter on device bond667 17:03:48 executing program 4: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x28a, 0x0, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xe}]}, 0x3c}}, 0x0) 17:03:48 executing program 3: r0 = socket$netlink(0x10, 0x3, 0x0) socket(0x0, 0x0, 0x0) sendmsg$nl_route(0xffffffffffffffff, 0x0, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000100)=@newlink={0x48, 0x10, 0xffffff1f, 0x0, 0x0, {}, [@IFLA_LINKINFO={0x28, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x18, 0x2, 0x0, 0x1, [@IFLA_BR_MULTI_BOOLOPT={0xc, 0x2e, {0x0, 0x1}}, @IFLA_BR_MCAST_STATS_ENABLED={0x5}]}}}]}, 0x48}}, 0x9effffff00000000) [ 2863.667752][T22614] netlink: 'syz-executor.1': attribute type 1 has an invalid length. [ 2863.730746][T22614] bond789: entered promiscuous mode [ 2863.736476][T22614] 8021q: adding VLAN 0 to HW filter on device bond789 [ 2863.784971][T22620] netlink: 'syz-executor.0': attribute type 1 has an invalid length. 
17:03:48 executing program 1: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x28a, 0xa, {0x0, 0x0, 0x0, 0x0, 0x0, 0x60000000}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8}]}, 0x3c}}, 0x0) [ 2863.862262][T22620] bond504: entered promiscuous mode [ 2863.868102][T22620] 8021q: adding VLAN 0 to HW filter on device bond504 [ 2863.916677][T22623] bond1023: entered promiscuous mode [ 2863.922809][T22623] 8021q: adding VLAN 0 to HW filter on device bond1023 17:03:49 executing program 0: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x28a, 0x0, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xd}]}, 0x3c}}, 0x0) [ 2864.023232][T22625] bond1023: (slave bridge1089): making interface the new active one [ 2864.032748][T22625] bridge1089: entered promiscuous mode [ 2864.046504][T22625] bond1023: (slave bridge1089): Enslaving as an active interface with an up link 17:03:49 executing program 2: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0xfffffffe, 0x0, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) [ 2864.186110][T22628] bond1093: entered promiscuous mode [ 2864.193137][T22628] 8021q: adding VLAN 0 to HW filter on device bond1093 [ 2864.306032][T22630] bond1093: (slave bridge1119): making interface the new active one [ 2864.314935][T22630] bridge1119: entered promiscuous mode [ 2864.329003][T22630] bond1093: (slave bridge1119): Enslaving as an active interface with an up link 17:03:49 executing program 5: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = 
socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x332, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) 17:03:49 executing program 4: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x28a, 0x0, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xf}]}, 0x3c}}, 0x0) [ 2864.373522][T22633] bond668: entered promiscuous mode [ 2864.380217][T22633] 8021q: adding VLAN 0 to HW filter on device bond668 17:03:49 executing program 3: r0 = socket$netlink(0x10, 0x3, 0x0) socket(0x0, 0x0, 0x0) sendmsg$nl_route(0xffffffffffffffff, 0x0, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000100)=@newlink={0x48, 0x10, 0xffffff1f, 0x0, 0x0, {}, [@IFLA_LINKINFO={0x28, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x18, 0x2, 0x0, 0x1, [@IFLA_BR_MULTI_BOOLOPT={0xc, 0x2e, {0x0, 0x1}}, @IFLA_BR_MCAST_STATS_ENABLED={0x5}]}}}]}, 0x48}}, 0xc80e000000000000) [ 2864.550703][T22643] bond790: entered promiscuous mode [ 2864.569392][T22643] 8021q: adding VLAN 0 to HW filter on device bond790 17:03:49 executing program 1: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x28a, 0xa, {0x0, 0x0, 0x0, 0x0, 0x0, 0x65580000}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8}]}, 0x3c}}, 0x0) [ 2864.742932][T22647] bond505: entered promiscuous mode [ 2864.756937][T22647] 8021q: adding VLAN 0 to HW filter on device bond505 [ 2864.844080][T22649] bond1024: entered promiscuous mode [ 2864.851677][T22649] 8021q: adding VLAN 0 to HW filter on device bond1024 17:03:49 executing program 0: 
bpf$BPF_PROG_RAW_TRACEPOINT_LOAD(0x5, 0x0, 0xfd63) bpf$BPF_RAW_TRACEPOINT_OPEN(0x11, &(0x7f0000000080)={0x0}, 0x10) r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000001c0)='cgroup.controllers\x00', 0x275a, 0x0) ioctl$ifreq_SIOCGIFINDEX_vcan(0xffffffffffffffff, 0x8933, 0x0) getsockopt$sock_cred(r0, 0x1, 0x11, &(0x7f0000000040), &(0x7f00000000c0)=0xc) socket$packet(0x11, 0x3, 0x300) connect$can_bcm(0xffffffffffffffff, &(0x7f0000000140), 0x10) ioctl$EXT4_IOC_CHECKPOINT(r0, 0x4004662b, &(0x7f0000000000)=0x2) [ 2864.977732][T22652] bond1024: (slave bridge1090): making interface the new active one [ 2865.002559][T22652] bridge1090: entered promiscuous mode 17:03:50 executing program 2: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0xffffffff, 0x0, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) [ 2865.025397][T22652] bond1024: (slave bridge1090): Enslaving as an active interface with an up link [ 2865.053159][T22676] warning: checkpointing journal with EXT4_IOC_CHECKPOINT_FLAG_ZEROOUT can be slow [ 2865.140099][T22658] bond1094: entered promiscuous mode [ 2865.163924][T22658] 8021q: adding VLAN 0 to HW filter on device bond1094 [ 2865.280765][T22660] bond669: entered promiscuous mode [ 2865.288074][T22660] 8021q: adding VLAN 0 to HW filter on device bond669 17:03:50 executing program 5: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x33c, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) [ 2865.365760][T22663] bond1094: (slave bridge1120): making interface the new active one [ 2865.375072][T22663] bridge1120: entered promiscuous mode [ 2865.390124][T22663] bond1094: (slave bridge1120): Enslaving as an active interface with an up link 17:03:50 executing program 4: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, 
&(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x28a, 0x0, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0x10}]}, 0x3c}}, 0x0) 17:03:50 executing program 3: r0 = socket$netlink(0x10, 0x3, 0x0) socket(0x0, 0x0, 0x0) sendmsg$nl_route(0xffffffffffffffff, 0x0, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000100)=@newlink={0x48, 0x10, 0xffffff1f, 0x0, 0x0, {}, [@IFLA_LINKINFO={0x28, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x18, 0x2, 0x0, 0x1, [@IFLA_BR_MULTI_BOOLOPT={0xc, 0x2e, {0x0, 0x1}}, @IFLA_BR_MCAST_STATS_ENABLED={0x5}]}}}]}, 0x48}}, 0xcb27000000000000) 17:03:50 executing program 0: bpf$BPF_PROG_RAW_TRACEPOINT_LOAD(0x5, 0x0, 0xfd63) (async) bpf$BPF_RAW_TRACEPOINT_OPEN(0x11, &(0x7f0000000080)={0x0}, 0x10) r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000001c0)='cgroup.controllers\x00', 0x275a, 0x0) ioctl$ifreq_SIOCGIFINDEX_vcan(0xffffffffffffffff, 0x8933, 0x0) (async) getsockopt$sock_cred(r0, 0x1, 0x11, &(0x7f0000000040), &(0x7f00000000c0)=0xc) socket$packet(0x11, 0x3, 0x300) connect$can_bcm(0xffffffffffffffff, &(0x7f0000000140), 0x10) ioctl$EXT4_IOC_CHECKPOINT(r0, 0x4004662b, &(0x7f0000000000)=0x2) [ 2865.633642][T22672] bond791: entered promiscuous mode [ 2865.639823][T22672] 8021q: adding VLAN 0 to HW filter on device bond791 17:03:50 executing program 1: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x28a, 0xa, {0x0, 0x0, 0x0, 0x0, 0x0, 0x81000000}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8}]}, 0x3c}}, 0x0) 17:03:50 executing program 0: bpf$BPF_PROG_RAW_TRACEPOINT_LOAD(0x5, 0x0, 0xfd63) bpf$BPF_RAW_TRACEPOINT_OPEN(0x11, &(0x7f0000000080)={0x0}, 0x10) r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000001c0)='cgroup.controllers\x00', 0x275a, 0x0) ioctl$ifreq_SIOCGIFINDEX_vcan(0xffffffffffffffff, 0x8933, 0x0) (async) getsockopt$sock_cred(r0, 0x1, 0x11, &(0x7f0000000040), &(0x7f00000000c0)=0xc) socket$packet(0x11, 0x3, 0x300) (async) connect$can_bcm(0xffffffffffffffff, &(0x7f0000000140), 0x10) (async) ioctl$EXT4_IOC_CHECKPOINT(r0, 0x4004662b, &(0x7f0000000000)=0x2) [ 2865.708160][T22698] warning: checkpointing journal with EXT4_IOC_CHECKPOINT_FLAG_ZEROOUT can be slow [ 2865.807241][T22686] bond1025: entered promiscuous mode [ 2865.825234][T22686] 8021q: adding VLAN 0 to HW filter on device bond1025 [ 2865.933985][T22706] warning: checkpointing journal with EXT4_IOC_CHECKPOINT_FLAG_ZEROOUT can be slow [ 2865.988255][T22688] bond1095: entered promiscuous mode [ 2866.006424][T22688] 8021q: adding VLAN 0 to HW filter on device bond1095 17:03:51 executing program 0: 
socketpair$unix(0x1, 0x2, 0x0, &(0x7f0000000040)={0xffffffffffffffff, 0xffffffffffffffff}) socketpair$unix(0x1, 0x1, 0x0, &(0x7f0000000000)={0xffffffffffffffff, 0xffffffffffffffff}) r2 = gettid() sendmmsg$unix(r1, &(0x7f0000000300)=[{{0x0, 0x0, &(0x7f0000000280)=[{&(0x7f0000000100)="ff", 0x32880}], 0x5, &(0x7f00000002c0)=[@cred={{0x1c, 0x1, 0x2, {r2}}}, @rights={{0x14, 0x1, 0x1, [r0]}}], 0x38}}], 0x1, 0x0) socketpair$unix(0x1, 0x2, 0x0, &(0x7f0000000040)={0xffffffffffffffff, 0xffffffffffffffff}) socketpair$unix(0x1, 0x1, 0x0, &(0x7f0000000000)={0xffffffffffffffff, 0xffffffffffffffff}) r5 = gettid() sendmmsg$unix(r4, &(0x7f0000000300)=[{{0x0, 0x0, &(0x7f0000000280)=[{&(0x7f0000000100)="ff", 0x32880}], 0x5, &(0x7f00000002c0)=[@cred={{0x1c, 0x1, 0x2, {r5}}}, @rights={{0x14, 0x1, 0x1, [r3]}}], 0x38}}], 0x1, 0x0) r6 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) r7 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000140)='memory.events\x00', 0x7a05, 0x1700) r8 = openat$cgroup_ro(0xffffffffffffffff, &(0x7f00000003c0)='cpuset.effective_cpus\x00', 0x275a, 0x0) write$cgroup_int(r7, &(0x7f0000000200), 0xf000) sendfile(r7, r8, 0x0, 0xf03b0000) sendfile(r7, 0xffffffffffffffff, 0x0, 0x8000000000004) sendmsg$nl_route(0xffffffffffffffff, &(0x7f0000000300)={&(0x7f0000000200)={0x10, 0x0, 0x0, 0x40440080}, 0xc, &(0x7f00000002c0)={&(0x7f0000000240)=@RTM_NEWNSID={0x54, 0x58, 0x300, 0x70bd2a, 0x25dfdbfd, {}, [@NETNSA_PID={0x8, 0x2, r2}, @NETNSA_NSID={0x8}, @NETNSA_PID={0x8, 0x2, r5}, @NETNSA_NSID={0x8, 0x1, 0x4}, @NETNSA_FD={0x8, 0x3, r6}, @NETNSA_FD={0x8}, @NETNSA_NSID={0x8, 0x1, 0x2}, @NETNSA_PID={0x8}]}, 0x54}, 0x1, 0x0, 0x0, 0x2004c011}, 0x10000041) r9 = syz_genetlink_get_family_id$l2tp(&(0x7f00000000c0), 0xffffffffffffffff) sendmsg$L2TP_CMD_TUNNEL_GET(0xffffffffffffffff, &(0x7f0000000180)={&(0x7f0000000080)={0x10, 0x0, 0x0, 0x20000000}, 0xc, &(0x7f0000000140)={&(0x7f0000000100)={0x2c, r9, 0x20, 0x70bd25, 0x25dfdbfe, {}, [@L2TP_ATTR_PEER_SESSION_ID={0x8}, @L2TP_ATTR_L2SPEC_TYPE={0x5, 0x5, 0x1}, @L2TP_ATTR_SEND_SEQ={0x5, 0x13, 0x80}]}, 0x2c}, 0x1, 0x0, 0x0, 0x20000000}, 0x2010) r10 = socket$inet(0x2b, 0x801, 0x0) socket$nl_generic(0x10, 0x3, 0x10) setsockopt$IP_VS_SO_SET_ADD(r10, 0x0, 0x482, &(0x7f0000000000)={0x6, @empty, 0x0, 0x0, 'lblc\x00'}, 0x2c) r11 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000001c0)='memory.current\x00', 0x275a, 0x0) write$binfmt_script(r11, &(0x7f0000000000)=ANY=[], 0x208e24b) mmap(&(0x7f0000000000/0xb36000)=nil, 0xb36000, 0x2, 0x28011, r11, 0x0) r12 = socket$inet6_sctp(0xa, 0x5, 0x84) r13 = socket$inet_sctp(0x2, 0x5, 0x84) getsockopt$inet_sctp_SCTP_SOCKOPT_CONNECTX3(r13, 0x84, 0x6f, &(0x7f0000002f00)={0x0, 0x10, &(0x7f0000002ec0)=[@in={0x2, 0x0, @initdev={0xac, 0x1e, 0x0, 0x0}}]}, &(0x7f0000002f40)=0x10) getsockopt$inet_sctp_SCTP_GET_ASSOC_ID_LIST(r13, 0x84, 0x1d, &(0x7f0000000380)=ANY=[@ANYBLOB="01005fdf32278581743bba0000", @ANYRES32=0x0], &(0x7f0000000040)=0x8) getsockopt$inet_sctp6_SCTP_DELAYED_SACK(r12, 0x84, 0x10, &(0x7f0000000240)=@assoc_value={r14}, &(0x7f0000000280)=0x8) getsockopt$inet_sctp_SCTP_AUTH_ACTIVE_KEY(r11, 0x84, 0x18, &(0x7f00000001c0)={r14, 0x1}, &(0x7f0000000340)=0x8) setsockopt$inet_mreqn(r10, 0x0, 0x23, &(0x7f0000000040)={@private=0xa010102, @dev={0xac, 0x14, 0x14, 0x22}}, 0xc) [ 2866.054002][T22690] bond670: entered promiscuous mode [ 2866.060645][T22690] 8021q: adding VLAN 0 to HW filter on device bond670 [ 2866.200626][T22691] bond1025: (slave bridge1091): making interface the new active one [ 
2866.212174][T22691] bridge1091: entered promiscuous mode [ 2866.233692][T22691] bond1025: (slave bridge1091): Enslaving as an active interface with an up link 17:03:51 executing program 2: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x0, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) [ 2866.272267][T22717] sctp: [Deprecated]: syz-executor.0 (pid 22717) Use of struct sctp_assoc_value in delayed_ack socket option. [ 2866.272267][T22717] Use struct sctp_sack_info instead [ 2866.367104][T22693] bond1095: (slave bridge1121): making interface the new active one [ 2866.386749][T22693] bridge1121: entered promiscuous mode [ 2866.412444][T22693] bond1095: (slave bridge1121): Enslaving as an active interface with an up link 17:03:51 executing program 5: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x342, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) 17:03:51 executing program 4: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x28a, 0x0, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0x11}]}, 0x3c}}, 0x0) 17:03:51 executing program 3: r0 = socket$netlink(0x10, 0x3, 0x0) socket(0x0, 0x0, 0x0) sendmsg$nl_route(0xffffffffffffffff, 0x0, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000100)=@newlink={0x48, 0x10, 0xffffff1f, 0x0, 0x0, {}, [@IFLA_LINKINFO={0x28, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x18, 0x2, 0x0, 0x1, [@IFLA_BR_MULTI_BOOLOPT={0xc, 0x2e, {0x0, 0x1}}, 
@IFLA_BR_MCAST_STATS_ENABLED={0x5}]}}}]}, 0x48}}, 0xf0ffffff00000000) [ 2866.651707][T22704] bond792: entered promiscuous mode [ 2866.670900][T22704] 8021q: adding VLAN 0 to HW filter on device bond792 17:03:51 executing program 1: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x28a, 0xa, {0x0, 0x0, 0x0, 0x0, 0x0, 0x88a8ffff}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8}]}, 0x3c}}, 0x0) [ 2866.747617][T22719] bond1026: entered promiscuous mode [ 2866.754385][T22719] 8021q: adding VLAN 0 to HW filter on device bond1026 [ 2866.815826][T22720] bond1026: (slave bridge1092): making interface the new active one [ 2866.824144][T22720] bridge1092: entered promiscuous mode [ 2866.838156][T22720] bond1026: (slave bridge1092): Enslaving as an active interface with an up link 17:03:51 executing program 2: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x2, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) [ 2866.882089][T22724] validate_nla: 15 callbacks suppressed [ 2866.882115][T22724] netlink: 'syz-executor.5': attribute type 1 has an invalid length. 
17:03:51 executing program 0: socketpair$unix(0x1, 0x2, 0x0, &(0x7f0000000040)={0xffffffffffffffff, 0xffffffffffffffff}) (async) socketpair$unix(0x1, 0x1, 0x0, &(0x7f0000000000)={0xffffffffffffffff, 0xffffffffffffffff}) r2 = gettid() sendmmsg$unix(r1, &(0x7f0000000300)=[{{0x0, 0x0, &(0x7f0000000280)=[{&(0x7f0000000100)="ff", 0x32880}], 0x5, &(0x7f00000002c0)=[@cred={{0x1c, 0x1, 0x2, {r2}}}, @rights={{0x14, 0x1, 0x1, [r0]}}], 0x38}}], 0x1, 0x0) (async) socketpair$unix(0x1, 0x2, 0x0, &(0x7f0000000040)={0xffffffffffffffff, 0xffffffffffffffff}) (async, rerun: 64) socketpair$unix(0x1, 0x1, 0x0, &(0x7f0000000000)={0xffffffffffffffff, 0xffffffffffffffff}) (rerun: 64) r5 = gettid() sendmmsg$unix(r4, &(0x7f0000000300)=[{{0x0, 0x0, &(0x7f0000000280)=[{&(0x7f0000000100)="ff", 0x32880}], 0x5, &(0x7f00000002c0)=[@cred={{0x1c, 0x1, 0x2, {r5}}}, @rights={{0x14, 0x1, 0x1, [r3]}}], 0x38}}], 0x1, 0x0) (async) r6 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) (async) r7 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000140)='memory.events\x00', 0x7a05, 0x1700) r8 = openat$cgroup_ro(0xffffffffffffffff, &(0x7f00000003c0)='cpuset.effective_cpus\x00', 0x275a, 0x0) write$cgroup_int(r7, &(0x7f0000000200), 0xf000) (async) sendfile(r7, r8, 0x0, 0xf03b0000) (async) sendfile(r7, 0xffffffffffffffff, 0x0, 0x8000000000004) sendmsg$nl_route(0xffffffffffffffff, &(0x7f0000000300)={&(0x7f0000000200)={0x10, 0x0, 0x0, 0x40440080}, 0xc, &(0x7f00000002c0)={&(0x7f0000000240)=@RTM_NEWNSID={0x54, 0x58, 0x300, 0x70bd2a, 0x25dfdbfd, {}, [@NETNSA_PID={0x8, 0x2, r2}, @NETNSA_NSID={0x8}, @NETNSA_PID={0x8, 0x2, r5}, @NETNSA_NSID={0x8, 0x1, 0x4}, @NETNSA_FD={0x8, 0x3, r6}, @NETNSA_FD={0x8}, @NETNSA_NSID={0x8, 0x1, 0x2}, @NETNSA_PID={0x8}]}, 0x54}, 0x1, 0x0, 0x0, 0x2004c011}, 0x10000041) r9 = syz_genetlink_get_family_id$l2tp(&(0x7f00000000c0), 0xffffffffffffffff) sendmsg$L2TP_CMD_TUNNEL_GET(0xffffffffffffffff, &(0x7f0000000180)={&(0x7f0000000080)={0x10, 0x0, 0x0, 0x20000000}, 0xc, &(0x7f0000000140)={&(0x7f0000000100)={0x2c, r9, 0x20, 0x70bd25, 0x25dfdbfe, {}, [@L2TP_ATTR_PEER_SESSION_ID={0x8}, @L2TP_ATTR_L2SPEC_TYPE={0x5, 0x5, 0x1}, @L2TP_ATTR_SEND_SEQ={0x5, 0x13, 0x80}]}, 0x2c}, 0x1, 0x0, 0x0, 0x20000000}, 0x2010) r10 = socket$inet(0x2b, 0x801, 0x0) socket$nl_generic(0x10, 0x3, 0x10) (async, rerun: 32) setsockopt$IP_VS_SO_SET_ADD(r10, 0x0, 0x482, &(0x7f0000000000)={0x6, @empty, 0x0, 0x0, 'lblc\x00'}, 0x2c) (async, rerun: 32) r11 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000001c0)='memory.current\x00', 0x275a, 0x0) write$binfmt_script(r11, &(0x7f0000000000)=ANY=[], 0x208e24b) mmap(&(0x7f0000000000/0xb36000)=nil, 0xb36000, 0x2, 0x28011, r11, 0x0) r12 = socket$inet6_sctp(0xa, 0x5, 0x84) (async) r13 = socket$inet_sctp(0x2, 0x5, 0x84) getsockopt$inet_sctp_SCTP_SOCKOPT_CONNECTX3(r13, 0x84, 0x6f, &(0x7f0000002f00)={0x0, 0x10, &(0x7f0000002ec0)=[@in={0x2, 0x0, @initdev={0xac, 0x1e, 0x0, 0x0}}]}, &(0x7f0000002f40)=0x10) getsockopt$inet_sctp_SCTP_GET_ASSOC_ID_LIST(r13, 0x84, 0x1d, &(0x7f0000000380)=ANY=[@ANYBLOB="01005fdf32278581743bba0000", @ANYRES32=0x0], &(0x7f0000000040)=0x8) (async) getsockopt$inet_sctp6_SCTP_DELAYED_SACK(r12, 0x84, 0x10, &(0x7f0000000240)=@assoc_value={r14}, &(0x7f0000000280)=0x8) (async) getsockopt$inet_sctp_SCTP_AUTH_ACTIVE_KEY(r11, 0x84, 0x18, &(0x7f00000001c0)={r14, 0x1}, &(0x7f0000000340)=0x8) (async) setsockopt$inet_mreqn(r10, 0x0, 0x23, &(0x7f0000000040)={@private=0xa010102, @dev={0xac, 0x14, 0x14, 0x22}}, 0xc) [ 2866.988278][T22724] bond1096: entered promiscuous mode 
[ 2866.995627][T22724] 8021q: adding VLAN 0 to HW filter on device bond1096 [ 2867.015647][T22727] netlink: 'syz-executor.4': attribute type 1 has an invalid length. [ 2867.108568][T22727] bond671: entered promiscuous mode [ 2867.145789][T22727] 8021q: adding VLAN 0 to HW filter on device bond671 17:03:52 executing program 0: socketpair$unix(0x1, 0x2, 0x0, &(0x7f0000000040)={0xffffffffffffffff, 0xffffffffffffffff}) socketpair$unix(0x1, 0x1, 0x0, &(0x7f0000000000)={0xffffffffffffffff, 0xffffffffffffffff}) gettid() (async) r2 = gettid() sendmmsg$unix(r1, &(0x7f0000000300)=[{{0x0, 0x0, &(0x7f0000000280)=[{&(0x7f0000000100)="ff", 0x32880}], 0x5, &(0x7f00000002c0)=[@cred={{0x1c, 0x1, 0x2, {r2}}}, @rights={{0x14, 0x1, 0x1, [r0]}}], 0x38}}], 0x1, 0x0) socketpair$unix(0x1, 0x2, 0x0, &(0x7f0000000040)={0xffffffffffffffff, 0xffffffffffffffff}) socketpair$unix(0x1, 0x1, 0x0, &(0x7f0000000000)={0xffffffffffffffff, 0xffffffffffffffff}) gettid() (async) r5 = gettid() sendmmsg$unix(r4, &(0x7f0000000300)=[{{0x0, 0x0, &(0x7f0000000280)=[{&(0x7f0000000100)="ff", 0x32880}], 0x5, &(0x7f00000002c0)=[@cred={{0x1c, 0x1, 0x2, {r5}}}, @rights={{0x14, 0x1, 0x1, [r3]}}], 0x38}}], 0x1, 0x0) openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) (async) r6 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) r7 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000140)='memory.events\x00', 0x7a05, 0x1700) r8 = openat$cgroup_ro(0xffffffffffffffff, &(0x7f00000003c0)='cpuset.effective_cpus\x00', 0x275a, 0x0) write$cgroup_int(r7, &(0x7f0000000200), 0xf000) sendfile(r7, r8, 0x0, 0xf03b0000) sendfile(r7, 0xffffffffffffffff, 0x0, 0x8000000000004) (async) sendfile(r7, 0xffffffffffffffff, 0x0, 0x8000000000004) sendmsg$nl_route(0xffffffffffffffff, &(0x7f0000000300)={&(0x7f0000000200)={0x10, 0x0, 0x0, 0x40440080}, 0xc, &(0x7f00000002c0)={&(0x7f0000000240)=@RTM_NEWNSID={0x54, 0x58, 0x300, 0x70bd2a, 0x25dfdbfd, {}, [@NETNSA_PID={0x8, 0x2, r2}, @NETNSA_NSID={0x8}, @NETNSA_PID={0x8, 0x2, r5}, @NETNSA_NSID={0x8, 0x1, 0x4}, @NETNSA_FD={0x8, 0x3, r6}, @NETNSA_FD={0x8}, @NETNSA_NSID={0x8, 0x1, 0x2}, @NETNSA_PID={0x8}]}, 0x54}, 0x1, 0x0, 0x0, 0x2004c011}, 0x10000041) syz_genetlink_get_family_id$l2tp(&(0x7f00000000c0), 0xffffffffffffffff) (async) r9 = syz_genetlink_get_family_id$l2tp(&(0x7f00000000c0), 0xffffffffffffffff) sendmsg$L2TP_CMD_TUNNEL_GET(0xffffffffffffffff, &(0x7f0000000180)={&(0x7f0000000080)={0x10, 0x0, 0x0, 0x20000000}, 0xc, &(0x7f0000000140)={&(0x7f0000000100)={0x2c, r9, 0x20, 0x70bd25, 0x25dfdbfe, {}, [@L2TP_ATTR_PEER_SESSION_ID={0x8}, @L2TP_ATTR_L2SPEC_TYPE={0x5, 0x5, 0x1}, @L2TP_ATTR_SEND_SEQ={0x5, 0x13, 0x80}]}, 0x2c}, 0x1, 0x0, 0x0, 0x20000000}, 0x2010) r10 = socket$inet(0x2b, 0x801, 0x0) socket$nl_generic(0x10, 0x3, 0x10) (async) socket$nl_generic(0x10, 0x3, 0x10) setsockopt$IP_VS_SO_SET_ADD(r10, 0x0, 0x482, &(0x7f0000000000)={0x6, @empty, 0x0, 0x0, 'lblc\x00'}, 0x2c) r11 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000001c0)='memory.current\x00', 0x275a, 0x0) write$binfmt_script(r11, &(0x7f0000000000)=ANY=[], 0x208e24b) mmap(&(0x7f0000000000/0xb36000)=nil, 0xb36000, 0x2, 0x28011, r11, 0x0) (async) mmap(&(0x7f0000000000/0xb36000)=nil, 0xb36000, 0x2, 0x28011, r11, 0x0) socket$inet6_sctp(0xa, 0x5, 0x84) (async) r12 = socket$inet6_sctp(0xa, 0x5, 0x84) r13 = socket$inet_sctp(0x2, 0x5, 0x84) getsockopt$inet_sctp_SCTP_SOCKOPT_CONNECTX3(r13, 0x84, 0x6f, &(0x7f0000002f00)={0x0, 0x10, &(0x7f0000002ec0)=[@in={0x2, 0x0, @initdev={0xac, 0x1e, 0x0, 0x0}}]}, 
&(0x7f0000002f40)=0x10) (async) getsockopt$inet_sctp_SCTP_SOCKOPT_CONNECTX3(r13, 0x84, 0x6f, &(0x7f0000002f00)={0x0, 0x10, &(0x7f0000002ec0)=[@in={0x2, 0x0, @initdev={0xac, 0x1e, 0x0, 0x0}}]}, &(0x7f0000002f40)=0x10) getsockopt$inet_sctp_SCTP_GET_ASSOC_ID_LIST(r13, 0x84, 0x1d, &(0x7f0000000380)=ANY=[@ANYBLOB="01005fdf32278581743bba0000", @ANYRES32=0x0], &(0x7f0000000040)=0x8) getsockopt$inet_sctp6_SCTP_DELAYED_SACK(r12, 0x84, 0x10, &(0x7f0000000240)=@assoc_value={r14}, &(0x7f0000000280)=0x8) getsockopt$inet_sctp_SCTP_AUTH_ACTIVE_KEY(r11, 0x84, 0x18, &(0x7f00000001c0)={r14, 0x1}, &(0x7f0000000340)=0x8) (async) getsockopt$inet_sctp_SCTP_AUTH_ACTIVE_KEY(r11, 0x84, 0x18, &(0x7f00000001c0)={r14, 0x1}, &(0x7f0000000340)=0x8) setsockopt$inet_mreqn(r10, 0x0, 0x23, &(0x7f0000000040)={@private=0xa010102, @dev={0xac, 0x14, 0x14, 0x22}}, 0xc) [ 2867.370733][T22731] bond1096: (slave bridge1122): making interface the new active one [ 2867.394562][T22731] bridge1122: entered promiscuous mode 17:03:52 executing program 5: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x34a, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) [ 2867.415420][T22731] bond1096: (slave bridge1122): Enslaving as an active interface with an up link [ 2867.427934][T22735] netlink: 'syz-executor.4': attribute type 17 has an invalid length. 17:03:52 executing program 4: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x28a, 0x0, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0x12}]}, 0x3c}}, 0x0) [ 2867.484540][T22756] sctp: [Deprecated]: syz-executor.0 (pid 22756) Use of struct sctp_assoc_value in delayed_ack socket option. 
[ 2867.484540][T22756] Use struct sctp_sack_info instead 17:03:52 executing program 3: r0 = socket$netlink(0x10, 0x3, 0x0) socket(0x0, 0x0, 0x0) sendmsg$nl_route(0xffffffffffffffff, 0x0, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000100)=@newlink={0x48, 0x10, 0xffffff1f, 0x0, 0x0, {}, [@IFLA_LINKINFO={0x28, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x18, 0x2, 0x0, 0x1, [@IFLA_BR_MULTI_BOOLOPT={0xc, 0x2e, {0x0, 0x1}}, @IFLA_BR_MCAST_STATS_ENABLED={0x5}]}}}]}, 0x48}}, 0xffffff7f00000000) [ 2867.590813][T22739] netlink: 'syz-executor.1': attribute type 1 has an invalid length. [ 2867.682502][T22739] bond793: entered promiscuous mode [ 2867.692493][T22739] 8021q: adding VLAN 0 to HW filter on device bond793 [ 2867.731485][T22741] netlink: 'syz-executor.2': attribute type 1 has an invalid length. 17:03:52 executing program 1: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x28a, 0xa, {0x0, 0x0, 0x0, 0x0, 0x0, 0x95383abc}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8}]}, 0x3c}}, 0x0) [ 2867.807689][T22741] bond1027: entered promiscuous mode [ 2867.815080][T22741] 8021q: adding VLAN 0 to HW filter on device bond1027 17:03:52 executing program 2: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x3, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) [ 2867.903772][T22745] bond1027: (slave bridge1093): making interface the new active one [ 2867.913164][T22745] bridge1093: entered promiscuous mode [ 2867.927989][T22745] bond1027: (slave bridge1093): Enslaving as an active interface with an up link [ 2867.940164][T22758] netlink: 'syz-executor.5': attribute type 1 has an invalid length. 
[ 2868.025881][T22758] bond1097: entered promiscuous mode [ 2868.033693][T22758] 8021q: adding VLAN 0 to HW filter on device bond1097 [ 2868.104878][T22761] bond1097: (slave bridge1123): making interface the new active one [ 2868.114088][T22761] bridge1123: entered promiscuous mode [ 2868.142198][T22761] bond1097: (slave bridge1123): Enslaving as an active interface with an up link 17:03:53 executing program 5: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x352, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) [ 2868.152763][T22760] netlink: 'syz-executor.4': attribute type 1 has an invalid length. 17:03:53 executing program 0: r0 = socket$alg(0x26, 0x5, 0x0) connect$netlink(0xffffffffffffffff, &(0x7f0000000100)=@unspec, 0xc) accept$alg(r0, 0x0, 0x0) read(r0, &(0x7f0000000000)=""/205, 0xcd) [ 2868.222016][T22760] bond672: entered promiscuous mode [ 2868.227640][T22760] 8021q: adding VLAN 0 to HW filter on device bond672 [ 2868.241837][T22767] netlink: 4 bytes leftover after parsing attributes in process `syz-executor.4'. 17:03:53 executing program 4: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x28a, 0x0, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0x13}]}, 0x3c}}, 0x0) 17:03:53 executing program 3: r0 = socket$netlink(0x10, 0x3, 0x0) socket(0x0, 0x0, 0x0) sendmsg$nl_route(0xffffffffffffffff, 0x0, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000100)=@newlink={0x48, 0x10, 0xffffff1f, 0x0, 0x0, {}, [@IFLA_LINKINFO={0x28, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x18, 0x2, 0x0, 0x1, [@IFLA_BR_MULTI_BOOLOPT={0xc, 0x2e, {0x0, 0x1}}, @IFLA_BR_MCAST_STATS_ENABLED={0x5}]}}}]}, 0x48}}, 0xfffffffffffff000) [ 2868.272233][T22772] netlink: 'syz-executor.1': attribute type 1 has an invalid length. 
[ 2868.355098][T22772] bond794: entered promiscuous mode [ 2868.362567][T22772] 8021q: adding VLAN 0 to HW filter on device bond794 17:03:53 executing program 1: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x28a, 0xa, {0x0, 0x0, 0x0, 0x0, 0x0, 0x9effffff}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8}]}, 0x3c}}, 0x0) 17:03:53 executing program 0: r0 = socket$alg(0x26, 0x5, 0x0) (async) connect$netlink(0xffffffffffffffff, &(0x7f0000000100)=@unspec, 0xc) accept$alg(r0, 0x0, 0x0) read(r0, &(0x7f0000000000)=""/205, 0xcd) [ 2868.482108][T22777] netlink: 'syz-executor.2': attribute type 1 has an invalid length. 17:03:53 executing program 0: r0 = socket$alg(0x26, 0x5, 0x0) connect$netlink(0xffffffffffffffff, &(0x7f0000000100)=@unspec, 0xc) (async, rerun: 64) accept$alg(r0, 0x0, 0x0) (async, rerun: 64) read(r0, &(0x7f0000000000)=""/205, 0xcd) 17:03:53 executing program 3: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket(0x0, 0x0, 0x0) sendmsg$nl_route(0xffffffffffffffff, 0x0, 0x0) setsockopt$netrom_NETROM_IDLE(r1, 0x103, 0x7, &(0x7f0000000000)=0x800, 0x4) r2 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) r3 = accept4(0xffffffffffffffff, 0x0, &(0x7f0000000140), 0x80800) r4 = socket$nl_route(0x10, 0x3, 0x0) r5 = socket(0x10, 0x3, 0x0) r6 = socket$nl_route(0x10, 0x3, 0x0) r7 = socket(0x10, 0x2, 0x0) sendmsg$nl_route_sched(r7, &(0x7f0000000180)={0x0, 0x0, &(0x7f0000000140)={0x0, 0x140}}, 0x0) getsockname$packet(r7, &(0x7f0000000080)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000100)=0xab) sendmsg$nl_route(r6, &(0x7f0000000000)={0x0, 0x0, &(0x7f0000000140)={&(0x7f0000000040)=ANY=[@ANYBLOB="3c0000001000010400eeffffffffffff00000000", @ANYRES32=r8, @ANYBLOB="01000000010000001c0012000c000100627269646765"], 0x3c}}, 0x0) sendmsg$nl_route_sched(r5, &(0x7f0000005840)={0x0, 0x0, &(0x7f0000000780)={&(0x7f0000000240)=ANY=[@ANYBLOB="4800000024000b0e00"/20, @ANYRES32=r8, @ANYBLOB="00000000ffffffff0000000008000100687462001c0002001800020003"], 0x48}}, 0x0) sendmsg$nl_route_sched(r4, &(0x7f00000000c0)={0x0, 0x0, &(0x7f0000000180)={&(0x7f0000000480)=ANY=[@ANYBLOB='D\x00\x00\x00,\x00\'\r\x00'/20, @ANYRES32=r8, @ANYBLOB="0000000000000000f3ff00000800010065333200180002001400050000000000000002000000000000083c9b9e8607000000000000000080af7c1df1dd362fd54581d9279ea336589a53af26c70cc254d2426ffd70dc7d4fa6af2b6839fcd2680acf7e7bf2eedd9f1d42b774f3292900726ee7df0b17c6954fe260c43d2d58d297316bb38871c3443fd3c8ebaf6d29b56abf3f58b3884b9730be83ae96553668588cde79c476a1df44dfa29d42a0c9dcc493fa25fe8ff29cbbea945e3009ba45e88c9248d5e0595e59c3038a976487322e98a03284db72f251c36f6905270cae03ab1a752788332d42d744e2c942b8e290a7788365ec28a2bdf5c2cb89014142f3a3fb58e6cefb1a6ee2224a82b02b71c0239eb9cd"], 0x44}}, 0x0) r9 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000140)='memory.events\x00', 0x7a05, 0x1700) r10 = 
openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) write$cgroup_int(r9, &(0x7f0000000200), 0xf000) sendfile(r9, r10, 0x0, 0xf03b0000) r11 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) sendfile(r11, r10, &(0x7f00000002c0)=0x335773c3, 0x8) getsockopt$inet_udp_int(r11, 0x11, 0x67, &(0x7f0000000280), &(0x7f00000002c0)=0x4) sendmsg$nl_route_sched(r3, &(0x7f0000000440)={&(0x7f0000000180)={0x10, 0x0, 0x0, 0x400000}, 0xc, &(0x7f0000000400)={&(0x7f0000000200)=@getqdisc={0x28, 0x26, 0x100, 0x70bd29, 0x25dfdbfd, {0x0, 0x0, 0x0, r8, {0xfff1, 0x8}, {0xfff2, 0x2}, {0xb, 0xb}}, [{0x4}]}, 0x28}, 0x1, 0x0, 0x0, 0x90}, 0x4) sendfile(0xffffffffffffffff, r2, 0x0, 0x8000000000004) openat$cgroup_ro(r2, &(0x7f00000001c0)='cpuacct.usage_percpu_user\x00', 0x0, 0x0) sendmsg$nl_route(r2, &(0x7f00000000c0)={&(0x7f0000000040)={0x10, 0x0, 0x0, 0x40000000}, 0xc, &(0x7f0000000080)={&(0x7f00000007c0)=ANY=[@ANYBLOB="7c01000019000001c666174cb227bd7000ffdbdf251d010200150004000400002007010000c20fa634521344e40100000015000100040000e001020000e0e00c4cc4e027620100000015000400040000c004010000340c2e1387ba9b9f010000001e0106000813460008648a2197cce960346d712333f4a95a77739cbb8c6ab9a829fad2fc189422bff1165d5ff9edad1f8bc1bffb08e1f1487b19627c0d6eb7c57b9740ec317d992f1b91656f853783f969ea59be419b6b59a1c2e5b7ad0bad252eaf4c1749aac3d0014505b415ce5f7c7d4315f0226da79aeb1df255eeb335ea2ea24cae8489853fa0baa91895b2c2da32626486eeaf1e32d0bfa773e5cd25a1b5ad2837aa9b57e43976a53ad90f6e128ae3ab9a666bb4f2a7c32c053ffe21d64f55a1f21840a560d9c15287c83ad4bbe7fd2ef63f9d0d7e409d85efa329fee17ed0a4e84e763fb6514ac76d7af71cb92e67a41baf2e83b038f5aaec511827d91fe3bcc93043e15dc801c75f4c0ef9e4447fc195685638224a1d18f482b90000099568757e0bb6af73d3bbb4355cb5a94ce6435b42a143345aa8b046d6a2aef5d1588c647c3bf0133ea7db25f8f9e10144c611ac66d01bd029d42960213bd5b38d24df7c59ce6e8ee7e7a66ba6e38e0e0bbf00e5690350142ed3e28f4898636a776d1f51d53c27b7318e4f1cc08c7e297a03537afa5ed821a7c906dec60a5382ba9ab057c4b4ebd7a12c874421e6839c876a08e1310e1b6835c240abcc26f0ecf343ae1b742df2fb0d7278b0326400f77e3dd23d7fa418166e720affe2657e836058ef4106631eb533cf5d7df382f7c60f1956653d72ddc80c9c6f43a7a9c0fa78"], 0x17c}, 0x1, 0x0, 0x0, 0x44001}, 0x4000000) socket$nl_netfilter(0x10, 0x3, 0xc) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000100)=@newlink={0x34, 0x10, 0x0, 0x0, 0x2, {0x0, 0x0, 0x0, 0x0, 0x0, 0x10800}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}]}, 0x34}}, 0x800) [ 2868.653398][T22777] bond1028: entered promiscuous mode [ 2868.670844][T22777] 8021q: adding VLAN 0 to HW filter on device bond1028 17:03:53 executing program 0: r0 = syz_init_net_socket$nl_generic(0x10, 0x3, 0x10) r1 = syz_init_net_socket$nl_generic(0x10, 0x3, 0x10) r2 = syz_genetlink_get_family_id$netlbl_mgmt(&(0x7f00000001c0), r1) sendmsg$NLBL_MGMT_C_ADDDEF(r0, &(0x7f0000000640)={0x0, 0x0, &(0x7f0000000600)={&(0x7f0000000000)={0x2c, r2, 0x1, 0x0, 0x0, {}, [@NLBL_MGMT_A_IPV4ADDR={0x8, 0x7, @initdev={0xac, 0x1e, 0x0, 0x0}}, @NLBL_MGMT_A_PROTOCOL={0x8, 0x2, 0x5}, @NLBL_MGMT_A_IPV4MASK={0x8, 0x8, @initdev={0xac, 0x1e, 0x0, 0x0}}]}, 0x2c}, 0x1, 0x900}, 0x0) 17:03:53 executing program 0: r0 = syz_init_net_socket$nl_generic(0x10, 0x3, 0x10) (async) r1 = syz_init_net_socket$nl_generic(0x10, 0x3, 0x10) r2 = syz_genetlink_get_family_id$netlbl_mgmt(&(0x7f00000001c0), r1) sendmsg$NLBL_MGMT_C_ADDDEF(r0, &(0x7f0000000640)={0x0, 0x0, &(0x7f0000000600)={&(0x7f0000000000)={0x2c, r2, 0x1, 0x0, 0x0, {}, 
[@NLBL_MGMT_A_IPV4ADDR={0x8, 0x7, @initdev={0xac, 0x1e, 0x0, 0x0}}, @NLBL_MGMT_A_PROTOCOL={0x8, 0x2, 0x5}, @NLBL_MGMT_A_IPV4MASK={0x8, 0x8, @initdev={0xac, 0x1e, 0x0, 0x0}}]}, 0x2c}, 0x1, 0x900}, 0x0) [ 2868.920252][T22779] bond1028: (slave bridge1094): making interface the new active one [ 2868.944734][T22779] bridge1094: entered promiscuous mode 17:03:53 executing program 0: r0 = syz_init_net_socket$nl_generic(0x10, 0x3, 0x10) (async) r1 = syz_init_net_socket$nl_generic(0x10, 0x3, 0x10) r2 = syz_genetlink_get_family_id$netlbl_mgmt(&(0x7f00000001c0), r1) sendmsg$NLBL_MGMT_C_ADDDEF(r0, &(0x7f0000000640)={0x0, 0x0, &(0x7f0000000600)={&(0x7f0000000000)={0x2c, r2, 0x1, 0x0, 0x0, {}, [@NLBL_MGMT_A_IPV4ADDR={0x8, 0x7, @initdev={0xac, 0x1e, 0x0, 0x0}}, @NLBL_MGMT_A_PROTOCOL={0x8, 0x2, 0x5}, @NLBL_MGMT_A_IPV4MASK={0x8, 0x8, @initdev={0xac, 0x1e, 0x0, 0x0}}]}, 0x2c}, 0x1, 0x900}, 0x0) [ 2868.978247][T22779] bond1028: (slave bridge1094): Enslaving as an active interface with an up link [ 2869.018097][T22782] netlink: 'syz-executor.5': attribute type 1 has an invalid length. 17:03:54 executing program 2: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x4, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) [ 2869.142956][T22782] bond1098: entered promiscuous mode [ 2869.151012][T22782] 8021q: adding VLAN 0 to HW filter on device bond1098 [ 2869.263895][T22784] bond1098: (slave bridge1124): making interface the new active one [ 2869.272587][T22784] bridge1124: entered promiscuous mode [ 2869.287864][T22784] bond1098: (slave bridge1124): Enslaving as an active interface with an up link 17:03:54 executing program 5: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x35a, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) 17:03:54 executing program 0: bind$inet6(0xffffffffffffffff, 0x0, 0x0) r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000080)='memory.events\x00', 0x26e1, 0x0) r1 = bpf$BPF_PROG_RAW_TRACEPOINT_LOAD(0x5, &(0x7f0000000200)={0x18, 0x3, &(0x7f00000000c0)=@framed, &(0x7f0000000080)='GPL\x00', 0xffffff7f, 0x0, 0x0, 0x0, 0x3, '\x00', 0x0, 0x2, 0xffffffffffffffff, 0x8, 0x0, 0x0, 0x10, 0x0}, 
0x80) bpf$BPF_RAW_TRACEPOINT_OPEN(0x11, &(0x7f00000000c0)={&(0x7f0000000080)='sched_switch\x00', r1}, 0x10) r2 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000140)='memory.events\x00', 0x7a05, 0x1700) write$cgroup_type(r2, &(0x7f0000000000), 0x248800) r3 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000001c0)='cgroup.controllers\x00', 0x275a, 0x0) ioctl$EXT4_IOC_CHECKPOINT(r3, 0x4004662b, &(0x7f0000000000)=0x2) syz_genetlink_get_family_id$nl80211(&(0x7f0000000040), 0xffffffffffffffff) ioctl$EXT4_IOC_CHECKPOINT(0xffffffffffffffff, 0x4004662b, &(0x7f0000000500)=0x5) sendto$inet6(r2, &(0x7f0000000680)="6ccfdf19b55dfe4241e28bc56b1d81431574b8e8d0f05b24ae6553869db76ea40656b7dfe9b4f09bb046e5611742d5246ebe5cc94f37a461c3ff33c9cfce58b54b42254b38e22572d4852ea46a9e56824a962c7cabfa40ce0dfa292c03475b69ea56ce427766e3bfb188e6f085ebda9ccaa19c18ed8960af88074c6d156d916bfbfba7d7823ec054fe96fe9f1cfd80622c1e12d232b608e638339543a49a6b0f0416d6a6b23824a9689d4d8c1653b2652b04cbff0d61f46b691404c675fd390000000000000000", 0xc7, 0x48001, 0x0, 0x0) getsockopt$inet6_tcp_buf(r3, 0x6, 0x0, &(0x7f0000000480)=""/81, 0x0) bpf$BPF_PROG_RAW_TRACEPOINT_LOAD(0x5, 0x0, 0x0) openat$cgroup_ro(0xffffffffffffffff, 0x0, 0x0, 0x0) preadv(0xffffffffffffffff, &(0x7f0000000040)=[{&(0x7f0000000180)=""/98, 0x62}, {&(0x7f0000000280)=""/116, 0x74}, {&(0x7f0000000380)=""/98, 0x62}], 0x3, 0x400, 0x0) pipe(&(0x7f0000000640)={0xffffffffffffffff, 0xffffffffffffffff}) r5 = syz_genetlink_get_family_id$devlink(&(0x7f00000007c0), r0) sendmsg$DEVLINK_CMD_TRAP_POLICER_GET(r4, &(0x7f00000008c0)={&(0x7f0000000780), 0xc, &(0x7f0000000880)={&(0x7f0000000800)={0x3c, r5, 0x400, 0x70bd2b, 0x25dfdbff, {}, [{@nsim={{0xe}, {0xf, 0x2, {'netdevsim', 0x0}}}, {0x8}}]}, 0x3c}, 0x1, 0x0, 0x0, 0x20000000}, 0xc0000) setsockopt$inet6_icmp_ICMP_FILTER(r3, 0x1, 0x1, &(0x7f0000000100)={0x6}, 0x4) bpf$BPF_PROG_RAW_TRACEPOINT_LOAD(0x5, &(0x7f0000000300)={0x0, 0x4, &(0x7f0000000080)=@framed={{0x18, 0x0, 0x0, 0x0, 0x3000}, [@alu={0x4, 0x0, 0x0, 0x0, 0x0, 0x0, 0xffffffffffffffff}]}, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, '\x00', 0x0, 0x0, 0xffffffffffffffff, 0x8, 0x0, 0x0, 0x10, 0x0}, 0x80) sendfile(0xffffffffffffffff, 0xffffffffffffffff, &(0x7f00000002c0)=0x335773c3, 0x8) r6 = syz_init_net_socket$nfc_llcp(0x27, 0x1, 0x1) splice(0xffffffffffffffff, &(0x7f0000000540)=0x4, r6, &(0x7f0000000840)=0x8, 0x6, 0x1) syz_emit_ethernet(0x5e, &(0x7f0000000900)=ANY=[@ANYBLOB="09ff73bdffff6c7621d7cc9486dd60fec00000283a0000000000000000000000ffffac1414bbff0200000000000000000000000000018900907800000000fc00000000000000000000000000e72cbf4a0b24138b19bc4a00000000000001e2813f908556af08623faed6acce540f951d1013adc0c2b6761d055d4052e6dba79f75491e800594b62ee701cb6bb25fa90c53ceffff4b39ca8cbef63e7dd4f422cbde009fe979e5550f6a28beacd23240647995d1fa9a5b53953ad9022eb6"], 0x0) openat$cgroup_ro(0xffffffffffffff9c, 0x0, 0x275a, 0x0) socket$nl_generic(0x10, 0x3, 0x10) r7 = socket$packet(0x11, 0x2, 0x300) setsockopt$packet_rx_ring(r7, 0x107, 0x5, &(0x7f0000000040)=@req3={0x8000, 0x200, 0x80, 0x20000}, 0x1c) setsockopt$ALG_SET_KEY(r2, 0x117, 0x1, &(0x7f0000000580)="b33fb1bcc8c09576f3bc4d4dd6cc1b86cbefb0286aabbcb4933e2d30246696f2f17ff96e934bd49d469a0cba6c2275a2f2dfa1cf1d3f8f38d841f5be4f772e1a7369503592ac7387457b1f3a36668da6c5df0eef3b7d6fb5b37362b1ec82d32521665b851e0e861d549fec16a3fa620ebc1062d595f7623feaffdb79793c3458231ddfeda7773c6c820a90d754513ac22c37a7ff92186f6d1f026b737954680c5e997bdd67303b398c8ee43a35abc57950", 0xb1) mmap(&(0x7f0000568000/0x2000)=nil, 0x1000000, 0x0, 0x11, r7, 0x0) 17:03:54 executing program 4: r0 = 
socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x28a, 0x0, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0x14}]}, 0x3c}}, 0x0) [ 2869.302708][T22787] workqueue: Failed to create a rescuer kthread for wq "bond673": -EINTR [ 2869.440281][T22833] warning: checkpointing journal with EXT4_IOC_CHECKPOINT_FLAG_ZEROOUT can be slow 17:03:54 executing program 1: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x28a, 0xa, {0x0, 0x0, 0x0, 0x0, 0x0, 0xbc3a3895}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8}]}, 0x3c}}, 0x0) 17:03:54 executing program 0: bind$inet6(0xffffffffffffffff, 0x0, 0x0) r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000080)='memory.events\x00', 0x26e1, 0x0) (async) r1 = bpf$BPF_PROG_RAW_TRACEPOINT_LOAD(0x5, &(0x7f0000000200)={0x18, 0x3, &(0x7f00000000c0)=@framed, &(0x7f0000000080)='GPL\x00', 0xffffff7f, 0x0, 0x0, 0x0, 0x3, '\x00', 0x0, 0x2, 0xffffffffffffffff, 0x8, 0x0, 0x0, 0x10, 0x0}, 0x80) bpf$BPF_RAW_TRACEPOINT_OPEN(0x11, &(0x7f00000000c0)={&(0x7f0000000080)='sched_switch\x00', r1}, 0x10) r2 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000140)='memory.events\x00', 0x7a05, 0x1700) write$cgroup_type(r2, &(0x7f0000000000), 0x248800) (async) r3 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000001c0)='cgroup.controllers\x00', 0x275a, 0x0) ioctl$EXT4_IOC_CHECKPOINT(r3, 0x4004662b, &(0x7f0000000000)=0x2) syz_genetlink_get_family_id$nl80211(&(0x7f0000000040), 0xffffffffffffffff) (async) ioctl$EXT4_IOC_CHECKPOINT(0xffffffffffffffff, 0x4004662b, &(0x7f0000000500)=0x5) (async) sendto$inet6(r2, &(0x7f0000000680)="6ccfdf19b55dfe4241e28bc56b1d81431574b8e8d0f05b24ae6553869db76ea40656b7dfe9b4f09bb046e5611742d5246ebe5cc94f37a461c3ff33c9cfce58b54b42254b38e22572d4852ea46a9e56824a962c7cabfa40ce0dfa292c03475b69ea56ce427766e3bfb188e6f085ebda9ccaa19c18ed8960af88074c6d156d916bfbfba7d7823ec054fe96fe9f1cfd80622c1e12d232b608e638339543a49a6b0f0416d6a6b23824a9689d4d8c1653b2652b04cbff0d61f46b691404c675fd390000000000000000", 0xc7, 0x48001, 0x0, 0x0) (async) getsockopt$inet6_tcp_buf(r3, 0x6, 0x0, &(0x7f0000000480)=""/81, 0x0) (async) bpf$BPF_PROG_RAW_TRACEPOINT_LOAD(0x5, 0x0, 0x0) (async) openat$cgroup_ro(0xffffffffffffffff, 0x0, 0x0, 0x0) (async) 
preadv(0xffffffffffffffff, &(0x7f0000000040)=[{&(0x7f0000000180)=""/98, 0x62}, {&(0x7f0000000280)=""/116, 0x74}, {&(0x7f0000000380)=""/98, 0x62}], 0x3, 0x400, 0x0) pipe(&(0x7f0000000640)={0xffffffffffffffff, 0xffffffffffffffff}) (async) r5 = syz_genetlink_get_family_id$devlink(&(0x7f00000007c0), r0) sendmsg$DEVLINK_CMD_TRAP_POLICER_GET(r4, &(0x7f00000008c0)={&(0x7f0000000780), 0xc, &(0x7f0000000880)={&(0x7f0000000800)={0x3c, r5, 0x400, 0x70bd2b, 0x25dfdbff, {}, [{@nsim={{0xe}, {0xf, 0x2, {'netdevsim', 0x0}}}, {0x8}}]}, 0x3c}, 0x1, 0x0, 0x0, 0x20000000}, 0xc0000) (async) setsockopt$inet6_icmp_ICMP_FILTER(r3, 0x1, 0x1, &(0x7f0000000100)={0x6}, 0x4) (async) bpf$BPF_PROG_RAW_TRACEPOINT_LOAD(0x5, &(0x7f0000000300)={0x0, 0x4, &(0x7f0000000080)=@framed={{0x18, 0x0, 0x0, 0x0, 0x3000}, [@alu={0x4, 0x0, 0x0, 0x0, 0x0, 0x0, 0xffffffffffffffff}]}, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, '\x00', 0x0, 0x0, 0xffffffffffffffff, 0x8, 0x0, 0x0, 0x10, 0x0}, 0x80) (async) sendfile(0xffffffffffffffff, 0xffffffffffffffff, &(0x7f00000002c0)=0x335773c3, 0x8) (async) r6 = syz_init_net_socket$nfc_llcp(0x27, 0x1, 0x1) splice(0xffffffffffffffff, &(0x7f0000000540)=0x4, r6, &(0x7f0000000840)=0x8, 0x6, 0x1) (async) syz_emit_ethernet(0x5e, &(0x7f0000000900)=ANY=[@ANYBLOB="09ff73bdffff6c7621d7cc9486dd60fec00000283a0000000000000000000000ffffac1414bbff0200000000000000000000000000018900907800000000fc00000000000000000000000000e72cbf4a0b24138b19bc4a00000000000001e2813f908556af08623faed6acce540f951d1013adc0c2b6761d055d4052e6dba79f75491e800594b62ee701cb6bb25fa90c53ceffff4b39ca8cbef63e7dd4f422cbde009fe979e5550f6a28beacd23240647995d1fa9a5b53953ad9022eb6"], 0x0) openat$cgroup_ro(0xffffffffffffff9c, 0x0, 0x275a, 0x0) (async) socket$nl_generic(0x10, 0x3, 0x10) r7 = socket$packet(0x11, 0x2, 0x300) setsockopt$packet_rx_ring(r7, 0x107, 0x5, &(0x7f0000000040)=@req3={0x8000, 0x200, 0x80, 0x20000}, 0x1c) (async) setsockopt$ALG_SET_KEY(r2, 0x117, 0x1, &(0x7f0000000580)="b33fb1bcc8c09576f3bc4d4dd6cc1b86cbefb0286aabbcb4933e2d30246696f2f17ff96e934bd49d469a0cba6c2275a2f2dfa1cf1d3f8f38d841f5be4f772e1a7369503592ac7387457b1f3a36668da6c5df0eef3b7d6fb5b37362b1ec82d32521665b851e0e861d549fec16a3fa620ebc1062d595f7623feaffdb79793c3458231ddfeda7773c6c820a90d754513ac22c37a7ff92186f6d1f026b737954680c5e997bdd67303b398c8ee43a35abc57950", 0xb1) (async) mmap(&(0x7f0000568000/0x2000)=nil, 0x1000000, 0x0, 0x11, r7, 0x0) [ 2869.456269][T22799] workqueue: Failed to create a rescuer kthread for wq "bond795": -EINTR [ 2869.834358][T22813] netlink: 12 bytes leftover after parsing attributes in process `syz-executor.3'. 
17:03:54 executing program 3: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket(0x0, 0x0, 0x0) sendmsg$nl_route(0xffffffffffffffff, 0x0, 0x0) (async) setsockopt$netrom_NETROM_IDLE(r1, 0x103, 0x7, &(0x7f0000000000)=0x800, 0x4) (async) r2 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) (async) r3 = accept4(0xffffffffffffffff, 0x0, &(0x7f0000000140), 0x80800) (async) r4 = socket$nl_route(0x10, 0x3, 0x0) (async) r5 = socket(0x10, 0x3, 0x0) r6 = socket$nl_route(0x10, 0x3, 0x0) r7 = socket(0x10, 0x2, 0x0) sendmsg$nl_route_sched(r7, &(0x7f0000000180)={0x0, 0x0, &(0x7f0000000140)={0x0, 0x140}}, 0x0) (async) getsockname$packet(r7, &(0x7f0000000080)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000100)=0xab) sendmsg$nl_route(r6, &(0x7f0000000000)={0x0, 0x0, &(0x7f0000000140)={&(0x7f0000000040)=ANY=[@ANYBLOB="3c0000001000010400eeffffffffffff00000000", @ANYRES32=r8, @ANYBLOB="01000000010000001c0012000c000100627269646765"], 0x3c}}, 0x0) sendmsg$nl_route_sched(r5, &(0x7f0000005840)={0x0, 0x0, &(0x7f0000000780)={&(0x7f0000000240)=ANY=[@ANYBLOB="4800000024000b0e00"/20, @ANYRES32=r8, @ANYBLOB="00000000ffffffff0000000008000100687462001c0002001800020003"], 0x48}}, 0x0) (async) sendmsg$nl_route_sched(r4, &(0x7f00000000c0)={0x0, 0x0, &(0x7f0000000180)={&(0x7f0000000480)=ANY=[@ANYBLOB='D\x00\x00\x00,\x00\'\r\x00'/20, @ANYRES32=r8, @ANYBLOB="0000000000000000f3ff00000800010065333200180002001400050000000000000002000000000000083c9b9e8607000000000000000080af7c1df1dd362fd54581d9279ea336589a53af26c70cc254d2426ffd70dc7d4fa6af2b6839fcd2680acf7e7bf2eedd9f1d42b774f3292900726ee7df0b17c6954fe260c43d2d58d297316bb38871c3443fd3c8ebaf6d29b56abf3f58b3884b9730be83ae96553668588cde79c476a1df44dfa29d42a0c9dcc493fa25fe8ff29cbbea945e3009ba45e88c9248d5e0595e59c3038a976487322e98a03284db72f251c36f6905270cae03ab1a752788332d42d744e2c942b8e290a7788365ec28a2bdf5c2cb89014142f3a3fb58e6cefb1a6ee2224a82b02b71c0239eb9cd"], 0x44}}, 0x0) (async) r9 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000140)='memory.events\x00', 0x7a05, 0x1700) (async) r10 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) write$cgroup_int(r9, &(0x7f0000000200), 0xf000) (async) sendfile(r9, r10, 0x0, 0xf03b0000) r11 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) sendfile(r11, r10, &(0x7f00000002c0)=0x335773c3, 0x8) getsockopt$inet_udp_int(r11, 0x11, 0x67, &(0x7f0000000280), &(0x7f00000002c0)=0x4) (async) sendmsg$nl_route_sched(r3, &(0x7f0000000440)={&(0x7f0000000180)={0x10, 0x0, 0x0, 0x400000}, 0xc, &(0x7f0000000400)={&(0x7f0000000200)=@getqdisc={0x28, 0x26, 0x100, 0x70bd29, 0x25dfdbfd, {0x0, 0x0, 0x0, r8, {0xfff1, 0x8}, {0xfff2, 0x2}, {0xb, 0xb}}, [{0x4}]}, 0x28}, 0x1, 0x0, 0x0, 0x90}, 0x4) (async) sendfile(0xffffffffffffffff, r2, 0x0, 0x8000000000004) (async) openat$cgroup_ro(r2, &(0x7f00000001c0)='cpuacct.usage_percpu_user\x00', 0x0, 0x0) (async) sendmsg$nl_route(r2, &(0x7f00000000c0)={&(0x7f0000000040)={0x10, 0x0, 0x0, 0x40000000}, 0xc, 
&(0x7f0000000080)={&(0x7f00000007c0)=ANY=[@ANYBLOB="7c01000019000001c666174cb227bd7000ffdbdf251d010200150004000400002007010000c20fa634521344e40100000015000100040000e001020000e0e00c4cc4e027620100000015000400040000c004010000340c2e1387ba9b9f010000001e0106000813460008648a2197cce960346d712333f4a95a77739cbb8c6ab9a829fad2fc189422bff1165d5ff9edad1f8bc1bffb08e1f1487b19627c0d6eb7c57b9740ec317d992f1b91656f853783f969ea59be419b6b59a1c2e5b7ad0bad252eaf4c1749aac3d0014505b415ce5f7c7d4315f0226da79aeb1df255eeb335ea2ea24cae8489853fa0baa91895b2c2da32626486eeaf1e32d0bfa773e5cd25a1b5ad2837aa9b57e43976a53ad90f6e128ae3ab9a666bb4f2a7c32c053ffe21d64f55a1f21840a560d9c15287c83ad4bbe7fd2ef63f9d0d7e409d85efa329fee17ed0a4e84e763fb6514ac76d7af71cb92e67a41baf2e83b038f5aaec511827d91fe3bcc93043e15dc801c75f4c0ef9e4447fc195685638224a1d18f482b90000099568757e0bb6af73d3bbb4355cb5a94ce6435b42a143345aa8b046d6a2aef5d1588c647c3bf0133ea7db25f8f9e10144c611ac66d01bd029d42960213bd5b38d24df7c59ce6e8ee7e7a66ba6e38e0e0bbf00e5690350142ed3e28f4898636a776d1f51d53c27b7318e4f1cc08c7e297a03537afa5ed821a7c906dec60a5382ba9ab057c4b4ebd7a12c874421e6839c876a08e1310e1b6835c240abcc26f0ecf343ae1b742df2fb0d7278b0326400f77e3dd23d7fa418166e720affe2657e836058ef4106631eb533cf5d7df382f7c60f1956653d72ddc80c9c6f43a7a9c0fa78"], 0x17c}, 0x1, 0x0, 0x0, 0x44001}, 0x4000000) (async) socket$nl_netfilter(0x10, 0x3, 0xc) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000100)=@newlink={0x34, 0x10, 0x0, 0x0, 0x2, {0x0, 0x0, 0x0, 0x0, 0x0, 0x10800}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}]}, 0x34}}, 0x800) [ 2869.903370][T22852] warning: checkpointing journal with EXT4_IOC_CHECKPOINT_FLAG_ZEROOUT can be slow [ 2870.030513][T22829] bond1029: entered promiscuous mode 17:03:55 executing program 0: bind$inet6(0xffffffffffffffff, 0x0, 0x0) (async) r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000080)='memory.events\x00', 0x26e1, 0x0) (async, rerun: 32) r1 = bpf$BPF_PROG_RAW_TRACEPOINT_LOAD(0x5, &(0x7f0000000200)={0x18, 0x3, &(0x7f00000000c0)=@framed, &(0x7f0000000080)='GPL\x00', 0xffffff7f, 0x0, 0x0, 0x0, 0x3, '\x00', 0x0, 0x2, 0xffffffffffffffff, 0x8, 0x0, 0x0, 0x10, 0x0}, 0x80) (rerun: 32) bpf$BPF_RAW_TRACEPOINT_OPEN(0x11, &(0x7f00000000c0)={&(0x7f0000000080)='sched_switch\x00', r1}, 0x10) (async) r2 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000140)='memory.events\x00', 0x7a05, 0x1700) write$cgroup_type(r2, &(0x7f0000000000), 0x248800) (async) r3 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000001c0)='cgroup.controllers\x00', 0x275a, 0x0) ioctl$EXT4_IOC_CHECKPOINT(r3, 0x4004662b, &(0x7f0000000000)=0x2) syz_genetlink_get_family_id$nl80211(&(0x7f0000000040), 0xffffffffffffffff) ioctl$EXT4_IOC_CHECKPOINT(0xffffffffffffffff, 0x4004662b, &(0x7f0000000500)=0x5) (async, rerun: 32) sendto$inet6(r2, &(0x7f0000000680)="6ccfdf19b55dfe4241e28bc56b1d81431574b8e8d0f05b24ae6553869db76ea40656b7dfe9b4f09bb046e5611742d5246ebe5cc94f37a461c3ff33c9cfce58b54b42254b38e22572d4852ea46a9e56824a962c7cabfa40ce0dfa292c03475b69ea56ce427766e3bfb188e6f085ebda9ccaa19c18ed8960af88074c6d156d916bfbfba7d7823ec054fe96fe9f1cfd80622c1e12d232b608e638339543a49a6b0f0416d6a6b23824a9689d4d8c1653b2652b04cbff0d61f46b691404c675fd390000000000000000", 0xc7, 0x48001, 0x0, 0x0) (rerun: 32) getsockopt$inet6_tcp_buf(r3, 0x6, 0x0, &(0x7f0000000480)=""/81, 0x0) (async) bpf$BPF_PROG_RAW_TRACEPOINT_LOAD(0x5, 0x0, 0x0) (async) openat$cgroup_ro(0xffffffffffffffff, 0x0, 0x0, 0x0) (async) preadv(0xffffffffffffffff, &(0x7f0000000040)=[{&(0x7f0000000180)=""/98, 0x62}, 
{&(0x7f0000000280)=""/116, 0x74}, {&(0x7f0000000380)=""/98, 0x62}], 0x3, 0x400, 0x0) (async, rerun: 32) pipe(&(0x7f0000000640)={0xffffffffffffffff, 0xffffffffffffffff}) (async, rerun: 32) r5 = syz_genetlink_get_family_id$devlink(&(0x7f00000007c0), r0) sendmsg$DEVLINK_CMD_TRAP_POLICER_GET(r4, &(0x7f00000008c0)={&(0x7f0000000780), 0xc, &(0x7f0000000880)={&(0x7f0000000800)={0x3c, r5, 0x400, 0x70bd2b, 0x25dfdbff, {}, [{@nsim={{0xe}, {0xf, 0x2, {'netdevsim', 0x0}}}, {0x8}}]}, 0x3c}, 0x1, 0x0, 0x0, 0x20000000}, 0xc0000) (async) setsockopt$inet6_icmp_ICMP_FILTER(r3, 0x1, 0x1, &(0x7f0000000100)={0x6}, 0x4) bpf$BPF_PROG_RAW_TRACEPOINT_LOAD(0x5, &(0x7f0000000300)={0x0, 0x4, &(0x7f0000000080)=@framed={{0x18, 0x0, 0x0, 0x0, 0x3000}, [@alu={0x4, 0x0, 0x0, 0x0, 0x0, 0x0, 0xffffffffffffffff}]}, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, '\x00', 0x0, 0x0, 0xffffffffffffffff, 0x8, 0x0, 0x0, 0x10, 0x0}, 0x80) (async, rerun: 64) sendfile(0xffffffffffffffff, 0xffffffffffffffff, &(0x7f00000002c0)=0x335773c3, 0x8) (async, rerun: 64) r6 = syz_init_net_socket$nfc_llcp(0x27, 0x1, 0x1) splice(0xffffffffffffffff, &(0x7f0000000540)=0x4, r6, &(0x7f0000000840)=0x8, 0x6, 0x1) (async, rerun: 32) syz_emit_ethernet(0x5e, &(0x7f0000000900)=ANY=[@ANYBLOB="09ff73bdffff6c7621d7cc9486dd60fec00000283a0000000000000000000000ffffac1414bbff0200000000000000000000000000018900907800000000fc00000000000000000000000000e72cbf4a0b24138b19bc4a00000000000001e2813f908556af08623faed6acce540f951d1013adc0c2b6761d055d4052e6dba79f75491e800594b62ee701cb6bb25fa90c53ceffff4b39ca8cbef63e7dd4f422cbde009fe979e5550f6a28beacd23240647995d1fa9a5b53953ad9022eb6"], 0x0) (async, rerun: 32) openat$cgroup_ro(0xffffffffffffff9c, 0x0, 0x275a, 0x0) (async, rerun: 64) socket$nl_generic(0x10, 0x3, 0x10) (rerun: 64) r7 = socket$packet(0x11, 0x2, 0x300) setsockopt$packet_rx_ring(r7, 0x107, 0x5, &(0x7f0000000040)=@req3={0x8000, 0x200, 0x80, 0x20000}, 0x1c) setsockopt$ALG_SET_KEY(r2, 0x117, 0x1, &(0x7f0000000580)="b33fb1bcc8c09576f3bc4d4dd6cc1b86cbefb0286aabbcb4933e2d30246696f2f17ff96e934bd49d469a0cba6c2275a2f2dfa1cf1d3f8f38d841f5be4f772e1a7369503592ac7387457b1f3a36668da6c5df0eef3b7d6fb5b37362b1ec82d32521665b851e0e861d549fec16a3fa620ebc1062d595f7623feaffdb79793c3458231ddfeda7773c6c820a90d754513ac22c37a7ff92186f6d1f026b737954680c5e997bdd67303b398c8ee43a35abc57950", 0xb1) (async, rerun: 64) mmap(&(0x7f0000568000/0x2000)=nil, 0x1000000, 0x0, 0x11, r7, 0x0) (rerun: 64) [ 2870.073442][T22829] 8021q: adding VLAN 0 to HW filter on device bond1029 17:03:55 executing program 3: r0 = socket$netlink(0x10, 0x3, 0x0) (async) r1 = socket(0x0, 0x0, 0x0) sendmsg$nl_route(0xffffffffffffffff, 0x0, 0x0) (async, rerun: 32) setsockopt$netrom_NETROM_IDLE(r1, 0x103, 0x7, &(0x7f0000000000)=0x800, 0x4) (async, rerun: 32) r2 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) r3 = accept4(0xffffffffffffffff, 0x0, &(0x7f0000000140), 0x80800) (async) r4 = socket$nl_route(0x10, 0x3, 0x0) (async, rerun: 32) r5 = socket(0x10, 0x3, 0x0) (async, rerun: 32) r6 = socket$nl_route(0x10, 0x3, 0x0) (async) r7 = socket(0x10, 0x2, 0x0) sendmsg$nl_route_sched(r7, &(0x7f0000000180)={0x0, 0x0, &(0x7f0000000140)={0x0, 0x140}}, 0x0) getsockname$packet(r7, &(0x7f0000000080)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000100)=0xab) sendmsg$nl_route(r6, &(0x7f0000000000)={0x0, 0x0, &(0x7f0000000140)={&(0x7f0000000040)=ANY=[@ANYBLOB="3c0000001000010400eeffffffffffff00000000", @ANYRES32=r8, @ANYBLOB="01000000010000001c0012000c000100627269646765"], 0x3c}}, 0x0) 
sendmsg$nl_route_sched(r5, &(0x7f0000005840)={0x0, 0x0, &(0x7f0000000780)={&(0x7f0000000240)=ANY=[@ANYBLOB="4800000024000b0e00"/20, @ANYRES32=r8, @ANYBLOB="00000000ffffffff0000000008000100687462001c0002001800020003"], 0x48}}, 0x0) (async) sendmsg$nl_route_sched(r4, &(0x7f00000000c0)={0x0, 0x0, &(0x7f0000000180)={&(0x7f0000000480)=ANY=[@ANYBLOB='D\x00\x00\x00,\x00\'\r\x00'/20, @ANYRES32=r8, @ANYBLOB="0000000000000000f3ff00000800010065333200180002001400050000000000000002000000000000083c9b9e8607000000000000000080af7c1df1dd362fd54581d9279ea336589a53af26c70cc254d2426ffd70dc7d4fa6af2b6839fcd2680acf7e7bf2eedd9f1d42b774f3292900726ee7df0b17c6954fe260c43d2d58d297316bb38871c3443fd3c8ebaf6d29b56abf3f58b3884b9730be83ae96553668588cde79c476a1df44dfa29d42a0c9dcc493fa25fe8ff29cbbea945e3009ba45e88c9248d5e0595e59c3038a976487322e98a03284db72f251c36f6905270cae03ab1a752788332d42d744e2c942b8e290a7788365ec28a2bdf5c2cb89014142f3a3fb58e6cefb1a6ee2224a82b02b71c0239eb9cd"], 0x44}}, 0x0) (async) r9 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000140)='memory.events\x00', 0x7a05, 0x1700) (async) r10 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) write$cgroup_int(r9, &(0x7f0000000200), 0xf000) (async) sendfile(r9, r10, 0x0, 0xf03b0000) (async) r11 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) sendfile(r11, r10, &(0x7f00000002c0)=0x335773c3, 0x8) (async) getsockopt$inet_udp_int(r11, 0x11, 0x67, &(0x7f0000000280), &(0x7f00000002c0)=0x4) sendmsg$nl_route_sched(r3, &(0x7f0000000440)={&(0x7f0000000180)={0x10, 0x0, 0x0, 0x400000}, 0xc, &(0x7f0000000400)={&(0x7f0000000200)=@getqdisc={0x28, 0x26, 0x100, 0x70bd29, 0x25dfdbfd, {0x0, 0x0, 0x0, r8, {0xfff1, 0x8}, {0xfff2, 0x2}, {0xb, 0xb}}, [{0x4}]}, 0x28}, 0x1, 0x0, 0x0, 0x90}, 0x4) (async) sendfile(0xffffffffffffffff, r2, 0x0, 0x8000000000004) openat$cgroup_ro(r2, &(0x7f00000001c0)='cpuacct.usage_percpu_user\x00', 0x0, 0x0) (async) sendmsg$nl_route(r2, &(0x7f00000000c0)={&(0x7f0000000040)={0x10, 0x0, 0x0, 0x40000000}, 0xc, &(0x7f0000000080)={&(0x7f00000007c0)=ANY=[@ANYBLOB="7c01000019000001c666174cb227bd7000ffdbdf251d010200150004000400002007010000c20fa634521344e40100000015000100040000e001020000e0e00c4cc4e027620100000015000400040000c004010000340c2e1387ba9b9f010000001e0106000813460008648a2197cce960346d712333f4a95a77739cbb8c6ab9a829fad2fc189422bff1165d5ff9edad1f8bc1bffb08e1f1487b19627c0d6eb7c57b9740ec317d992f1b91656f853783f969ea59be419b6b59a1c2e5b7ad0bad252eaf4c1749aac3d0014505b415ce5f7c7d4315f0226da79aeb1df255eeb335ea2ea24cae8489853fa0baa91895b2c2da32626486eeaf1e32d0bfa773e5cd25a1b5ad2837aa9b57e43976a53ad90f6e128ae3ab9a666bb4f2a7c32c053ffe21d64f55a1f21840a560d9c15287c83ad4bbe7fd2ef63f9d0d7e409d85efa329fee17ed0a4e84e763fb6514ac76d7af71cb92e67a41baf2e83b038f5aaec511827d91fe3bcc93043e15dc801c75f4c0ef9e4447fc195685638224a1d18f482b90000099568757e0bb6af73d3bbb4355cb5a94ce6435b42a143345aa8b046d6a2aef5d1588c647c3bf0133ea7db25f8f9e10144c611ac66d01bd029d42960213bd5b38d24df7c59ce6e8ee7e7a66ba6e38e0e0bbf00e5690350142ed3e28f4898636a776d1f51d53c27b7318e4f1cc08c7e297a03537afa5ed821a7c906dec60a5382ba9ab057c4b4ebd7a12c874421e6839c876a08e1310e1b6835c240abcc26f0ecf343ae1b742df2fb0d7278b0326400f77e3dd23d7fa418166e720affe2657e836058ef4106631eb533cf5d7df382f7c60f1956653d72ddc80c9c6f43a7a9c0fa78"], 0x17c}, 0x1, 0x0, 0x0, 0x44001}, 0x4000000) (async) socket$nl_netfilter(0x10, 0x3, 0xc) (async, rerun: 64) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000100)=@newlink={0x34, 0x10, 
0x0, 0x0, 0x2, {0x0, 0x0, 0x0, 0x0, 0x0, 0x10800}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}]}, 0x34}}, 0x800) (rerun: 64) 17:03:55 executing program 3: r0 = socket$netlink(0x10, 0x3, 0x0) socket(0x0, 0x0, 0x0) sendmsg$nl_route(0xffffffffffffffff, 0x0, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000000)=ANY=[@ANYBLOB="4800000010001fff00"/20, @ANYRES32=0x0, @ANYBLOB="0000000000000000280012800b0001006272696467650000180002800c002ee44772c3f6ebb7b8464d000002000000000005002a000000000000"], 0x48}}, 0x0) 17:03:55 executing program 0: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket(0x0, 0x0, 0x0) sendmsg$nl_route(0xffffffffffffffff, 0x0, 0x0) setsockopt$netrom_NETROM_IDLE(r1, 0x103, 0x7, &(0x7f0000000000)=0x800, 0x4) r2 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) r3 = accept4(0xffffffffffffffff, 0x0, &(0x7f0000000140), 0x80800) r4 = socket$nl_route(0x10, 0x3, 0x0) r5 = socket(0x10, 0x3, 0x0) r6 = socket$nl_route(0x10, 0x3, 0x0) r7 = socket(0x10, 0x2, 0x0) sendmsg$nl_route_sched(r7, &(0x7f0000000180)={0x0, 0x0, &(0x7f0000000140)={0x0, 0x140}}, 0x0) getsockname$packet(r7, &(0x7f0000000080)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000100)=0xab) sendmsg$nl_route(r6, &(0x7f0000000000)={0x0, 0x0, &(0x7f0000000140)={&(0x7f0000000040)=ANY=[@ANYBLOB="3c0000001000010400eeffffffffffff00000000", @ANYRES32=r8, @ANYBLOB="01000000010000001c0012000c000100627269646765"], 0x3c}}, 0x0) sendmsg$nl_route_sched(r5, &(0x7f0000005840)={0x0, 0x0, &(0x7f0000000780)={&(0x7f0000000240)=ANY=[@ANYBLOB="4800000024000b0e00"/20, @ANYRES32=r8, @ANYBLOB="00000000ffffffff0000000008000100687462001c0002001800020003"], 0x48}}, 0x0) sendmsg$nl_route_sched(r4, &(0x7f00000000c0)={0x0, 0x0, &(0x7f0000000180)={&(0x7f0000000480)=ANY=[@ANYBLOB='D\x00\x00\x00,\x00\'\r\x00'/20, @ANYRES32=r8, @ANYBLOB="0000000000000000f3ff00000800010065333200180002001400050000000000000002000000000000083c9b9e8607000000000000000080af7c1df1dd362fd54581d9279ea336589a53af26c70cc254d2426ffd70dc7d4fa6af2b6839fcd2680acf7e7bf2eedd9f1d42b774f3292900726ee7df0b17c6954fe260c43d2d58d297316bb38871c3443fd3c8ebaf6d29b56abf3f58b3884b9730be83ae96553668588cde79c476a1df44dfa29d42a0c9dcc493fa25fe8ff29cbbea945e3009ba45e88c9248d5e0595e59c3038a976487322e98a03284db72f251c36f6905270cae03ab1a752788332d42d744e2c942b8e290a7788365ec28a2bdf5c2cb89014142f3a3fb58e6cefb1a6ee2224a82b02b71c0239eb9cd"], 0x44}}, 0x0) r9 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000140)='memory.events\x00', 0x7a05, 0x1700) r10 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) write$cgroup_int(r9, &(0x7f0000000200), 0xf000) sendfile(r9, r10, 0x0, 0xf03b0000) r11 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) sendfile(r11, r10, &(0x7f00000002c0)=0x335773c3, 0x8) getsockopt$inet_udp_int(r11, 0x11, 0x67, &(0x7f0000000280), &(0x7f00000002c0)=0x4) sendmsg$nl_route_sched(r3, &(0x7f0000000440)={&(0x7f0000000180)={0x10, 0x0, 0x0, 0x400000}, 0xc, &(0x7f0000000400)={&(0x7f0000000200)=@getqdisc={0x28, 0x26, 0x100, 0x70bd29, 0x25dfdbfd, {0x0, 0x0, 0x0, r8, {0xfff1, 0x8}, {0xfff2, 0x2}, {0xb, 0xb}}, [{0x4}]}, 0x28}, 0x1, 0x0, 0x0, 0x90}, 0x4) sendfile(0xffffffffffffffff, r2, 0x0, 0x8000000000004) openat$cgroup_ro(r2, &(0x7f00000001c0)='cpuacct.usage_percpu_user\x00', 0x0, 0x0) sendmsg$nl_route(r2, &(0x7f00000000c0)={&(0x7f0000000040)={0x10, 0x0, 0x0, 0x40000000}, 0xc, 
&(0x7f0000000080)={&(0x7f00000007c0)=ANY=[@ANYBLOB="7c01000019000001c666174cb227bd7000ffdbdf251d010200150004000400002007010000c20fa634521344e40100000015000100040000e001020000e0e00c4cc4e027620100000015000400040000c004010000340c2e1387ba9b9f010000001e0106000813460008648a2197cce960346d712333f4a95a77739cbb8c6ab9a829fad2fc189422bff1165d5ff9edad1f8bc1bffb08e1f1487b19627c0d6eb7c57b9740ec317d992f1b91656f853783f969ea59be419b6b59a1c2e5b7ad0bad252eaf4c1749aac3d0014505b415ce5f7c7d4315f0226da79aeb1df255eeb335ea2ea24cae8489853fa0baa91895b2c2da32626486eeaf1e32d0bfa773e5cd25a1b5ad2837aa9b57e43976a53ad90f6e128ae3ab9a666bb4f2a7c32c053ffe21d64f55a1f21840a560d9c15287c83ad4bbe7fd2ef63f9d0d7e409d85efa329fee17ed0a4e84e763fb6514ac76d7af71cb92e67a41baf2e83b038f5aaec511827d91fe3bcc93043e15dc801c75f4c0ef9e4447fc195685638224a1d18f482b90000099568757e0bb6af73d3bbb4355cb5a94ce6435b42a143345aa8b046d6a2aef5d1588c647c3bf0133ea7db25f8f9e10144c611ac66d01bd029d42960213bd5b38d24df7c59ce6e8ee7e7a66ba6e38e0e0bbf00e5690350142ed3e28f4898636a776d1f51d53c27b7318e4f1cc08c7e297a03537afa5ed821a7c906dec60a5382ba9ab057c4b4ebd7a12c874421e6839c876a08e1310e1b6835c240abcc26f0ecf343ae1b742df2fb0d7278b0326400f77e3dd23d7fa418166e720affe2657e836058ef4106631eb533cf5d7df382f7c60f1956653d72ddc80c9c6f43a7a9c0fa78"], 0x17c}, 0x1, 0x0, 0x0, 0x44001}, 0x4000000) socket$nl_netfilter(0x10, 0x3, 0xc) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000100)=@newlink={0x34, 0x10, 0x0, 0x0, 0x2, {0x0, 0x0, 0x0, 0x0, 0x0, 0x10800}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}]}, 0x34}}, 0x800) [ 2870.240277][T22864] warning: checkpointing journal with EXT4_IOC_CHECKPOINT_FLAG_ZEROOUT can be slow [ 2870.330879][T22830] bond1029: (slave bridge1095): making interface the new active one [ 2870.368530][T22830] bridge1095: entered promiscuous mode [ 2870.414147][T22830] bond1029: (slave bridge1095): Enslaving as an active interface with an up link 17:03:55 executing program 2: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x5, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) 17:03:55 executing program 5: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x362, {}, 
[@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) [ 2870.456095][T22837] workqueue: Failed to create a rescuer kthread for wq "bond1099": -EINTR 17:03:55 executing program 4: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x28a, 0x0, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0x15}]}, 0x3c}}, 0x0) [ 2870.532489][T22842] workqueue: Failed to create a rescuer kthread for wq "bond673": -EINTR [ 2870.789579][T22848] bond795: entered promiscuous mode [ 2870.822361][T22848] 8021q: adding VLAN 0 to HW filter on device bond795 17:03:55 executing program 1: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x28a, 0xa, {0x0, 0x0, 0x0, 0x0, 0x0, 0xf0ffffff}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8}]}, 0x3c}}, 0x0) [ 2870.881485][T22885] netlink: 8 bytes leftover after parsing attributes in process `syz-executor.3'. 17:03:55 executing program 3: r0 = socket$netlink(0x10, 0x3, 0x0) socket(0x0, 0x0, 0x0) sendmsg$nl_route(0xffffffffffffffff, 0x0, 0x0) (async) sendmsg$nl_route(0xffffffffffffffff, 0x0, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000000)=ANY=[@ANYBLOB="4800000010001fff00"/20, @ANYRES32=0x0, @ANYBLOB="0000000000000000280012800b0001006272696467650000180002800c002ee44772c3f6ebb7b8464d000002000000000005002a000000000000"], 0x48}}, 0x0) [ 2870.961475][T22887] netlink: 12 bytes leftover after parsing attributes in process `syz-executor.0'. 
17:03:56 executing program 0: bind$inet6(0xffffffffffffffff, 0x0, 0x0) r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000080)='memory.events\x00', 0x26e1, 0x0) r1 = bpf$BPF_PROG_RAW_TRACEPOINT_LOAD(0x5, &(0x7f0000000200)={0x18, 0x3, &(0x7f00000000c0)=@framed, &(0x7f0000000080)='GPL\x00', 0xffffff7f, 0x0, 0x0, 0x0, 0x3, '\x00', 0x0, 0x2, 0xffffffffffffffff, 0x8, 0x0, 0x0, 0x10, 0x0}, 0x80) bpf$BPF_RAW_TRACEPOINT_OPEN(0x11, &(0x7f00000000c0)={&(0x7f0000000080)='sched_switch\x00', r1}, 0x10) r2 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000140)='memory.events\x00', 0x7a05, 0x1700) write$cgroup_type(r2, &(0x7f0000000000), 0x248800) r3 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000001c0)='cgroup.controllers\x00', 0x275a, 0x0) ioctl$EXT4_IOC_CHECKPOINT(r3, 0x4004662b, &(0x7f0000000000)=0x2) syz_genetlink_get_family_id$nl80211(&(0x7f0000000040), 0xffffffffffffffff) ioctl$EXT4_IOC_CHECKPOINT(0xffffffffffffffff, 0x4004662b, &(0x7f0000000500)=0x5) sendto$inet6(r2, &(0x7f0000000680)="6ccfdf19b55dfe4241e28bc56b1d81431574b8e8d0f05b24ae6553869db76ea40656b7dfe9b4f09bb046e5611742d5246ebe5cc94f37a461c3ff33c9cfce58b54b42254b38e22572d4852ea46a9e56824a962c7cabfa40ce0dfa292c03475b69ea56ce427766e3bfb188e6f085ebda9ccaa19c18ed8960af88074c6d156d916bfbfba7d7823ec054fe96fe9f1cfd80622c1e12d232b608e638339543a49a6b0f0416d6a6b23824a9689d4d8c1653b2652b04cbff0d61f46b691404c675fd390000000000000000", 0xc7, 0x48001, 0x0, 0x0) getsockopt$inet6_tcp_buf(r3, 0x6, 0x0, &(0x7f0000000480)=""/81, 0x0) bpf$BPF_PROG_RAW_TRACEPOINT_LOAD(0x5, 0x0, 0x0) openat$cgroup_ro(0xffffffffffffffff, 0x0, 0x0, 0x0) preadv(0xffffffffffffffff, &(0x7f0000000040)=[{&(0x7f0000000180)=""/98, 0x62}, {&(0x7f0000000280)=""/116, 0x74}, {&(0x7f0000000380)=""/98, 0x62}], 0x3, 0x400, 0x0) pipe(&(0x7f0000000640)={0xffffffffffffffff, 0xffffffffffffffff}) r5 = syz_genetlink_get_family_id$devlink(&(0x7f00000007c0), r0) sendmsg$DEVLINK_CMD_TRAP_POLICER_GET(r4, &(0x7f00000008c0)={&(0x7f0000000780), 0xc, &(0x7f0000000880)={&(0x7f0000000800)={0x3c, r5, 0x400, 0x70bd2b, 0x25dfdbff, {}, [{@nsim={{0xe}, {0xf, 0x2, {'netdevsim', 0x0}}}, {0x8}}]}, 0x3c}, 0x1, 0x0, 0x0, 0x20000000}, 0xc0000) setsockopt$inet6_icmp_ICMP_FILTER(r3, 0x1, 0x1, &(0x7f0000000100)={0x6}, 0x4) bpf$BPF_PROG_RAW_TRACEPOINT_LOAD(0x5, &(0x7f0000000300)={0x0, 0x4, &(0x7f0000000080)=@framed={{0x18, 0x0, 0x0, 0x0, 0x3000}, [@alu={0x4, 0x0, 0x0, 0x0, 0x0, 0x0, 0xffffffffffffffff}]}, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, '\x00', 0x0, 0x0, 0xffffffffffffffff, 0x8, 0x0, 0x0, 0x10, 0x0}, 0x80) sendfile(0xffffffffffffffff, 0xffffffffffffffff, &(0x7f00000002c0)=0x335773c3, 0x8) r6 = syz_init_net_socket$nfc_llcp(0x27, 0x1, 0x1) splice(0xffffffffffffffff, &(0x7f0000000540)=0x4, r6, &(0x7f0000000840)=0x8, 0x6, 0x1) syz_emit_ethernet(0x5e, &(0x7f0000000900)=ANY=[@ANYBLOB="09ff73bdffff6c7621d7cc9486dd60fec00000283a0000000000000000000000ffffac1414bbff0200000000000000000000000000018900907800000000fc00000000000000000000000000e72cbf4a0b24138b19bc4a00000000000001e2813f908556af08623faed6acce540f951d1013adc0c2b6761d055d4052e6dba79f75491e800594b62ee701cb6bb25fa90c53ceffff4b39ca8cbef63e7dd4f422cbde009fe979e5550f6a28beacd23240647995d1fa9a5b53953ad9022eb6"], 0x0) openat$cgroup_ro(0xffffffffffffff9c, 0x0, 0x275a, 0x0) socket$nl_generic(0x10, 0x3, 0x10) r7 = socket$packet(0x11, 0x2, 0x300) setsockopt$packet_rx_ring(r7, 0x107, 0x5, &(0x7f0000000040)=@req3={0x8000, 0x200, 0x80, 0x20000}, 0x1c) setsockopt$ALG_SET_KEY(r2, 0x117, 0x1, 
&(0x7f0000000580)="b33fb1bcc8c09576f3bc4d4dd6cc1b86cbefb0286aabbcb4933e2d30246696f2f17ff96e934bd49d469a0cba6c2275a2f2dfa1cf1d3f8f38d841f5be4f772e1a7369503592ac7387457b1f3a36668da6c5df0eef3b7d6fb5b37362b1ec82d32521665b851e0e861d549fec16a3fa620ebc1062d595f7623feaffdb79793c3458231ddfeda7773c6c820a90d754513ac22c37a7ff92186f6d1f026b737954680c5e997bdd67303b398c8ee43a35abc57950", 0xb1) mmap(&(0x7f0000568000/0x2000)=nil, 0x1000000, 0x0, 0x11, r7, 0x0) [ 2871.102133][T22892] bond1099: entered promiscuous mode [ 2871.109234][T22892] 8021q: adding VLAN 0 to HW filter on device bond1099 [ 2871.184316][T22897] bond1099: (slave bridge1125): making interface the new active one [ 2871.193710][T22897] bridge1125: entered promiscuous mode [ 2871.206977][T22897] bond1099: (slave bridge1125): Enslaving as an active interface with an up link 17:03:56 executing program 5: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x36a, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) [ 2871.309755][T22914] warning: checkpointing journal with EXT4_IOC_CHECKPOINT_FLAG_ZEROOUT can be slow [ 2871.337480][T22895] bond1030: entered promiscuous mode [ 2871.354210][T22895] 8021q: adding VLAN 0 to HW filter on device bond1030 17:03:56 executing program 2: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x6, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) [ 2871.427208][T22899] bond1030: (slave bridge1096): making interface the new active one [ 2871.436576][T22899] bridge1096: entered promiscuous mode [ 2871.453230][T22899] bond1030: (slave bridge1096): Enslaving as an active interface with an up link [ 2871.522947][T22900] bond673: entered promiscuous mode [ 2871.530058][T22900] 8021q: adding VLAN 0 to HW filter on device bond673 17:03:56 executing program 4: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, 
&(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x28a, 0x0, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0x16}]}, 0x3c}}, 0x0) [ 2871.651618][T22903] bond796: entered promiscuous mode [ 2871.657777][T22903] 8021q: adding VLAN 0 to HW filter on device bond796 17:03:56 executing program 0: bind$inet6(0xffffffffffffffff, 0x0, 0x0) r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000080)='memory.events\x00', 0x26e1, 0x0) r1 = bpf$BPF_PROG_RAW_TRACEPOINT_LOAD(0x5, &(0x7f0000000200)={0x18, 0x3, &(0x7f00000000c0)=@framed, &(0x7f0000000080)='GPL\x00', 0xffffff7f, 0x0, 0x0, 0x0, 0x3, '\x00', 0x0, 0x2, 0xffffffffffffffff, 0x8, 0x0, 0x0, 0x10, 0x0}, 0x80) bpf$BPF_RAW_TRACEPOINT_OPEN(0x11, &(0x7f00000000c0)={&(0x7f0000000080)='sched_switch\x00', r1}, 0x10) r2 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000140)='memory.events\x00', 0x7a05, 0x1700) write$cgroup_type(r2, &(0x7f0000000000), 0x248800) r3 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000001c0)='cgroup.controllers\x00', 0x275a, 0x0) ioctl$EXT4_IOC_CHECKPOINT(r3, 0x4004662b, &(0x7f0000000000)=0x2) syz_genetlink_get_family_id$nl80211(&(0x7f0000000040), 0xffffffffffffffff) ioctl$EXT4_IOC_CHECKPOINT(0xffffffffffffffff, 0x4004662b, &(0x7f0000000500)=0x5) sendto$inet6(r2, &(0x7f0000000680)="6ccfdf19b55dfe4241e28bc56b1d81431574b8e8d0f05b24ae6553869db76ea40656b7dfe9b4f09bb046e5611742d5246ebe5cc94f37a461c3ff33c9cfce58b54b42254b38e22572d4852ea46a9e56824a962c7cabfa40ce0dfa292c03475b69ea56ce427766e3bfb188e6f085ebda9ccaa19c18ed8960af88074c6d156d916bfbfba7d7823ec054fe96fe9f1cfd80622c1e12d232b608e638339543a49a6b0f0416d6a6b23824a9689d4d8c1653b2652b04cbff0d61f46b691404c675fd390000000000000000", 0xc7, 0x48001, 0x0, 0x0) getsockopt$inet6_tcp_buf(r3, 0x6, 0x0, &(0x7f0000000480)=""/81, 0x0) bpf$BPF_PROG_RAW_TRACEPOINT_LOAD(0x5, 0x0, 0x0) openat$cgroup_ro(0xffffffffffffffff, 0x0, 0x0, 0x0) preadv(0xffffffffffffffff, &(0x7f0000000040)=[{&(0x7f0000000180)=""/98, 0x62}, {&(0x7f0000000280)=""/116, 0x74}, {&(0x7f0000000380)=""/98, 0x62}], 0x3, 0x400, 0x0) pipe(&(0x7f0000000640)={0xffffffffffffffff, 0xffffffffffffffff}) r5 = syz_genetlink_get_family_id$devlink(&(0x7f00000007c0), r0) sendmsg$DEVLINK_CMD_TRAP_POLICER_GET(r4, &(0x7f00000008c0)={&(0x7f0000000780), 0xc, &(0x7f0000000880)={&(0x7f0000000800)={0x3c, r5, 0x400, 0x70bd2b, 0x25dfdbff, {}, [{@nsim={{0xe}, {0xf, 0x2, {'netdevsim', 0x0}}}, {0x8}}]}, 0x3c}, 0x1, 0x0, 0x0, 0x20000000}, 0xc0000) setsockopt$inet6_icmp_ICMP_FILTER(r3, 0x1, 0x1, &(0x7f0000000100)={0x6}, 0x4) bpf$BPF_PROG_RAW_TRACEPOINT_LOAD(0x5, &(0x7f0000000300)={0x0, 0x4, &(0x7f0000000080)=@framed={{0x18, 0x0, 0x0, 0x0, 0x3000}, [@alu={0x4, 0x0, 0x0, 0x0, 0x0, 0x0, 0xffffffffffffffff}]}, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, '\x00', 0x0, 0x0, 0xffffffffffffffff, 0x8, 0x0, 0x0, 0x10, 0x0}, 0x80) sendfile(0xffffffffffffffff, 0xffffffffffffffff, &(0x7f00000002c0)=0x335773c3, 0x8) r6 = syz_init_net_socket$nfc_llcp(0x27, 0x1, 0x1) splice(0xffffffffffffffff, &(0x7f0000000540)=0x4, r6, &(0x7f0000000840)=0x8, 0x6, 0x1) syz_emit_ethernet(0x5e, 
&(0x7f0000000900)=ANY=[@ANYBLOB="09ff73bdffff6c7621d7cc9486dd60fec00000283a0000000000000000000000ffffac1414bbff0200000000000000000000000000018900907800000000fc00000000000000000000000000e72cbf4a0b24138b19bc4a00000000000001e2813f908556af08623faed6acce540f951d1013adc0c2b6761d055d4052e6dba79f75491e800594b62ee701cb6bb25fa90c53ceffff4b39ca8cbef63e7dd4f422cbde009fe979e5550f6a28beacd23240647995d1fa9a5b53953ad9022eb6"], 0x0) openat$cgroup_ro(0xffffffffffffff9c, 0x0, 0x275a, 0x0) socket$nl_generic(0x10, 0x3, 0x10) r7 = socket$packet(0x11, 0x2, 0x300) setsockopt$packet_rx_ring(r7, 0x107, 0x5, &(0x7f0000000040)=@req3={0x8000, 0x200, 0x80, 0x20000}, 0x1c) setsockopt$ALG_SET_KEY(r2, 0x117, 0x1, &(0x7f0000000580)="b33fb1bcc8c09576f3bc4d4dd6cc1b86cbefb0286aabbcb4933e2d30246696f2f17ff96e934bd49d469a0cba6c2275a2f2dfa1cf1d3f8f38d841f5be4f772e1a7369503592ac7387457b1f3a36668da6c5df0eef3b7d6fb5b37362b1ec82d32521665b851e0e861d549fec16a3fa620ebc1062d595f7623feaffdb79793c3458231ddfeda7773c6c820a90d754513ac22c37a7ff92186f6d1f026b737954680c5e997bdd67303b398c8ee43a35abc57950", 0xb1) mmap(&(0x7f0000568000/0x2000)=nil, 0x1000000, 0x0, 0x11, r7, 0x0) [ 2871.762359][T22911] netlink: 8 bytes leftover after parsing attributes in process `syz-executor.3'. 17:03:56 executing program 1: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x28a, 0xa, {0x0, 0x0, 0x0, 0x0, 0x0, 0xffffa888}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8}]}, 0x3c}}, 0x0) 17:03:56 executing program 3: r0 = socket$netlink(0x10, 0x3, 0x0) (async) socket(0x0, 0x0, 0x0) (async) sendmsg$nl_route(0xffffffffffffffff, 0x0, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000000)=ANY=[@ANYBLOB="4800000010001fff00"/20, @ANYRES32=0x0, @ANYBLOB="0000000000000000280012800b0001006272696467650000180002800c002ee44772c3f6ebb7b8464d000002000000000005002a000000000000"], 0x48}}, 0x0) [ 2871.889777][T22922] validate_nla: 10 callbacks suppressed [ 2871.889802][T22922] netlink: 'syz-executor.5': attribute type 1 has an invalid length. [ 2871.928044][T22931] warning: checkpointing journal with EXT4_IOC_CHECKPOINT_FLAG_ZEROOUT can be slow [ 2872.005532][T22922] bond1100: entered promiscuous mode [ 2872.024257][T22922] 8021q: adding VLAN 0 to HW filter on device bond1100 [ 2872.044799][T22923] netlink: 'syz-executor.2': attribute type 1 has an invalid length. 
[ 2872.161486][T22923] bond1031: entered promiscuous mode [ 2872.189260][T22923] 8021q: adding VLAN 0 to HW filter on device bond1031 17:03:57 executing program 0: r0 = socket$packet(0x11, 0x2, 0x300) setsockopt$packet_fanout(r0, 0x107, 0x12, &(0x7f0000000000)={0x0, 0x8000}, 0x4) setsockopt$SO_ATTACH_FILTER(r0, 0x1, 0x1a, &(0x7f0000000040)={0x1, &(0x7f0000000340)=[{0x6, 0x0, 0x0, 0x67b}]}, 0x10) r1 = socket$inet_udp(0x2, 0x2, 0x0) bind$inet(r1, &(0x7f0000000180)={0x2, 0x0, @dev={0xac, 0x14, 0x14, 0xd}}, 0x10) r2 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) sendfile(0xffffffffffffffff, r2, 0x0, 0x8000000000004) openat$cgroup_ro(r2, &(0x7f00000001c0)='cpuacct.usage_percpu_user\x00', 0x0, 0x0) ioctl$ifreq_SIOCGIFINDEX_batadv_hard(r0, 0x8933, &(0x7f0000000280)={'batadv_slave_0\x00', 0x0}) sendto$packet(r2, &(0x7f00000000c0)="24d0643086209b9a6bec9cd029026f697108fa34a92972e41477b444c55b16420075e5f9b29ca6425a842f2245b1225672d4e4057a8442223beeaf7ada52c826b23e1661f32ee4", 0x47, 0x40004, &(0x7f00000002c0)={0x11, 0xf8, r3, 0x1, 0x40, 0x6, @local}, 0x14) sendmmsg$inet(r1, &(0x7f0000001440)=[{{&(0x7f0000000680)={0x2, 0x4e22, @multicast2}, 0x10, &(0x7f0000000800)=[{&(0x7f0000000380)="9ac0cf195024563fd94b73abe3f17e442ca7d5f8660c96c7ab5a13b2c29648a6db6ee0be39d814ee804334a105cb9624797b808ad88b8261abbdc634b883be0fedbb946f1520afc9e3d032eba36d4c94123bb915a29e533829b6aac57b8be4573100a56c6c45aa8db93e9c36fcf2bf5b02a0e9cb647682ea1c914edff1929151f0dbdbe5530b268ac24cb6f600"/153, 0x99}, {&(0x7f00000006c0)="19590a75d99ce20a010000001e2039a7fc48308350bb5b0c857cb2db2cdcf001f790cbdac390cb8ffa5381d829597b2ccb9499b3ebeeef4be95d2ba0dcaccecb341775da77670100cc1b4d2d00007f0000cab0595f89e3ac27e25dc73de5b22364fcdb0e9b4b830f24b1941de953cd816f1885752dba6bf9e126ca1744b9df673aa5e74aed41db05ff1b1b1505b524ecfaf336b365eac76ffca5a3e3e3e305ee7e4f8d95c11f77b700fa8e015d22802ff706e868c220fb0b942235e277193f07b5d24babb58d000000ec8dcdf062efb93e931283b9b67e5067d329a9728c2a5e7df4c505c2cf0cb0b8811ef056b98c3c1072438519f278587664742b70e8", 0xfe}, {&(0x7f0000000080)="25f8249384fe9dc720d29e1124d47d7ce4fc", 0x12}, {&(0x7f0000000140)="be79802678ef0103cd370b1112123345ff", 0x11}, {&(0x7f0000000540)="ca0f4e20e2a3adca1d75643a33754b719271cf6aca2461d5e649a262b7826e3d42afe4b4a5db687a065eac5a552fbf43ecd735dc17afd49b8eeee59b82d8a2b63ff4e92ef22b738854cd62751cb4253fa6484619bcd6381cf6b418585f3b8596f6c7672d765687dfe8a027037717f1f9cfea73f1988e6dd31b57d65e68867d2c8adb93068d6b50d24cf394852767f95541238373a9549705a648513618e989dbe25508217d9c9dcfff5090a72c4040c95bc6f335de5ad5a6c778b22e124ed1f7dd0d5eb206666df46da2ba20a5dd871efeb8e27bb2bcd4f42a266cdbd8c04321d8f5845f3ecc0997318096c08b40aa8948a4f6b7c2516423a0af098cfdb851bc9db5dd44782547d537916de129dbb11858a10430d49e56e98567996604e965238f6097c3f59db1831d528456d0f69912b3e674da808c7b0000", 0x139}, {&(0x7f00000001c0)="5db61f4e565300f668d0bd610c6820ffaeb8f4d915c8259b53e05408988b813196111365d5e519602581ccd61adfedea418ccfc4e03d7485c940f88606d3567e21d5126c3db7355ede7be197ce23616fb4571c46a6c8bedff863fdbfd9b9eccc425723a27b712e0aca909a6fdeaa6405debde57cb68e27bad319d607dd6d5fa325d790aedb2f19b0664c12fc4c2cafa01ab153e9cedd78f286e686f0df32c7f1b084c4422426de9c7c3c05a1307a356878ce86", 0xb3}], 0x6}}, {{0x0, 0x0, &(0x7f0000000cc0)=[{&(0x7f00000007c0)="ce949095e2318c3809d5d5c4c8ce54", 0xf}, {&(0x7f0000000900)="fc1eb623cc924e98cb3a00452bc1341d64072490dcdbba34", 0x18}, 
{&(0x7f0000000940)="b84aa3716a42d753e7832323208f9fdce0ab1d078b8e6effd647c82a4bea4f61b1503dc6f613821000595794b9b01df8728a7914f715f9153c674b4737c2e875f33af08f45ffaadda6cc108667b5c82271d62191a706a85e9f2b575279bf1f5e24b5cb611be66054d8ebb9f380da972558e32bade3a7d404401ab9ce7c26797aafbabeaf649a568e38d5e9ebd186e96b32f092cd3855548d267fd5aa187262c88814dbb303340a33f92d5fcd52f03170137ebb12271e6e16095e16c10b8d386d4199eb382fc0b9", 0xc7}, {&(0x7f0000000a40)="56f2f333ee90f253296167a17cebdd337cca8aa467128b41807c6ab38372040bf09867cc266d4b84a910f6ece6c6d3e66fd19a3c", 0x34}, {&(0x7f0000000a80)="9dfa6e970fc0557f7f5b61027e86209aebc6c48d254aeb3ef02d23f8a86399a9228b99bd8bfa63bab64c455416f8d03fdcc1d1f90b39bce74587f153349b729286aa", 0x42}, {&(0x7f0000000b00)="b31e8270facf3341ee9db79b057de4d534924e2e8fb77158327677c9cf60741101e33d4b78ec81a83f43acdbc5e2017aaad2a40d943d92e4123b4dba682f77b54c2faa8f8ddcc6b5cd50301abc7b884fd5d3456d2c2202fd1e2c518103f78b01fc6e1ad02d11ff5f159b64aefc97b6e2d4c6a4b51c626879e8a6ee1fe287185f5609733b9160b02c9bb63c86a3c0b618ad05cd53d7d1a7225675e1e48f30a9977b2b73ae84fc3a6cca", 0xa9}, {&(0x7f0000000bc0)="6e84cf6f15bc380b5a7ec2f4ea4bb37a5a778a4c525567f5839d3dc69b6c21cb8a20fb", 0x23}, {&(0x7f0000000c00)="8bac82cd633ccb22b69dfb7c821daa5f48b5238c4dd9286b09b9f01f2a526e2ceff2c0211a19dec624be4ad9cd17c15c897a9420bc8e35e09e54bac75cd4cd43fb5e2b2e93d608a6e75a0c70bfbc291c167251b0461134122295d15aefe210d72b0676c3d6d10af194e9a1bcf5848aab2bfca24deeb0dc84c5c6cf604471cb8897f6d6c49eaf2bc18a", 0x89}], 0x8}}], 0x2, 0xc044) sendmmsg$inet(r1, &(0x7f000000e880)=[{{0x0, 0x0, &(0x7f0000008900)=[{&(0x7f00000077c0)="8a", 0x1}], 0x1}}], 0x1, 0x0) [ 2872.325235][T22924] bond1100: (slave bridge1126): making interface the new active one [ 2872.335597][T22924] bridge1126: entered promiscuous mode [ 2872.351971][T22924] bond1100: (slave bridge1126): Enslaving as an active interface with an up link 17:03:57 executing program 5: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x36c, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) 17:03:57 executing program 0: r0 = socket$packet(0x11, 0x2, 0x300) setsockopt$packet_fanout(r0, 0x107, 0x12, &(0x7f0000000000)={0x0, 0x8000}, 0x4) setsockopt$SO_ATTACH_FILTER(r0, 0x1, 0x1a, &(0x7f0000000040)={0x1, &(0x7f0000000340)=[{0x6, 0x0, 0x0, 0x67b}]}, 0x10) socket$inet_udp(0x2, 0x2, 0x0) (async) r1 = socket$inet_udp(0x2, 0x2, 0x0) bind$inet(r1, &(0x7f0000000180)={0x2, 0x0, @dev={0xac, 0x14, 0x14, 0xd}}, 0x10) (async) bind$inet(r1, &(0x7f0000000180)={0x2, 0x0, @dev={0xac, 0x14, 0x14, 0xd}}, 0x10) r2 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) sendfile(0xffffffffffffffff, r2, 0x0, 0x8000000000004) openat$cgroup_ro(r2, &(0x7f00000001c0)='cpuacct.usage_percpu_user\x00', 0x0, 0x0) ioctl$ifreq_SIOCGIFINDEX_batadv_hard(r0, 0x8933, 
&(0x7f0000000280)={'batadv_slave_0\x00'}) (async) ioctl$ifreq_SIOCGIFINDEX_batadv_hard(r0, 0x8933, &(0x7f0000000280)={'batadv_slave_0\x00', 0x0}) sendto$packet(r2, &(0x7f00000000c0)="24d0643086209b9a6bec9cd029026f697108fa34a92972e41477b444c55b16420075e5f9b29ca6425a842f2245b1225672d4e4057a8442223beeaf7ada52c826b23e1661f32ee4", 0x47, 0x40004, &(0x7f00000002c0)={0x11, 0xf8, r3, 0x1, 0x40, 0x6, @local}, 0x14) sendmmsg$inet(r1, &(0x7f0000001440)=[{{&(0x7f0000000680)={0x2, 0x4e22, @multicast2}, 0x10, &(0x7f0000000800)=[{&(0x7f0000000380)="9ac0cf195024563fd94b73abe3f17e442ca7d5f8660c96c7ab5a13b2c29648a6db6ee0be39d814ee804334a105cb9624797b808ad88b8261abbdc634b883be0fedbb946f1520afc9e3d032eba36d4c94123bb915a29e533829b6aac57b8be4573100a56c6c45aa8db93e9c36fcf2bf5b02a0e9cb647682ea1c914edff1929151f0dbdbe5530b268ac24cb6f600"/153, 0x99}, {&(0x7f00000006c0)="19590a75d99ce20a010000001e2039a7fc48308350bb5b0c857cb2db2cdcf001f790cbdac390cb8ffa5381d829597b2ccb9499b3ebeeef4be95d2ba0dcaccecb341775da77670100cc1b4d2d00007f0000cab0595f89e3ac27e25dc73de5b22364fcdb0e9b4b830f24b1941de953cd816f1885752dba6bf9e126ca1744b9df673aa5e74aed41db05ff1b1b1505b524ecfaf336b365eac76ffca5a3e3e3e305ee7e4f8d95c11f77b700fa8e015d22802ff706e868c220fb0b942235e277193f07b5d24babb58d000000ec8dcdf062efb93e931283b9b67e5067d329a9728c2a5e7df4c505c2cf0cb0b8811ef056b98c3c1072438519f278587664742b70e8", 0xfe}, {&(0x7f0000000080)="25f8249384fe9dc720d29e1124d47d7ce4fc", 0x12}, {&(0x7f0000000140)="be79802678ef0103cd370b1112123345ff", 0x11}, {&(0x7f0000000540)="ca0f4e20e2a3adca1d75643a33754b719271cf6aca2461d5e649a262b7826e3d42afe4b4a5db687a065eac5a552fbf43ecd735dc17afd49b8eeee59b82d8a2b63ff4e92ef22b738854cd62751cb4253fa6484619bcd6381cf6b418585f3b8596f6c7672d765687dfe8a027037717f1f9cfea73f1988e6dd31b57d65e68867d2c8adb93068d6b50d24cf394852767f95541238373a9549705a648513618e989dbe25508217d9c9dcfff5090a72c4040c95bc6f335de5ad5a6c778b22e124ed1f7dd0d5eb206666df46da2ba20a5dd871efeb8e27bb2bcd4f42a266cdbd8c04321d8f5845f3ecc0997318096c08b40aa8948a4f6b7c2516423a0af098cfdb851bc9db5dd44782547d537916de129dbb11858a10430d49e56e98567996604e965238f6097c3f59db1831d528456d0f69912b3e674da808c7b0000", 0x139}, {&(0x7f00000001c0)="5db61f4e565300f668d0bd610c6820ffaeb8f4d915c8259b53e05408988b813196111365d5e519602581ccd61adfedea418ccfc4e03d7485c940f88606d3567e21d5126c3db7355ede7be197ce23616fb4571c46a6c8bedff863fdbfd9b9eccc425723a27b712e0aca909a6fdeaa6405debde57cb68e27bad319d607dd6d5fa325d790aedb2f19b0664c12fc4c2cafa01ab153e9cedd78f286e686f0df32c7f1b084c4422426de9c7c3c05a1307a356878ce86", 0xb3}], 0x6}}, {{0x0, 0x0, &(0x7f0000000cc0)=[{&(0x7f00000007c0)="ce949095e2318c3809d5d5c4c8ce54", 0xf}, {&(0x7f0000000900)="fc1eb623cc924e98cb3a00452bc1341d64072490dcdbba34", 0x18}, {&(0x7f0000000940)="b84aa3716a42d753e7832323208f9fdce0ab1d078b8e6effd647c82a4bea4f61b1503dc6f613821000595794b9b01df8728a7914f715f9153c674b4737c2e875f33af08f45ffaadda6cc108667b5c82271d62191a706a85e9f2b575279bf1f5e24b5cb611be66054d8ebb9f380da972558e32bade3a7d404401ab9ce7c26797aafbabeaf649a568e38d5e9ebd186e96b32f092cd3855548d267fd5aa187262c88814dbb303340a33f92d5fcd52f03170137ebb12271e6e16095e16c10b8d386d4199eb382fc0b9", 0xc7}, {&(0x7f0000000a40)="56f2f333ee90f253296167a17cebdd337cca8aa467128b41807c6ab38372040bf09867cc266d4b84a910f6ece6c6d3e66fd19a3c", 0x34}, {&(0x7f0000000a80)="9dfa6e970fc0557f7f5b61027e86209aebc6c48d254aeb3ef02d23f8a86399a9228b99bd8bfa63bab64c455416f8d03fdcc1d1f90b39bce74587f153349b729286aa", 0x42}, 
{&(0x7f0000000b00)="b31e8270facf3341ee9db79b057de4d534924e2e8fb77158327677c9cf60741101e33d4b78ec81a83f43acdbc5e2017aaad2a40d943d92e4123b4dba682f77b54c2faa8f8ddcc6b5cd50301abc7b884fd5d3456d2c2202fd1e2c518103f78b01fc6e1ad02d11ff5f159b64aefc97b6e2d4c6a4b51c626879e8a6ee1fe287185f5609733b9160b02c9bb63c86a3c0b618ad05cd53d7d1a7225675e1e48f30a9977b2b73ae84fc3a6cca", 0xa9}, {&(0x7f0000000bc0)="6e84cf6f15bc380b5a7ec2f4ea4bb37a5a778a4c525567f5839d3dc69b6c21cb8a20fb", 0x23}, {&(0x7f0000000c00)="8bac82cd633ccb22b69dfb7c821daa5f48b5238c4dd9286b09b9f01f2a526e2ceff2c0211a19dec624be4ad9cd17c15c897a9420bc8e35e09e54bac75cd4cd43fb5e2b2e93d608a6e75a0c70bfbc291c167251b0461134122295d15aefe210d72b0676c3d6d10af194e9a1bcf5848aab2bfca24deeb0dc84c5c6cf604471cb8897f6d6c49eaf2bc18a", 0x89}], 0x8}}], 0x2, 0xc044) sendmmsg$inet(r1, &(0x7f000000e880)=[{{0x0, 0x0, &(0x7f0000008900)=[{&(0x7f00000077c0)="8a", 0x1}], 0x1}}], 0x1, 0x0) (async) sendmmsg$inet(r1, &(0x7f000000e880)=[{{0x0, 0x0, &(0x7f0000008900)=[{&(0x7f00000077c0)="8a", 0x1}], 0x1}}], 0x1, 0x0) [ 2872.533081][T22925] bond1031: (slave bridge1097): making interface the new active one [ 2872.545845][T22925] bridge1097: entered promiscuous mode 17:03:57 executing program 0: r0 = socket$packet(0x11, 0x2, 0x300) setsockopt$packet_fanout(r0, 0x107, 0x12, &(0x7f0000000000)={0x0, 0x8000}, 0x4) setsockopt$SO_ATTACH_FILTER(r0, 0x1, 0x1a, &(0x7f0000000040)={0x1, &(0x7f0000000340)=[{0x6, 0x0, 0x0, 0x67b}]}, 0x10) (async) r1 = socket$inet_udp(0x2, 0x2, 0x0) bind$inet(r1, &(0x7f0000000180)={0x2, 0x0, @dev={0xac, 0x14, 0x14, 0xd}}, 0x10) (async) r2 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) sendfile(0xffffffffffffffff, r2, 0x0, 0x8000000000004) (async) openat$cgroup_ro(r2, &(0x7f00000001c0)='cpuacct.usage_percpu_user\x00', 0x0, 0x0) (async) ioctl$ifreq_SIOCGIFINDEX_batadv_hard(r0, 0x8933, &(0x7f0000000280)={'batadv_slave_0\x00', 0x0}) sendto$packet(r2, &(0x7f00000000c0)="24d0643086209b9a6bec9cd029026f697108fa34a92972e41477b444c55b16420075e5f9b29ca6425a842f2245b1225672d4e4057a8442223beeaf7ada52c826b23e1661f32ee4", 0x47, 0x40004, &(0x7f00000002c0)={0x11, 0xf8, r3, 0x1, 0x40, 0x6, @local}, 0x14) sendmmsg$inet(r1, &(0x7f0000001440)=[{{&(0x7f0000000680)={0x2, 0x4e22, @multicast2}, 0x10, &(0x7f0000000800)=[{&(0x7f0000000380)="9ac0cf195024563fd94b73abe3f17e442ca7d5f8660c96c7ab5a13b2c29648a6db6ee0be39d814ee804334a105cb9624797b808ad88b8261abbdc634b883be0fedbb946f1520afc9e3d032eba36d4c94123bb915a29e533829b6aac57b8be4573100a56c6c45aa8db93e9c36fcf2bf5b02a0e9cb647682ea1c914edff1929151f0dbdbe5530b268ac24cb6f600"/153, 0x99}, {&(0x7f00000006c0)="19590a75d99ce20a010000001e2039a7fc48308350bb5b0c857cb2db2cdcf001f790cbdac390cb8ffa5381d829597b2ccb9499b3ebeeef4be95d2ba0dcaccecb341775da77670100cc1b4d2d00007f0000cab0595f89e3ac27e25dc73de5b22364fcdb0e9b4b830f24b1941de953cd816f1885752dba6bf9e126ca1744b9df673aa5e74aed41db05ff1b1b1505b524ecfaf336b365eac76ffca5a3e3e3e305ee7e4f8d95c11f77b700fa8e015d22802ff706e868c220fb0b942235e277193f07b5d24babb58d000000ec8dcdf062efb93e931283b9b67e5067d329a9728c2a5e7df4c505c2cf0cb0b8811ef056b98c3c1072438519f278587664742b70e8", 0xfe}, {&(0x7f0000000080)="25f8249384fe9dc720d29e1124d47d7ce4fc", 0x12}, {&(0x7f0000000140)="be79802678ef0103cd370b1112123345ff", 0x11}, 
{&(0x7f0000000540)="ca0f4e20e2a3adca1d75643a33754b719271cf6aca2461d5e649a262b7826e3d42afe4b4a5db687a065eac5a552fbf43ecd735dc17afd49b8eeee59b82d8a2b63ff4e92ef22b738854cd62751cb4253fa6484619bcd6381cf6b418585f3b8596f6c7672d765687dfe8a027037717f1f9cfea73f1988e6dd31b57d65e68867d2c8adb93068d6b50d24cf394852767f95541238373a9549705a648513618e989dbe25508217d9c9dcfff5090a72c4040c95bc6f335de5ad5a6c778b22e124ed1f7dd0d5eb206666df46da2ba20a5dd871efeb8e27bb2bcd4f42a266cdbd8c04321d8f5845f3ecc0997318096c08b40aa8948a4f6b7c2516423a0af098cfdb851bc9db5dd44782547d537916de129dbb11858a10430d49e56e98567996604e965238f6097c3f59db1831d528456d0f69912b3e674da808c7b0000", 0x139}, {&(0x7f00000001c0)="5db61f4e565300f668d0bd610c6820ffaeb8f4d915c8259b53e05408988b813196111365d5e519602581ccd61adfedea418ccfc4e03d7485c940f88606d3567e21d5126c3db7355ede7be197ce23616fb4571c46a6c8bedff863fdbfd9b9eccc425723a27b712e0aca909a6fdeaa6405debde57cb68e27bad319d607dd6d5fa325d790aedb2f19b0664c12fc4c2cafa01ab153e9cedd78f286e686f0df32c7f1b084c4422426de9c7c3c05a1307a356878ce86", 0xb3}], 0x6}}, {{0x0, 0x0, &(0x7f0000000cc0)=[{&(0x7f00000007c0)="ce949095e2318c3809d5d5c4c8ce54", 0xf}, {&(0x7f0000000900)="fc1eb623cc924e98cb3a00452bc1341d64072490dcdbba34", 0x18}, {&(0x7f0000000940)="b84aa3716a42d753e7832323208f9fdce0ab1d078b8e6effd647c82a4bea4f61b1503dc6f613821000595794b9b01df8728a7914f715f9153c674b4737c2e875f33af08f45ffaadda6cc108667b5c82271d62191a706a85e9f2b575279bf1f5e24b5cb611be66054d8ebb9f380da972558e32bade3a7d404401ab9ce7c26797aafbabeaf649a568e38d5e9ebd186e96b32f092cd3855548d267fd5aa187262c88814dbb303340a33f92d5fcd52f03170137ebb12271e6e16095e16c10b8d386d4199eb382fc0b9", 0xc7}, {&(0x7f0000000a40)="56f2f333ee90f253296167a17cebdd337cca8aa467128b41807c6ab38372040bf09867cc266d4b84a910f6ece6c6d3e66fd19a3c", 0x34}, {&(0x7f0000000a80)="9dfa6e970fc0557f7f5b61027e86209aebc6c48d254aeb3ef02d23f8a86399a9228b99bd8bfa63bab64c455416f8d03fdcc1d1f90b39bce74587f153349b729286aa", 0x42}, {&(0x7f0000000b00)="b31e8270facf3341ee9db79b057de4d534924e2e8fb77158327677c9cf60741101e33d4b78ec81a83f43acdbc5e2017aaad2a40d943d92e4123b4dba682f77b54c2faa8f8ddcc6b5cd50301abc7b884fd5d3456d2c2202fd1e2c518103f78b01fc6e1ad02d11ff5f159b64aefc97b6e2d4c6a4b51c626879e8a6ee1fe287185f5609733b9160b02c9bb63c86a3c0b618ad05cd53d7d1a7225675e1e48f30a9977b2b73ae84fc3a6cca", 0xa9}, {&(0x7f0000000bc0)="6e84cf6f15bc380b5a7ec2f4ea4bb37a5a778a4c525567f5839d3dc69b6c21cb8a20fb", 0x23}, {&(0x7f0000000c00)="8bac82cd633ccb22b69dfb7c821daa5f48b5238c4dd9286b09b9f01f2a526e2ceff2c0211a19dec624be4ad9cd17c15c897a9420bc8e35e09e54bac75cd4cd43fb5e2b2e93d608a6e75a0c70bfbc291c167251b0461134122295d15aefe210d72b0676c3d6d10af194e9a1bcf5848aab2bfca24deeb0dc84c5c6cf604471cb8897f6d6c49eaf2bc18a", 0x89}], 0x8}}], 0x2, 0xc044) sendmmsg$inet(r1, &(0x7f000000e880)=[{{0x0, 0x0, &(0x7f0000008900)=[{&(0x7f00000077c0)="8a", 0x1}], 0x1}}], 0x1, 0x0) [ 2872.574794][T22925] bond1031: (slave bridge1097): Enslaving as an active interface with an up link [ 2872.594456][T22928] netlink: 'syz-executor.4': attribute type 1 has an invalid length. 
17:03:57 executing program 2: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x7, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) 17:03:57 executing program 0: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x36a, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) 17:03:57 executing program 4: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x28a, 0x0, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0x17}]}, 0x3c}}, 0x0) [ 2872.624412][T22928] workqueue: Failed to create a rescuer kthread for wq "bond674": -EINTR [ 2872.773137][T22936] netlink: 'syz-executor.1': attribute type 1 has an invalid length. 
17:03:57 executing program 1: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x28a, 0xa, {0x0, 0x0, 0x0, 0x0, 0x0, 0xfffff000}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8}]}, 0x3c}}, 0x0) [ 2872.813409][T22936] workqueue: Failed to create a rescuer kthread for wq "bond797": -EINTR [ 2872.930875][T22940] netlink: 8 bytes leftover after parsing attributes in process `syz-executor.3'. 17:03:58 executing program 3: r0 = socket$netlink(0x10, 0x3, 0x0) setsockopt$netlink_NETLINK_ADD_MEMBERSHIP(r0, 0x10e, 0x1, &(0x7f0000000080)=0x15, 0x4) socket(0x0, 0x0, 0x0) sendmsg$nl_route(0xffffffffffffffff, 0x0, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000000)=@newlink={0x4c, 0x10, 0xffffff1f, 0x0, 0x0, {}, [@IFLA_LINKINFO={0x2c, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x1c, 0x2, 0x0, 0x1, [@IFLA_BR_MULTI_BOOLOPT={0xc, 0x2e, {0x1, 0x1}}, @IFLA_BR_MCAST_QUERY_RESPONSE_INTVL={0xc, 0x22, 0x8}]}}}]}, 0x4c}}, 0x0) [ 2873.005849][T22950] netlink: 'syz-executor.5': attribute type 1 has an invalid length. [ 2873.093174][T22950] bond1101: entered promiscuous mode [ 2873.112676][T22950] 8021q: adding VLAN 0 to HW filter on device bond1101 [ 2873.267693][T22954] bond1101: (slave bridge1127): making interface the new active one [ 2873.279999][T22954] bridge1127: entered promiscuous mode [ 2873.297530][T22954] bond1101: (slave bridge1127): Enslaving as an active interface with an up link [ 2873.308388][T22962] netlink: 'syz-executor.2': attribute type 1 has an invalid length. 
17:03:58 executing program 5: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x37a, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) [ 2873.406421][T22962] bond1032: entered promiscuous mode [ 2873.426945][T22962] 8021q: adding VLAN 0 to HW filter on device bond1032 [ 2873.551367][T22964] bond1032: (slave bridge1098): making interface the new active one [ 2873.573792][T22964] bridge1098: entered promiscuous mode 17:03:58 executing program 2: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x8, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) [ 2873.594597][T22964] bond1032: (slave bridge1098): Enslaving as an active interface with an up link [ 2873.616799][T22969] netlink: 'syz-executor.4': attribute type 1 has an invalid length. 17:03:58 executing program 4: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x28a, 0x0, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0x18}]}, 0x3c}}, 0x0) [ 2873.736744][T22969] bond674: entered promiscuous mode [ 2873.742856][T22969] 8021q: adding VLAN 0 to HW filter on device bond674 [ 2873.757249][T22966] netlink: 'syz-executor.0': attribute type 1 has an invalid length. 
[ 2873.847724][T22966] bond506: entered promiscuous mode [ 2873.877195][T22966] 8021q: adding VLAN 0 to HW filter on device bond506 [ 2873.967967][T22971] bond506: (slave bridge903): making interface the new active one [ 2873.978110][T22971] bridge903: entered promiscuous mode [ 2873.991876][T22971] bond506: (slave bridge903): Enslaving as an active interface with an up link [ 2874.001439][T22973] netlink: 'syz-executor.1': attribute type 1 has an invalid length. 17:03:59 executing program 0: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x36a, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) 17:03:59 executing program 1: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x28a, 0xa, {0x0, 0x0, 0x0, 0x0, 0x0, 0xffffff7f}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8}]}, 0x3c}}, 0x0) [ 2874.023445][T22973] workqueue: Failed to create a rescuer kthread for wq "bond797": -EINTR [ 2874.136965][T22983] netlink: 'syz-executor.5': attribute type 1 has an invalid length. 
17:03:59 executing program 3: r0 = socket$netlink(0x10, 0x3, 0x0) setsockopt$netlink_NETLINK_ADD_MEMBERSHIP(r0, 0x10e, 0x1, &(0x7f0000000080)=0x15, 0x4) socket(0x0, 0x0, 0x0) sendmsg$nl_route(0xffffffffffffffff, 0x0, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000000)=@newlink={0x4c, 0x10, 0xffffff1f, 0x0, 0x0, {}, [@IFLA_LINKINFO={0x2c, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x1c, 0x2, 0x0, 0x1, [@IFLA_BR_MULTI_BOOLOPT={0xc, 0x2e, {0x1, 0x1}}, @IFLA_BR_MCAST_QUERY_RESPONSE_INTVL={0xc, 0x22, 0x8}]}}}]}, 0x4c}}, 0x0) socket$netlink(0x10, 0x3, 0x0) (async) setsockopt$netlink_NETLINK_ADD_MEMBERSHIP(r0, 0x10e, 0x1, &(0x7f0000000080)=0x15, 0x4) (async) socket(0x0, 0x0, 0x0) (async) sendmsg$nl_route(0xffffffffffffffff, 0x0, 0x0) (async) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000000)=@newlink={0x4c, 0x10, 0xffffff1f, 0x0, 0x0, {}, [@IFLA_LINKINFO={0x2c, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x1c, 0x2, 0x0, 0x1, [@IFLA_BR_MULTI_BOOLOPT={0xc, 0x2e, {0x1, 0x1}}, @IFLA_BR_MCAST_QUERY_RESPONSE_INTVL={0xc, 0x22, 0x8}]}}}]}, 0x4c}}, 0x0) (async) [ 2874.261905][T22983] bond1102: entered promiscuous mode [ 2874.267724][T22983] 8021q: adding VLAN 0 to HW filter on device bond1102 17:03:59 executing program 5: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x37c, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) [ 2874.513302][T22985] bond1102: (slave bridge1128): making interface the new active one [ 2874.527052][T22985] bridge1128: entered promiscuous mode [ 2874.549412][T22985] bond1102: (slave bridge1128): Enslaving as an active interface with an up link 17:03:59 executing program 2: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x9, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) [ 2874.643713][T22988] workqueue: Failed to create a rescuer kthread for wq "bond1033": -EINTR 17:03:59 executing program 4: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) 
getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x28a, 0x0, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0x19}]}, 0x3c}}, 0x0) [ 2874.859259][T22993] workqueue: Failed to create a rescuer kthread for wq "bond675": -EINTR [ 2875.072335][T22997] bond507: entered promiscuous mode [ 2875.102836][T22997] 8021q: adding VLAN 0 to HW filter on device bond507 [ 2875.217514][T23002] bond507: (slave bridge904): making interface the new active one [ 2875.238449][T23002] bridge904: entered promiscuous mode [ 2875.262874][T23002] bond507: (slave bridge904): Enslaving as an active interface with an up link 17:04:00 executing program 0: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x36a, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) 17:04:00 executing program 1: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x28a, 0xa, {0x0, 0x0, 0x0, 0x0, 0x0, 0xffffff9e}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8}]}, 0x3c}}, 0x0) 17:04:00 executing program 3: socket$netlink(0x10, 0x3, 0x0) (async) r0 = socket$netlink(0x10, 0x3, 0x0) setsockopt$netlink_NETLINK_ADD_MEMBERSHIP(r0, 0x10e, 0x1, &(0x7f0000000080)=0x15, 0x4) socket(0x0, 0x0, 0x0) sendmsg$nl_route(0xffffffffffffffff, 0x0, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000000)=@newlink={0x4c, 0x10, 0xffffff1f, 0x0, 0x0, {}, [@IFLA_LINKINFO={0x2c, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x1c, 0x2, 0x0, 0x1, [@IFLA_BR_MULTI_BOOLOPT={0xc, 0x2e, {0x1, 0x1}}, @IFLA_BR_MCAST_QUERY_RESPONSE_INTVL={0xc, 0x22, 0x8}]}}}]}, 0x4c}}, 0x0) [ 2875.279380][T23001] workqueue: Failed to create a rescuer kthread for wq "bond797": -EINTR [ 2875.557976][T23013] bond1103: entered promiscuous mode [ 
2875.605979][T23013] 8021q: adding VLAN 0 to HW filter on device bond1103 17:04:00 executing program 5: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x382, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) [ 2875.773683][T23015] bond1103: (slave bridge1129): making interface the new active one [ 2875.782994][T23015] bridge1129: entered promiscuous mode [ 2875.795296][T23015] bond1103: (slave bridge1129): Enslaving as an active interface with an up link [ 2875.820867][T23019] workqueue: Failed to create a rescuer kthread for wq "bond675": -EINTR 17:04:00 executing program 4: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x28a, 0x0, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0x1a}]}, 0x3c}}, 0x0) 17:04:01 executing program 2: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0xa, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) [ 2875.878501][T23021] workqueue: Failed to create a rescuer kthread for wq "bond1033": -EINTR [ 2876.152739][T23029] bond508: entered promiscuous mode [ 2876.185504][T23029] 8021q: adding VLAN 0 to HW filter on device bond508 [ 2876.301057][T23031] bond508: (slave bridge905): making interface the new active one [ 2876.310675][T23031] bridge905: entered promiscuous mode [ 2876.331488][T23031] bond508: (slave bridge905): Enslaving as an active interface with an up link 17:04:01 executing program 0: r0 = socket(0x40000000002, 0x3, 0x2) getsockopt$bt_sco_SCO_OPTIONS(r0, 0x11, 0x1, 
&(0x7f0000000300)=""/144, &(0x7f0000000000)=0x90) recvmmsg(r0, &(0x7f0000000240)=[{{0x0, 0xfffffffffffffea7, 0x0, 0x0, 0x0, 0xfffffffffffffec8}}], 0x4000000000002c5, 0x2, 0x0) r1 = socket(0x40000000002, 0x3, 0x80000000002) r2 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000140)='memory.events\x00', 0x7a05, 0x1700) r3 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) write$cgroup_int(r2, &(0x7f0000000200), 0xf000) sendfile(r2, r3, 0x0, 0xf03b0000) sendfile(r2, r2, &(0x7f0000000180), 0x0) listen(r2, 0x2) setsockopt$inet_int(r0, 0x0, 0x6, &(0x7f0000000040)=0xaa7c, 0x4) ioctl$sock_inet6_SIOCDELRT(0xffffffffffffffff, 0x890c, &(0x7f0000000280)={@rand_addr=' \x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02', @private2, @private0={0xfc, 0x0, '\x00', 0x1}}) setsockopt$inet_int(r1, 0x0, 0x4, &(0x7f00000000c0), 0x4) sendto$unix(r1, 0x0, 0x0, 0x0, &(0x7f0000000180)=@abs, 0x6e) 17:04:01 executing program 1: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x28a, 0xa, {0x0, 0x0, 0x0, 0x0, 0x0, 0xfffffff0}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8}]}, 0x3c}}, 0x0) 17:04:01 executing program 3: r0 = socket$netlink(0x10, 0x3, 0x0) socket$nl_route(0x10, 0x3, 0x0) socket$netlink(0x10, 0x3, 0x4) socket(0x0, 0x0, 0x0) sendmsg$nl_route(0xffffffffffffffff, 0x0, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000100)=@newlink={0x48, 0x10, 0xffffff1f, 0x0, 0x0, {}, [@IFLA_LINKINFO={0x28, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x18, 0x2, 0x0, 0x1, [@IFLA_BR_MULTI_BOOLOPT={0xc, 0x2e, {0x0, 0x1}}, @IFLA_BR_MCAST_STATS_ENABLED={0x5}]}}}]}, 0x48}}, 0x0) [ 2876.352309][T23034] workqueue: Failed to create a rescuer kthread for wq "bond797": -EINTR [ 2876.588035][T23045] bond1104: entered promiscuous mode [ 2876.606028][T23045] 8021q: adding VLAN 0 to HW filter on device bond1104 [ 2876.781399][T23046] bond1104: (slave bridge1130): making interface the new active one [ 2876.803243][T23046] bridge1130: entered promiscuous mode [ 2876.828140][T23046] bond1104: (slave bridge1130): Enslaving as an active interface with an up link 17:04:01 executing program 5: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x38e, {}, 
[@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) [ 2876.913481][T23048] bond675 (unregistering): Released all slaves [ 2876.985646][T23052] validate_nla: 11 callbacks suppressed [ 2876.985664][T23052] netlink: 'syz-executor.2': attribute type 1 has an invalid length. 17:04:02 executing program 4: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x28a, 0x0, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0x1b}]}, 0x3c}}, 0x0) 17:04:02 executing program 2: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0xc, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) [ 2877.053801][T23052] workqueue: Failed to create a rescuer kthread for wq "bond1033": -EINTR [ 2877.267668][T23062] netlink: 'syz-executor.1': attribute type 1 has an invalid length. 
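Editor's note: the executor programs above keep replaying one rtnetlink pattern. The first sendmsg$nl_route (the ANYBLOB whose hex 626f6e64 spells "bond" inside IFLA_LINKINFO) appears to create a bond device; the second RTM_NEWLINK then creates a bridge and, in the same message, points IFLA_MASTER (attribute type 0xa) at the bond's ifindex, which lines up with the "(slave bridgeN): Enslaving as an active interface with an up link" lines in the log. A minimal C sketch of that second request, assuming a bond named "bond0" already exists and the caller has CAP_NET_ADMIN; the interface names, buffer size, and helper below are illustrative, not taken from the log:

    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>
    #include <net/if.h>
    #include <sys/socket.h>
    #include <linux/netlink.h>
    #include <linux/rtnetlink.h>
    #include <linux/if_link.h>

    /* Append one attribute to the netlink message and return a pointer to it. */
    static struct rtattr *add_attr(struct nlmsghdr *nlh, unsigned short type,
                                   const void *data, int len)
    {
        struct rtattr *rta =
            (struct rtattr *)((char *)nlh + NLMSG_ALIGN(nlh->nlmsg_len));

        rta->rta_type = type;
        rta->rta_len = RTA_LENGTH(len);
        if (data)
            memcpy(RTA_DATA(rta), data, len);
        nlh->nlmsg_len = NLMSG_ALIGN(nlh->nlmsg_len) + RTA_ALIGN(rta->rta_len);
        return rta;
    }

    int main(void)
    {
        char buf[512];
        struct nlmsghdr *nlh = (struct nlmsghdr *)buf;
        struct ifinfomsg *ifi;
        struct rtattr *linkinfo;
        struct sockaddr_nl kernel = { .nl_family = AF_NETLINK };
        const char *name = "bridge0";          /* illustrative name */
        const char *kind = "bridge";
        int master = if_nametoindex("bond0");  /* assumed pre-existing bond */
        int fd;

        memset(buf, 0, sizeof(buf));
        nlh->nlmsg_len = NLMSG_LENGTH(sizeof(*ifi));
        nlh->nlmsg_type = RTM_NEWLINK;
        nlh->nlmsg_flags = NLM_F_REQUEST | NLM_F_CREATE | NLM_F_EXCL;
        ifi = NLMSG_DATA(nlh);
        ifi->ifi_family = AF_UNSPEC;

        add_attr(nlh, IFLA_IFNAME, name, strlen(name) + 1);

        /* Nested IFLA_LINKINFO { IFLA_INFO_KIND = "bridge" } picks the driver. */
        linkinfo = add_attr(nlh, IFLA_LINKINFO, NULL, 0);
        add_attr(nlh, IFLA_INFO_KIND, kind, strlen(kind));
        linkinfo->rta_len = (unsigned short)((char *)nlh +
                            NLMSG_ALIGN(nlh->nlmsg_len) - (char *)linkinfo);

        /* Enslave the new bridge to the bond in the same request. */
        add_attr(nlh, IFLA_MASTER, &master, sizeof(master));

        fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);
        if (fd < 0 || sendto(fd, nlh, nlh->nlmsg_len, 0,
                             (struct sockaddr *)&kernel, sizeof(kernel)) < 0) {
            perror("rtnetlink");
            return 1;
        }
        close(fd);
        return 0;
    }

Roughly the same effect can be had from a shell with "ip link add bridge0 type bridge" followed by "ip link set bridge0 master bond0"; the fuzzer simply folds both steps into one RTM_NEWLINK, which is what exercises the enslave path during link creation.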
17:04:02 executing program 0: r0 = socket(0x40000000002, 0x3, 0x2) getsockopt$bt_sco_SCO_OPTIONS(r0, 0x11, 0x1, &(0x7f0000000300)=""/144, &(0x7f0000000000)=0x90) recvmmsg(r0, &(0x7f0000000240)=[{{0x0, 0xfffffffffffffea7, 0x0, 0x0, 0x0, 0xfffffffffffffec8}}], 0x4000000000002c5, 0x2, 0x0) (async) r1 = socket(0x40000000002, 0x3, 0x80000000002) r2 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000140)='memory.events\x00', 0x7a05, 0x1700) r3 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) write$cgroup_int(r2, &(0x7f0000000200), 0xf000) (async, rerun: 64) sendfile(r2, r3, 0x0, 0xf03b0000) (async, rerun: 64) sendfile(r2, r2, &(0x7f0000000180), 0x0) listen(r2, 0x2) (async) setsockopt$inet_int(r0, 0x0, 0x6, &(0x7f0000000040)=0xaa7c, 0x4) ioctl$sock_inet6_SIOCDELRT(0xffffffffffffffff, 0x890c, &(0x7f0000000280)={@rand_addr=' \x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02', @private2, @private0={0xfc, 0x0, '\x00', 0x1}}) (async, rerun: 32) setsockopt$inet_int(r1, 0x0, 0x4, &(0x7f00000000c0), 0x4) (async, rerun: 32) sendto$unix(r1, 0x0, 0x0, 0x0, &(0x7f0000000180)=@abs, 0x6e) [ 2877.380301][T23062] bond797: entered promiscuous mode [ 2877.386057][T23062] 8021q: adding VLAN 0 to HW filter on device bond797 17:04:02 executing program 0: r0 = socket(0x40000000002, 0x3, 0x2) getsockopt$bt_sco_SCO_OPTIONS(r0, 0x11, 0x1, &(0x7f0000000300)=""/144, &(0x7f0000000000)=0x90) (async) getsockopt$bt_sco_SCO_OPTIONS(r0, 0x11, 0x1, &(0x7f0000000300)=""/144, &(0x7f0000000000)=0x90) recvmmsg(r0, &(0x7f0000000240)=[{{0x0, 0xfffffffffffffea7, 0x0, 0x0, 0x0, 0xfffffffffffffec8}}], 0x4000000000002c5, 0x2, 0x0) socket(0x40000000002, 0x3, 0x80000000002) (async) r1 = socket(0x40000000002, 0x3, 0x80000000002) r2 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000140)='memory.events\x00', 0x7a05, 0x1700) r3 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) write$cgroup_int(r2, &(0x7f0000000200), 0xf000) sendfile(r2, r3, 0x0, 0xf03b0000) sendfile(r2, r2, &(0x7f0000000180), 0x0) (async) sendfile(r2, r2, &(0x7f0000000180), 0x0) listen(r2, 0x2) setsockopt$inet_int(r0, 0x0, 0x6, &(0x7f0000000040)=0xaa7c, 0x4) ioctl$sock_inet6_SIOCDELRT(0xffffffffffffffff, 0x890c, &(0x7f0000000280)={@rand_addr=' \x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02', @private2, @private0={0xfc, 0x0, '\x00', 0x1}}) (async) ioctl$sock_inet6_SIOCDELRT(0xffffffffffffffff, 0x890c, &(0x7f0000000280)={@rand_addr=' \x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02', @private2, @private0={0xfc, 0x0, '\x00', 0x1}}) setsockopt$inet_int(r1, 0x0, 0x4, &(0x7f00000000c0), 0x4) sendto$unix(r1, 0x0, 0x0, 0x0, &(0x7f0000000180)=@abs, 0x6e) 17:04:02 executing program 3: socket$netlink(0x10, 0x3, 0x0) (async) r0 = socket$netlink(0x10, 0x3, 0x0) socket$nl_route(0x10, 0x3, 0x0) socket$netlink(0x10, 0x3, 0x4) socket(0x0, 0x0, 0x0) sendmsg$nl_route(0xffffffffffffffff, 0x0, 0x0) (async) sendmsg$nl_route(0xffffffffffffffff, 0x0, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000100)=@newlink={0x48, 0x10, 0xffffff1f, 0x0, 0x0, {}, [@IFLA_LINKINFO={0x28, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x18, 0x2, 0x0, 0x1, [@IFLA_BR_MULTI_BOOLOPT={0xc, 0x2e, {0x0, 0x1}}, @IFLA_BR_MCAST_STATS_ENABLED={0x5}]}}}]}, 0x48}}, 0x0) [ 2877.503378][T23071] netlink: 'syz-executor.5': attribute type 1 has an invalid length. 
17:04:02 executing program 1: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x28a, 0xa, {0x0, 0x0, 0x0, 0x0, 0x0, 0xffffffff}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8}]}, 0x3c}}, 0x0) [ 2877.597893][T23071] bond1105: entered promiscuous mode [ 2877.604761][T23071] 8021q: adding VLAN 0 to HW filter on device bond1105 17:04:02 executing program 0: r0 = socket(0x40000000002, 0x3, 0x2) getsockopt$bt_sco_SCO_OPTIONS(r0, 0x11, 0x1, &(0x7f0000000300)=""/144, &(0x7f0000000000)=0x90) recvmmsg(r0, &(0x7f0000000240)=[{{0x0, 0xfffffffffffffea7, 0x0, 0x0, 0x0, 0xfffffffffffffec8}}], 0x4000000000002c5, 0x2, 0x0) r1 = socket(0x40000000002, 0x3, 0x80000000002) r2 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000140)='memory.events\x00', 0x7a05, 0x1700) r3 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) write$cgroup_int(r2, &(0x7f0000000200), 0xf000) sendfile(r2, r3, 0x0, 0xf03b0000) sendfile(r2, r2, &(0x7f0000000180), 0x0) listen(r2, 0x2) setsockopt$inet_int(r0, 0x0, 0x6, &(0x7f0000000040)=0xaa7c, 0x4) ioctl$sock_inet6_SIOCDELRT(0xffffffffffffffff, 0x890c, &(0x7f0000000280)={@rand_addr=' \x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02', @private2, @private0={0xfc, 0x0, '\x00', 0x1}}) setsockopt$inet_int(r1, 0x0, 0x4, &(0x7f00000000c0), 0x4) sendto$unix(r1, 0x0, 0x0, 0x0, &(0x7f0000000180)=@abs, 0x6e) [ 2877.785527][T23072] bond1105: (slave bridge1131): making interface the new active one [ 2877.812143][T23072] bridge1131: entered promiscuous mode [ 2877.827058][T23072] bond1105: (slave bridge1131): Enslaving as an active interface with an up link [ 2877.848263][T23076] netlink: 'syz-executor.4': attribute type 1 has an invalid length. 
17:04:02 executing program 5: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x392, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) 17:04:02 executing program 4: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x28a, 0x0, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0x1c}]}, 0x3c}}, 0x0) [ 2877.924097][T23076] bond675: entered promiscuous mode [ 2877.943156][T23076] 8021q: adding VLAN 0 to HW filter on device bond675 [ 2877.965726][T23080] netlink: 'syz-executor.2': attribute type 1 has an invalid length. [ 2878.067158][T23080] bond1033: entered promiscuous mode [ 2878.086132][T23080] 8021q: adding VLAN 0 to HW filter on device bond1033 [ 2878.166860][T23081] bond1033: (slave bridge1099): making interface the new active one [ 2878.184794][T23081] bridge1099: entered promiscuous mode [ 2878.211941][T23081] bond1033: (slave bridge1099): Enslaving as an active interface with an up link [ 2878.228065][T23095] netlink: 'syz-executor.1': attribute type 1 has an invalid length. 
17:04:03 executing program 2: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0xd, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) [ 2878.327448][T23095] bond798: entered promiscuous mode [ 2878.344674][T23095] 8021q: adding VLAN 0 to HW filter on device bond798 17:04:03 executing program 1: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x28a, 0xa, {}, [@IFLA_LINKINFO={0x14, 0x18, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8}]}, 0x3c}}, 0x0) 17:04:03 executing program 3: r0 = socket$netlink(0x10, 0x3, 0x0) (async, rerun: 64) socket$nl_route(0x10, 0x3, 0x0) (rerun: 64) socket$netlink(0x10, 0x3, 0x4) socket(0x0, 0x0, 0x0) (async) sendmsg$nl_route(0xffffffffffffffff, 0x0, 0x0) (async) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000100)=@newlink={0x48, 0x10, 0xffffff1f, 0x0, 0x0, {}, [@IFLA_LINKINFO={0x28, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x18, 0x2, 0x0, 0x1, [@IFLA_BR_MULTI_BOOLOPT={0xc, 0x2e, {0x0, 0x1}}, @IFLA_BR_MCAST_STATS_ENABLED={0x5}]}}}]}, 0x48}}, 0x0) [ 2878.479538][T23110] netlink: 'syz-executor.5': attribute type 1 has an invalid length. 
[ 2878.583773][T23110] bond1106: entered promiscuous mode [ 2878.609267][T23110] 8021q: adding VLAN 0 to HW filter on device bond1106 17:04:03 executing program 0: r0 = openat$tun(0xffffffffffffff9c, &(0x7f0000000080), 0x0, 0x0) ioctl$TUNSETIFF(r0, 0x400454ca, &(0x7f00000000c0)={'syzkaller1\x00', 0x2}) ioctl$TUNSETLINK(r0, 0x400454cd, 0x336) r1 = socket$kcm(0x2, 0xa, 0x2) r2 = openat$cgroup_ro(0xffffffffffffffff, &(0x7f00000015c0)='cgroup.controllers\x00', 0x275a, 0x0) r3 = socket$nl_netfilter(0x10, 0x3, 0xc) sendmsg$IPCTNL_MSG_CT_NEW(r3, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000011c0)={0x14, 0x0, 0x1, 0x401, 0x0, 0x0, {0x2}}, 0x14}}, 0x0) r4 = socket$netlink(0x10, 0x3, 0x0) writev(r4, &(0x7f0000000080)=[{&(0x7f00000000c0)="390000001300090468fe0700000000000000ff3f04000000480100100000000004002b000a00010014a4ee1ee438d2fd000000000000007200", 0xfffffe10}], 0x1) writev(r4, &(0x7f0000000000)=[{&(0x7f0000000040)="3900000013001104680907000000000f0000ff3f04000000290a001700000000040037000a00030014917c82aa75b9a64411f6a4ee1ee438d2", 0x39}], 0x1) ioctl$BTRFS_IOC_BALANCE_PROGRESS(0xffffffffffffffff, 0x84009422, &(0x7f0000001dc0)={0x0, 0x0, {0x0, @usage, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, @struct}, {0x0, @usage, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, @struct}, {0x0, @usage, 0x0}}) r7 = socket$nl_generic(0x10, 0x3, 0x10) r8 = socket$inet_udp(0x2, 0x2, 0x0) ioctl$ifreq_SIOCGIFINDEX_batadv_mesh(r8, 0x8933, &(0x7f0000000080)) r9 = socket$inet_udplite(0x2, 0x2, 0x88) setsockopt$IP_VS_SO_SET_STARTDAEMON(r9, 0x0, 0x48b, &(0x7f0000000500)={0x2, 'gre0\x00', 0x1}, 0x18) ioctl$EXT4_IOC_CHECKPOINT(0xffffffffffffffff, 0x4004662b, &(0x7f0000000000)=0x2) pipe(&(0x7f0000001440)={0xffffffffffffffff}) r11 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000001c0)='hugetlb.1GB.usage_in_bytes\x00', 0x275a, 0x0) ioctl$EXT4_IOC_CHECKPOINT(r11, 0x4004662b, &(0x7f0000000000)=0x2) r12 = bpf$OBJ_GET_MAP(0x7, &(0x7f00000014c0)={&(0x7f0000001480)='./file0\x00', 0x0, 0x8}, 0x10) bpf$PROG_LOAD(0x5, &(0x7f0000001540)={0xd, 0x3, &(0x7f0000001280)=@framed={{0x18, 0x0, 0x0, 0x0, 0x2, 0x0, 0x0, 0x0, 0xff}}, &(0x7f0000001700)='syzkaller\x00', 0x1, 0x98, &(0x7f0000001300)=""/152, 0x41100, 0x14, '\x00', 0x0, 0x20, 0xffffffffffffffff, 0x8, &(0x7f00000013c0)={0x3, 0x2}, 0x8, 0x10, &(0x7f0000001400)={0x3, 0x3, 0x59, 0x7fffffff}, 0x10, 0xffffffffffffffff, r2, 0x0, &(0x7f0000001500)=[r2, 0x1, r8, r10, r11, r12]}, 0x80) r13 = bpf$MAP_CREATE(0x0, &(0x7f0000000100)=@base={0x5, 0x100004, 0x20104, 0x9, 0x1, 0x1}, 0x48) bpf$MAP_UPDATE_BATCH(0x1a, &(0x7f0000000200)={0x0, 0x0, &(0x7f0000000300), &(0x7f0000000240), 0x402, r13}, 0x38) sendmsg$BATADV_CMD_TP_METER(r7, &(0x7f0000000dc0)={0x0, 0x0, &(0x7f0000000d80)={&(0x7f0000001600)=ANY=[@ANYRES32=r13, @ANYRESOCT=r5, @ANYRESHEX, @ANYRESOCT, @ANYBLOB="f9717c515ea6e206df2423e1320d8ef87d776903646e5b2044583ba74da88965ee9194854b0aee5aba3fd2b86bf6ebad9d42b66bb6c135d0b436711cb3690b635027b1dd2f12889f0dd6e40511f6ba3b260082108d51b001f73a95974d05716b3515e64a83c6153b5959aabc5f0b3eb6939d33e583a9fb82067c107eb6000000000001008f56f3538992ca89c1", @ANYRESOCT=r4, @ANYRES8=r2], 0x30}, 0x1, 0x0, 0x0, 0x44001}, 0x0) ioctl(r2, 0x1ff, &(0x7f0000001200)="7c9a49b8cc36400ca9cdadea21e864f3574f2e942119581485939b79729790c0e67eb19dd8df790517dbcb250b15b773eec5d6ea734c9611158bd22ed51f73bd8ecbaaa741c20a909a6862075cec84df8d") ioctl$BTRFS_IOC_RESIZE(r3, 0x50009403, &(0x7f0000000080)=ANY=[@ANYRES32=r2, @ANYBLOB='\x00\x00\x00\x00', @ANYRESDEC=r6, @ANYBLOB="3a06303030308473070fe37487fe303030303030333165"]) 
ioctl$BTRFS_IOC_SUBVOL_CREATE_V2(r0, 0x50009418, &(0x7f00000001c0)={{r1}, 0x0, 0x1e, @inherit={0x50, &(0x7f0000000000)={0x1, 0x1, 0x401, 0x2, {0xc, 0x9, 0x3ff, 0x4, 0x2}, [0x8]}}, @devid=r6}) ioctl$SIOCSIFHWADDR(r1, 0x8914, &(0x7f0000000180)={'syzkaller1\x00', @link_local}) close(r0) 17:04:03 executing program 3: r0 = socket$netlink(0x10, 0x3, 0x0) socket(0x0, 0x0, 0x0) sendmsg$nl_route(0xffffffffffffffff, 0x0, 0x0) sendmsg$IPCTNL_MSG_TIMEOUT_DELETE(r0, &(0x7f0000000180)={&(0x7f0000000000)={0x10, 0x0, 0x0, 0x100}, 0xc, &(0x7f00000000c0)={&(0x7f0000000040)={0x70, 0x2, 0x8, 0x801, 0x0, 0x0, {0x5, 0x0, 0xa}, [@CTA_TIMEOUT_DATA={0x4c, 0x4, 0x0, 0x1, @icmp=[@CTA_TIMEOUT_ICMP_TIMEOUT={0x8, 0x1, 0x1, 0x0, 0x1}, @CTA_TIMEOUT_ICMP_TIMEOUT={0x8, 0x1, 0x1, 0x0, 0x851}, @CTA_TIMEOUT_ICMP_TIMEOUT={0x8, 0x1, 0x1, 0x0, 0x200}, @CTA_TIMEOUT_ICMP_TIMEOUT={0x8, 0x1, 0x1, 0x0, 0x40}, @CTA_TIMEOUT_ICMP_TIMEOUT={0x8, 0x1, 0x1, 0x0, 0xfffffffa}, @CTA_TIMEOUT_ICMP_TIMEOUT={0x8, 0x1, 0x1, 0x0, 0xf7a6}, @CTA_TIMEOUT_ICMP_TIMEOUT={0x8, 0x1, 0x1, 0x0, 0x6}, @CTA_TIMEOUT_ICMP_TIMEOUT={0x8, 0x1, 0x1, 0x0, 0x1ff}, @CTA_TIMEOUT_ICMP_TIMEOUT={0x8, 0x1, 0x1, 0x0, 0x7}]}, @CTA_TIMEOUT_L4PROTO={0x5, 0x3, 0x84}, @CTA_TIMEOUT_L4PROTO={0x5, 0x3, 0x11}]}, 0x70}, 0x1, 0x0, 0x0, 0x4044810}, 0x4000800) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000100)=@newlink={0x48, 0x10, 0xffffff1f, 0x0, 0x0, {}, [@IFLA_LINKINFO={0x28, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x18, 0x2, 0x0, 0x1, [@IFLA_BR_MULTI_BOOLOPT={0xc, 0x2e, {0x0, 0x1}}, @IFLA_BR_MCAST_STATS_ENABLED={0x5}]}}}]}, 0x48}}, 0x0) [ 2878.856854][T23111] bond1106: (slave bridge1132): making interface the new active one [ 2878.885540][T23111] bridge1132: entered promiscuous mode [ 2878.924679][T23111] bond1106: (slave bridge1132): Enslaving as an active interface with an up link [ 2878.956225][T23113] netlink: 'syz-executor.4': attribute type 1 has an invalid length. 
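Editor's note: program 0 in this run opens /dev/net/tun, attaches a TAP interface named syzkaller1 (the 0x2 flag passed to TUNSETIFF is IFF_TAP), and then rewrites the device's ARP hardware type with TUNSETLINK while the interface is still down. A minimal C sketch of just that opening sequence; the 0x336 type value is copied from the log, everything else is illustrative:

    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <linux/if.h>
    #include <linux/if_tun.h>

    int main(void)
    {
        struct ifreq ifr;
        int fd = open("/dev/net/tun", O_RDWR);

        if (fd < 0) {
            perror("/dev/net/tun");
            return 1;
        }

        memset(&ifr, 0, sizeof(ifr));
        strncpy(ifr.ifr_name, "syzkaller1", IFNAMSIZ - 1);
        ifr.ifr_flags = IFF_TAP;              /* 0x2 in the log */
        if (ioctl(fd, TUNSETIFF, &ifr) < 0) {
            perror("TUNSETIFF");
            return 1;
        }

        /* TUNSETLINK changes dev->type; the kernel only accepts it while the
         * interface is down, which is the state right after TUNSETIFF. */
        if (ioctl(fd, TUNSETLINK, 0x336) < 0)
            perror("TUNSETLINK");

        close(fd);
        return 0;
    }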
17:04:03 executing program 5: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x39a, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) 17:04:04 executing program 4: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x28a, 0x0, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0x1d}]}, 0x3c}}, 0x0) [ 2879.002315][T23113] workqueue: Failed to create a rescuer kthread for wq "bond676": -EINTR [ 2879.041132][T23139] warning: checkpointing journal with EXT4_IOC_CHECKPOINT_FLAG_ZEROOUT can be slow [ 2879.054859][T23118] netlink: 'syz-executor.2': attribute type 1 has an invalid length. [ 2879.161320][T23118] bond1034: entered promiscuous mode [ 2879.179888][T23118] 8021q: adding VLAN 0 to HW filter on device bond1034 17:04:04 executing program 2: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0xe, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) [ 2879.252980][T23119] bond1034: (slave bridge1100): making interface the new active one [ 2879.262054][T23119] bridge1100: entered promiscuous mode [ 2879.275377][T23119] bond1034: (slave bridge1100): Enslaving as an active interface with an up link [ 2879.285780][T23121] netlink: 'syz-executor.1': attribute type 1 has an invalid length. 
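Editor's note: program 0 above also calls setsockopt with option 0x48b, which decodes to IP_VS_SO_SET_STARTDAEMON with an ip_vs_daemon_user of state 0x2 (IP_VS_STATE_BACKUP), multicast interface gre0 and syncid 1; that matches the "IPVS: sync thread started: state = BACKUP, mcast_ifn = gre0, syncid = 1" line that follows. A minimal C sketch of that call, assuming CAP_NET_ADMIN and a kernel built with IPVS support:

    #include <stdio.h>
    #include <string.h>
    #include <sys/socket.h>
    #include <netinet/in.h>
    #include <linux/ip_vs.h>

    int main(void)
    {
        struct ip_vs_daemon_user d;
        int fd = socket(AF_INET, SOCK_DGRAM, 0);   /* any IPv4 socket works */

        if (fd < 0) {
            perror("socket");
            return 1;
        }

        memset(&d, 0, sizeof(d));
        d.state = IP_VS_STATE_BACKUP;                      /* 0x2 in the log */
        strncpy(d.mcast_ifn, "gre0", IP_VS_IFNAME_MAXLEN - 1);
        d.syncid = 1;

        /* On success the kernel starts a sync thread and logs
         * "IPVS: sync thread started: state = BACKUP, ...". */
        if (setsockopt(fd, IPPROTO_IP, IP_VS_SO_SET_STARTDAEMON,
                       &d, sizeof(d)) < 0)
            perror("IP_VS_SO_SET_STARTDAEMON");
        return 0;
    }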
[ 2879.344957][T23121] bond799: entered promiscuous mode [ 2879.366500][T23121] 8021q: adding VLAN 0 to HW filter on device bond799 17:04:04 executing program 1: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x28a, 0xa, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8}]}, 0x3c}}, 0x0) [ 2879.561504][T23135] netlink: 9 bytes leftover after parsing attributes in process `syz-executor.0'. [ 2879.571963][T23135] ‘|‚ªu: renamed from gretap0 [ 2879.591162][T23135] ‘|‚ªu: entered allmulticast mode [ 2879.603876][T23135] A link change request failed with some changes committed already. Interface ‘|‚ªu may have been left with an inconsistent configuration, please check. 17:04:04 executing program 3: r0 = socket$netlink(0x10, 0x3, 0x0) socket(0x0, 0x0, 0x0) sendmsg$nl_route(0xffffffffffffffff, 0x0, 0x0) (async) sendmsg$IPCTNL_MSG_TIMEOUT_DELETE(r0, &(0x7f0000000180)={&(0x7f0000000000)={0x10, 0x0, 0x0, 0x100}, 0xc, &(0x7f00000000c0)={&(0x7f0000000040)={0x70, 0x2, 0x8, 0x801, 0x0, 0x0, {0x5, 0x0, 0xa}, [@CTA_TIMEOUT_DATA={0x4c, 0x4, 0x0, 0x1, @icmp=[@CTA_TIMEOUT_ICMP_TIMEOUT={0x8, 0x1, 0x1, 0x0, 0x1}, @CTA_TIMEOUT_ICMP_TIMEOUT={0x8, 0x1, 0x1, 0x0, 0x851}, @CTA_TIMEOUT_ICMP_TIMEOUT={0x8, 0x1, 0x1, 0x0, 0x200}, @CTA_TIMEOUT_ICMP_TIMEOUT={0x8, 0x1, 0x1, 0x0, 0x40}, @CTA_TIMEOUT_ICMP_TIMEOUT={0x8, 0x1, 0x1, 0x0, 0xfffffffa}, @CTA_TIMEOUT_ICMP_TIMEOUT={0x8, 0x1, 0x1, 0x0, 0xf7a6}, @CTA_TIMEOUT_ICMP_TIMEOUT={0x8, 0x1, 0x1, 0x0, 0x6}, @CTA_TIMEOUT_ICMP_TIMEOUT={0x8, 0x1, 0x1, 0x0, 0x1ff}, @CTA_TIMEOUT_ICMP_TIMEOUT={0x8, 0x1, 0x1, 0x0, 0x7}]}, @CTA_TIMEOUT_L4PROTO={0x5, 0x3, 0x84}, @CTA_TIMEOUT_L4PROTO={0x5, 0x3, 0x11}]}, 0x70}, 0x1, 0x0, 0x0, 0x4044810}, 0x4000800) (async) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000100)=@newlink={0x48, 0x10, 0xffffff1f, 0x0, 0x0, {}, [@IFLA_LINKINFO={0x28, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x18, 0x2, 0x0, 0x1, [@IFLA_BR_MULTI_BOOLOPT={0xc, 0x2e, {0x0, 0x1}}, @IFLA_BR_MCAST_STATS_ENABLED={0x5}]}}}]}, 0x48}}, 0x0) [ 2879.693971][T23161] IPVS: sync thread started: state = BACKUP, mcast_ifn = gre0, syncid = 1, id = 0 [ 2879.762142][T23142] bond1107: entered promiscuous mode [ 2879.768201][T23142] 8021q: adding VLAN 0 to HW filter on device bond1107 17:04:04 executing program 5: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, 
&(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x3a2, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) [ 2879.941660][T23146] bond1107: (slave bridge1133): making interface the new active one [ 2879.952248][T23146] bridge1133: entered promiscuous mode [ 2879.966731][T23146] bond1107: (slave bridge1133): Enslaving as an active interface with an up link [ 2880.073489][T23151] bond676: entered promiscuous mode [ 2880.088188][T23151] 8021q: adding VLAN 0 to HW filter on device bond676 17:04:05 executing program 4: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x28a, 0x0, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0x1e}]}, 0x3c}}, 0x0) [ 2880.243571][T23156] bond1035: entered promiscuous mode [ 2880.249617][T23156] 8021q: adding VLAN 0 to HW filter on device bond1035 [ 2880.361896][T23158] bond1035: (slave bridge1101): making interface the new active one [ 2880.376217][T23158] bridge1101: entered promiscuous mode [ 2880.390359][T23158] bond1035: (slave bridge1101): Enslaving as an active interface with an up link 17:04:05 executing program 2: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x10, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) 17:04:05 executing program 3: r0 = socket$netlink(0x10, 0x3, 0x0) socket(0x0, 0x0, 0x0) sendmsg$nl_route(0xffffffffffffffff, 0x0, 0x0) sendmsg$IPCTNL_MSG_TIMEOUT_DELETE(r0, &(0x7f0000000180)={&(0x7f0000000000)={0x10, 0x0, 0x0, 0x100}, 0xc, &(0x7f00000000c0)={&(0x7f0000000040)={0x70, 0x2, 0x8, 0x801, 0x0, 0x0, {0x5, 0x0, 0xa}, [@CTA_TIMEOUT_DATA={0x4c, 0x4, 0x0, 0x1, @icmp=[@CTA_TIMEOUT_ICMP_TIMEOUT={0x8, 0x1, 0x1, 0x0, 0x1}, @CTA_TIMEOUT_ICMP_TIMEOUT={0x8, 0x1, 0x1, 0x0, 0x851}, @CTA_TIMEOUT_ICMP_TIMEOUT={0x8, 0x1, 0x1, 0x0, 0x200}, @CTA_TIMEOUT_ICMP_TIMEOUT={0x8, 0x1, 0x1, 0x0, 0x40}, @CTA_TIMEOUT_ICMP_TIMEOUT={0x8, 0x1, 0x1, 0x0, 0xfffffffa}, @CTA_TIMEOUT_ICMP_TIMEOUT={0x8, 0x1, 0x1, 0x0, 0xf7a6}, @CTA_TIMEOUT_ICMP_TIMEOUT={0x8, 0x1, 0x1, 0x0, 0x6}, @CTA_TIMEOUT_ICMP_TIMEOUT={0x8, 0x1, 0x1, 0x0, 0x1ff}, @CTA_TIMEOUT_ICMP_TIMEOUT={0x8, 0x1, 0x1, 0x0, 
0x7}]}, @CTA_TIMEOUT_L4PROTO={0x5, 0x3, 0x84}, @CTA_TIMEOUT_L4PROTO={0x5, 0x3, 0x11}]}, 0x70}, 0x1, 0x0, 0x0, 0x4044810}, 0x4000800) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000100)=@newlink={0x48, 0x10, 0xffffff1f, 0x0, 0x0, {}, [@IFLA_LINKINFO={0x28, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x18, 0x2, 0x0, 0x1, [@IFLA_BR_MULTI_BOOLOPT={0xc, 0x2e, {0x0, 0x1}}, @IFLA_BR_MCAST_STATS_ENABLED={0x5}]}}}]}, 0x48}}, 0x0) socket$netlink(0x10, 0x3, 0x0) (async) socket(0x0, 0x0, 0x0) (async) sendmsg$nl_route(0xffffffffffffffff, 0x0, 0x0) (async) sendmsg$IPCTNL_MSG_TIMEOUT_DELETE(r0, &(0x7f0000000180)={&(0x7f0000000000)={0x10, 0x0, 0x0, 0x100}, 0xc, &(0x7f00000000c0)={&(0x7f0000000040)={0x70, 0x2, 0x8, 0x801, 0x0, 0x0, {0x5, 0x0, 0xa}, [@CTA_TIMEOUT_DATA={0x4c, 0x4, 0x0, 0x1, @icmp=[@CTA_TIMEOUT_ICMP_TIMEOUT={0x8, 0x1, 0x1, 0x0, 0x1}, @CTA_TIMEOUT_ICMP_TIMEOUT={0x8, 0x1, 0x1, 0x0, 0x851}, @CTA_TIMEOUT_ICMP_TIMEOUT={0x8, 0x1, 0x1, 0x0, 0x200}, @CTA_TIMEOUT_ICMP_TIMEOUT={0x8, 0x1, 0x1, 0x0, 0x40}, @CTA_TIMEOUT_ICMP_TIMEOUT={0x8, 0x1, 0x1, 0x0, 0xfffffffa}, @CTA_TIMEOUT_ICMP_TIMEOUT={0x8, 0x1, 0x1, 0x0, 0xf7a6}, @CTA_TIMEOUT_ICMP_TIMEOUT={0x8, 0x1, 0x1, 0x0, 0x6}, @CTA_TIMEOUT_ICMP_TIMEOUT={0x8, 0x1, 0x1, 0x0, 0x1ff}, @CTA_TIMEOUT_ICMP_TIMEOUT={0x8, 0x1, 0x1, 0x0, 0x7}]}, @CTA_TIMEOUT_L4PROTO={0x5, 0x3, 0x84}, @CTA_TIMEOUT_L4PROTO={0x5, 0x3, 0x11}]}, 0x70}, 0x1, 0x0, 0x0, 0x4044810}, 0x4000800) (async) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000100)=@newlink={0x48, 0x10, 0xffffff1f, 0x0, 0x0, {}, [@IFLA_LINKINFO={0x28, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x18, 0x2, 0x0, 0x1, [@IFLA_BR_MULTI_BOOLOPT={0xc, 0x2e, {0x0, 0x1}}, @IFLA_BR_MCAST_STATS_ENABLED={0x5}]}}}]}, 0x48}}, 0x0) (async) 17:04:05 executing program 1: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x28a, 0xa, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x0, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8}]}, 0x3c}}, 0x0) 17:04:05 executing program 0: r0 = openat$tun(0xffffffffffffff9c, &(0x7f0000000080), 0x0, 0x0) ioctl$TUNSETIFF(r0, 0x400454ca, &(0x7f00000000c0)={'syzkaller1\x00', 0x2}) ioctl$TUNSETLINK(r0, 0x400454cd, 0x336) r1 = socket$kcm(0x2, 0xa, 0x2) (async) r2 = openat$cgroup_ro(0xffffffffffffffff, &(0x7f00000015c0)='cgroup.controllers\x00', 0x275a, 0x0) r3 = socket$nl_netfilter(0x10, 0x3, 0xc) sendmsg$IPCTNL_MSG_CT_NEW(r3, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000011c0)={0x14, 0x0, 0x1, 0x401, 0x0, 0x0, {0x2}}, 0x14}}, 0x0) (async) r4 = socket$netlink(0x10, 0x3, 0x0) writev(r4, &(0x7f0000000080)=[{&(0x7f00000000c0)="390000001300090468fe0700000000000000ff3f04000000480100100000000004002b000a00010014a4ee1ee438d2fd000000000000007200", 0xfffffe10}], 0x1) (async) writev(r4, &(0x7f0000000000)=[{&(0x7f0000000040)="3900000013001104680907000000000f0000ff3f04000000290a001700000000040037000a00030014917c82aa75b9a64411f6a4ee1ee438d2", 
0x39}], 0x1) (async, rerun: 32) ioctl$BTRFS_IOC_BALANCE_PROGRESS(0xffffffffffffffff, 0x84009422, &(0x7f0000001dc0)={0x0, 0x0, {0x0, @usage, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, @struct}, {0x0, @usage, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, @struct}, {0x0, @usage, 0x0}}) (async, rerun: 32) r7 = socket$nl_generic(0x10, 0x3, 0x10) r8 = socket$inet_udp(0x2, 0x2, 0x0) ioctl$ifreq_SIOCGIFINDEX_batadv_mesh(r8, 0x8933, &(0x7f0000000080)) (async) r9 = socket$inet_udplite(0x2, 0x2, 0x88) setsockopt$IP_VS_SO_SET_STARTDAEMON(r9, 0x0, 0x48b, &(0x7f0000000500)={0x2, 'gre0\x00', 0x1}, 0x18) (async) ioctl$EXT4_IOC_CHECKPOINT(0xffffffffffffffff, 0x4004662b, &(0x7f0000000000)=0x2) pipe(&(0x7f0000001440)={0xffffffffffffffff}) r11 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000001c0)='hugetlb.1GB.usage_in_bytes\x00', 0x275a, 0x0) ioctl$EXT4_IOC_CHECKPOINT(r11, 0x4004662b, &(0x7f0000000000)=0x2) (async, rerun: 64) r12 = bpf$OBJ_GET_MAP(0x7, &(0x7f00000014c0)={&(0x7f0000001480)='./file0\x00', 0x0, 0x8}, 0x10) (rerun: 64) bpf$PROG_LOAD(0x5, &(0x7f0000001540)={0xd, 0x3, &(0x7f0000001280)=@framed={{0x18, 0x0, 0x0, 0x0, 0x2, 0x0, 0x0, 0x0, 0xff}}, &(0x7f0000001700)='syzkaller\x00', 0x1, 0x98, &(0x7f0000001300)=""/152, 0x41100, 0x14, '\x00', 0x0, 0x20, 0xffffffffffffffff, 0x8, &(0x7f00000013c0)={0x3, 0x2}, 0x8, 0x10, &(0x7f0000001400)={0x3, 0x3, 0x59, 0x7fffffff}, 0x10, 0xffffffffffffffff, r2, 0x0, &(0x7f0000001500)=[r2, 0x1, r8, r10, r11, r12]}, 0x80) r13 = bpf$MAP_CREATE(0x0, &(0x7f0000000100)=@base={0x5, 0x100004, 0x20104, 0x9, 0x1, 0x1}, 0x48) bpf$MAP_UPDATE_BATCH(0x1a, &(0x7f0000000200)={0x0, 0x0, &(0x7f0000000300), &(0x7f0000000240), 0x402, r13}, 0x38) (async) sendmsg$BATADV_CMD_TP_METER(r7, &(0x7f0000000dc0)={0x0, 0x0, &(0x7f0000000d80)={&(0x7f0000001600)=ANY=[@ANYRES32=r13, @ANYRESOCT=r5, @ANYRESHEX, @ANYRESOCT, @ANYBLOB="f9717c515ea6e206df2423e1320d8ef87d776903646e5b2044583ba74da88965ee9194854b0aee5aba3fd2b86bf6ebad9d42b66bb6c135d0b436711cb3690b635027b1dd2f12889f0dd6e40511f6ba3b260082108d51b001f73a95974d05716b3515e64a83c6153b5959aabc5f0b3eb6939d33e583a9fb82067c107eb6000000000001008f56f3538992ca89c1", @ANYRESOCT=r4, @ANYRES8=r2], 0x30}, 0x1, 0x0, 0x0, 0x44001}, 0x0) (async) ioctl(r2, 0x1ff, &(0x7f0000001200)="7c9a49b8cc36400ca9cdadea21e864f3574f2e942119581485939b79729790c0e67eb19dd8df790517dbcb250b15b773eec5d6ea734c9611158bd22ed51f73bd8ecbaaa741c20a909a6862075cec84df8d") (async) ioctl$BTRFS_IOC_RESIZE(r3, 0x50009403, &(0x7f0000000080)=ANY=[@ANYRES32=r2, @ANYBLOB='\x00\x00\x00\x00', @ANYRESDEC=r6, @ANYBLOB="3a06303030308473070fe37487fe303030303030333165"]) ioctl$BTRFS_IOC_SUBVOL_CREATE_V2(r0, 0x50009418, &(0x7f00000001c0)={{r1}, 0x0, 0x1e, @inherit={0x50, &(0x7f0000000000)={0x1, 0x1, 0x401, 0x2, {0xc, 0x9, 0x3ff, 0x4, 0x2}, [0x8]}}, @devid=r6}) (async) ioctl$SIOCSIFHWADDR(r1, 0x8914, &(0x7f0000000180)={'syzkaller1\x00', @link_local}) close(r0) [ 2880.414089][T23159] workqueue: Failed to create a rescuer kthread for wq "bond800": -EINTR [ 2880.867912][T23172] bond1108: entered promiscuous mode [ 2880.899393][T23172] 8021q: adding VLAN 0 to HW filter on device bond1108 17:04:05 executing program 5: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, 
&(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x3aa, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) [ 2880.964097][T23173] bond1108: (slave bridge1134): making interface the new active one [ 2880.973094][T23173] bridge1134: entered promiscuous mode [ 2880.988501][T23173] bond1108: (slave bridge1134): Enslaving as an active interface with an up link [ 2881.082369][T23176] bond677: entered promiscuous mode [ 2881.106697][T23176] 8021q: adding VLAN 0 to HW filter on device bond677 17:04:06 executing program 4: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x28a, 0x0, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0x21}]}, 0x3c}}, 0x0) [ 2881.261877][T23181] bond1036: entered promiscuous mode [ 2881.272832][T23181] 8021q: adding VLAN 0 to HW filter on device bond1036 17:04:06 executing program 2: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x12, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) [ 2881.328499][T23183] bond1036: (slave bridge1102): making interface the new active one [ 2881.336786][T23183] bridge1102: entered promiscuous mode [ 2881.350432][T23183] bond1036: (slave bridge1102): Enslaving as an active interface with an up link [ 2881.446446][T23189] bond800: entered promiscuous mode [ 2881.456567][T23189] 8021q: adding VLAN 0 to HW filter on device bond800 17:04:06 executing program 3: r0 = socket$netlink(0x10, 0x3, 0x0) socket(0x0, 0x0, 0x0) r1 = bpf$ITER_CREATE(0x21, &(0x7f0000000000), 0x8) sendmsg$TIPC_NL_PUBL_GET(r1, &(0x7f00000000c0)={&(0x7f0000000040)={0x10, 0x0, 0x0, 0x14000}, 0xc, &(0x7f0000000080)={&(0x7f0000000280)={0x1c4, 0x0, 0x400, 0x70bd28, 0x25dfdbfc, {}, [@TIPC_NLA_NET={0x34, 0x7, 0x0, 0x1, [@TIPC_NLA_NET_NODEID_W1={0xc, 0x4, 0x8}, @TIPC_NLA_NET_ADDR={0x8, 0x2, 0x8ecf}, @TIPC_NLA_NET_ADDR={0x8, 0x2, 0x9}, @TIPC_NLA_NET_ID={0x8, 0x1, 0x401}, 
@TIPC_NLA_NET_NODEID_W1={0xc, 0x4, 0x7f}]}, @TIPC_NLA_NODE={0xac, 0x6, 0x0, 0x1, [@TIPC_NLA_NODE_REKEYING={0x8, 0x6, 0x2}, @TIPC_NLA_NODE_KEY={0x41, 0x4, {'gcm(aes)\x00', 0x19, "261383bcca45d9d47656848d3559ed7f8d476b61111ee60688"}}, @TIPC_NLA_NODE_KEY_MASTER={0x4}, @TIPC_NLA_NODE_KEY_MASTER={0x4}, @TIPC_NLA_NODE_KEY={0x49, 0x4, {'gcm(aes)\x00', 0x21, "a7976d909e4ed9b773a6152d6888221afdac97d32b4b45c800860802a56909c4a4"}}, @TIPC_NLA_NODE_ADDR={0x8, 0x1, 0x200}]}, @TIPC_NLA_NET={0x64, 0x7, 0x0, 0x1, [@TIPC_NLA_NET_NODEID_W1={0xc, 0x4, 0x9}, @TIPC_NLA_NET_NODEID_W1={0xc}, @TIPC_NLA_NET_NODEID={0xc, 0x3, 0x7}, @TIPC_NLA_NET_NODEID={0xc, 0x3, 0x80000001}, @TIPC_NLA_NET_ID={0x8, 0x1, 0x3ff}, @TIPC_NLA_NET_ID={0x8, 0x1, 0x3}, @TIPC_NLA_NET_NODEID={0xc, 0x3, 0x4}, @TIPC_NLA_NET_ADDR={0x8, 0x2, 0x1000}, @TIPC_NLA_NET_NODEID_W1={0xc, 0x4, 0x6}]}, @TIPC_NLA_PUBL={0x24, 0x3, 0x0, 0x1, [@TIPC_NLA_PUBL_TYPE={0x8, 0x1, 0x80000000}, @TIPC_NLA_PUBL_LOWER={0x8, 0x2, 0x7ff}, @TIPC_NLA_PUBL_LOWER={0x8, 0x2, 0x7}, @TIPC_NLA_PUBL_UPPER={0x8, 0x3, 0x5}]}, @TIPC_NLA_BEARER={0x3c, 0x1, 0x0, 0x1, [@TIPC_NLA_BEARER_UDP_OPTS={0x2c, 0x4, {{0x14, 0x1, @in={0x2, 0x4e21, @rand_addr=0x64010102}}, {0x14, 0x2, @in={0x2, 0x4e23, @multicast1}}}}, @TIPC_NLA_BEARER_PROP={0xc, 0x2, 0x0, 0x1, [@TIPC_NLA_PROP_PRIO={0x8, 0x1, 0x6}]}]}, @TIPC_NLA_NET={0xc, 0x7, 0x0, 0x1, [@TIPC_NLA_NET_ADDR={0x8, 0x2, 0x5}]}]}, 0x1c4}, 0x1, 0x0, 0x0, 0x20008880}, 0x2000c008) sendmsg$nl_route(0xffffffffffffffff, 0x0, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000100)=@newlink={0x48, 0x10, 0xffffff1f, 0x0, 0x0, {}, [@IFLA_LINKINFO={0x28, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x18, 0x2, 0x0, 0x1, [@IFLA_BR_MULTI_BOOLOPT={0xc, 0x2e, {0x0, 0x1}}, @IFLA_BR_MCAST_STATS_ENABLED={0x5}]}}}]}, 0x48}}, 0x0) 17:04:06 executing program 1: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x28a, 0xa, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb, 0x2}, {0x4}}}, @IFLA_MASTER={0x8}]}, 0x3c}}, 0x0) [ 2881.714035][T23207] bond1109: entered promiscuous mode [ 2881.719821][T23207] 8021q: adding VLAN 0 to HW filter on device bond1109 17:04:06 executing program 5: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x3b2, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 
0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) [ 2881.781130][T23209] bond1109: (slave bridge1135): making interface the new active one [ 2881.790711][T23209] bridge1135: entered promiscuous mode [ 2881.803980][T23209] bond1109: (slave bridge1135): Enslaving as an active interface with an up link [ 2881.887623][T23211] bond678: entered promiscuous mode [ 2881.902990][T23211] 8021q: adding VLAN 0 to HW filter on device bond678 17:04:06 executing program 4: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x28a, 0x0, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0x22}]}, 0x3c}}, 0x0) [ 2881.993740][T23215] bond1037: entered promiscuous mode [ 2881.999954][T23215] 8021q: adding VLAN 0 to HW filter on device bond1037 [ 2882.153000][T23216] bond1037: (slave bridge1103): making interface the new active one [ 2882.166303][T23216] bridge1103: entered promiscuous mode [ 2882.192608][T23216] bond1037: (slave bridge1103): Enslaving as an active interface with an up link 17:04:07 executing program 2: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x1a, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) 17:04:07 executing program 3: r0 = socket$netlink(0x10, 0x3, 0x0) (async) socket(0x0, 0x0, 0x0) (async) r1 = bpf$ITER_CREATE(0x21, &(0x7f0000000000), 0x8) sendmsg$TIPC_NL_PUBL_GET(r1, &(0x7f00000000c0)={&(0x7f0000000040)={0x10, 0x0, 0x0, 0x14000}, 0xc, &(0x7f0000000080)={&(0x7f0000000280)={0x1c4, 0x0, 0x400, 0x70bd28, 0x25dfdbfc, {}, [@TIPC_NLA_NET={0x34, 0x7, 0x0, 0x1, [@TIPC_NLA_NET_NODEID_W1={0xc, 0x4, 0x8}, @TIPC_NLA_NET_ADDR={0x8, 0x2, 0x8ecf}, @TIPC_NLA_NET_ADDR={0x8, 0x2, 0x9}, @TIPC_NLA_NET_ID={0x8, 0x1, 0x401}, @TIPC_NLA_NET_NODEID_W1={0xc, 0x4, 0x7f}]}, @TIPC_NLA_NODE={0xac, 0x6, 0x0, 0x1, [@TIPC_NLA_NODE_REKEYING={0x8, 0x6, 0x2}, @TIPC_NLA_NODE_KEY={0x41, 0x4, {'gcm(aes)\x00', 0x19, "261383bcca45d9d47656848d3559ed7f8d476b61111ee60688"}}, @TIPC_NLA_NODE_KEY_MASTER={0x4}, @TIPC_NLA_NODE_KEY_MASTER={0x4}, @TIPC_NLA_NODE_KEY={0x49, 0x4, {'gcm(aes)\x00', 0x21, "a7976d909e4ed9b773a6152d6888221afdac97d32b4b45c800860802a56909c4a4"}}, @TIPC_NLA_NODE_ADDR={0x8, 0x1, 0x200}]}, @TIPC_NLA_NET={0x64, 0x7, 
0x0, 0x1, [@TIPC_NLA_NET_NODEID_W1={0xc, 0x4, 0x9}, @TIPC_NLA_NET_NODEID_W1={0xc}, @TIPC_NLA_NET_NODEID={0xc, 0x3, 0x7}, @TIPC_NLA_NET_NODEID={0xc, 0x3, 0x80000001}, @TIPC_NLA_NET_ID={0x8, 0x1, 0x3ff}, @TIPC_NLA_NET_ID={0x8, 0x1, 0x3}, @TIPC_NLA_NET_NODEID={0xc, 0x3, 0x4}, @TIPC_NLA_NET_ADDR={0x8, 0x2, 0x1000}, @TIPC_NLA_NET_NODEID_W1={0xc, 0x4, 0x6}]}, @TIPC_NLA_PUBL={0x24, 0x3, 0x0, 0x1, [@TIPC_NLA_PUBL_TYPE={0x8, 0x1, 0x80000000}, @TIPC_NLA_PUBL_LOWER={0x8, 0x2, 0x7ff}, @TIPC_NLA_PUBL_LOWER={0x8, 0x2, 0x7}, @TIPC_NLA_PUBL_UPPER={0x8, 0x3, 0x5}]}, @TIPC_NLA_BEARER={0x3c, 0x1, 0x0, 0x1, [@TIPC_NLA_BEARER_UDP_OPTS={0x2c, 0x4, {{0x14, 0x1, @in={0x2, 0x4e21, @rand_addr=0x64010102}}, {0x14, 0x2, @in={0x2, 0x4e23, @multicast1}}}}, @TIPC_NLA_BEARER_PROP={0xc, 0x2, 0x0, 0x1, [@TIPC_NLA_PROP_PRIO={0x8, 0x1, 0x6}]}]}, @TIPC_NLA_NET={0xc, 0x7, 0x0, 0x1, [@TIPC_NLA_NET_ADDR={0x8, 0x2, 0x5}]}]}, 0x1c4}, 0x1, 0x0, 0x0, 0x20008880}, 0x2000c008) (async) sendmsg$nl_route(0xffffffffffffffff, 0x0, 0x0) (async) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000100)=@newlink={0x48, 0x10, 0xffffff1f, 0x0, 0x0, {}, [@IFLA_LINKINFO={0x28, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x18, 0x2, 0x0, 0x1, [@IFLA_BR_MULTI_BOOLOPT={0xc, 0x2e, {0x0, 0x1}}, @IFLA_BR_MCAST_STATS_ENABLED={0x5}]}}}]}, 0x48}}, 0x0) 17:04:07 executing program 0: r0 = openat$tun(0xffffffffffffff9c, &(0x7f0000000080), 0x0, 0x0) ioctl$TUNSETIFF(r0, 0x400454ca, &(0x7f00000000c0)={'syzkaller1\x00', 0x2}) ioctl$TUNSETLINK(r0, 0x400454cd, 0x336) r1 = socket$kcm(0x2, 0xa, 0x2) (async) r2 = openat$cgroup_ro(0xffffffffffffffff, &(0x7f00000015c0)='cgroup.controllers\x00', 0x275a, 0x0) (async) r3 = socket$nl_netfilter(0x10, 0x3, 0xc) sendmsg$IPCTNL_MSG_CT_NEW(r3, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000011c0)={0x14, 0x0, 0x1, 0x401, 0x0, 0x0, {0x2}}, 0x14}}, 0x0) (async) r4 = socket$netlink(0x10, 0x3, 0x0) writev(r4, &(0x7f0000000080)=[{&(0x7f00000000c0)="390000001300090468fe0700000000000000ff3f04000000480100100000000004002b000a00010014a4ee1ee438d2fd000000000000007200", 0xfffffe10}], 0x1) (async) writev(r4, &(0x7f0000000000)=[{&(0x7f0000000040)="3900000013001104680907000000000f0000ff3f04000000290a001700000000040037000a00030014917c82aa75b9a64411f6a4ee1ee438d2", 0x39}], 0x1) (async) ioctl$BTRFS_IOC_BALANCE_PROGRESS(0xffffffffffffffff, 0x84009422, &(0x7f0000001dc0)={0x0, 0x0, {0x0, @usage, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, @struct}, {0x0, @usage, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, @struct}, {0x0, @usage, 0x0}}) r7 = socket$nl_generic(0x10, 0x3, 0x10) (async) r8 = socket$inet_udp(0x2, 0x2, 0x0) ioctl$ifreq_SIOCGIFINDEX_batadv_mesh(r8, 0x8933, &(0x7f0000000080)) (async) r9 = socket$inet_udplite(0x2, 0x2, 0x88) setsockopt$IP_VS_SO_SET_STARTDAEMON(r9, 0x0, 0x48b, &(0x7f0000000500)={0x2, 'gre0\x00', 0x1}, 0x18) (async) ioctl$EXT4_IOC_CHECKPOINT(0xffffffffffffffff, 0x4004662b, &(0x7f0000000000)=0x2) (async) pipe(&(0x7f0000001440)={0xffffffffffffffff}) r11 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000001c0)='hugetlb.1GB.usage_in_bytes\x00', 0x275a, 0x0) ioctl$EXT4_IOC_CHECKPOINT(r11, 0x4004662b, &(0x7f0000000000)=0x2) (async) r12 = bpf$OBJ_GET_MAP(0x7, &(0x7f00000014c0)={&(0x7f0000001480)='./file0\x00', 0x0, 0x8}, 0x10) bpf$PROG_LOAD(0x5, &(0x7f0000001540)={0xd, 0x3, &(0x7f0000001280)=@framed={{0x18, 0x0, 0x0, 0x0, 0x2, 0x0, 0x0, 0x0, 0xff}}, &(0x7f0000001700)='syzkaller\x00', 0x1, 0x98, &(0x7f0000001300)=""/152, 0x41100, 0x14, '\x00', 0x0, 0x20, 0xffffffffffffffff, 0x8, &(0x7f00000013c0)={0x3, 0x2}, 0x8, 0x10, 
&(0x7f0000001400)={0x3, 0x3, 0x59, 0x7fffffff}, 0x10, 0xffffffffffffffff, r2, 0x0, &(0x7f0000001500)=[r2, 0x1, r8, r10, r11, r12]}, 0x80) (async) r13 = bpf$MAP_CREATE(0x0, &(0x7f0000000100)=@base={0x5, 0x100004, 0x20104, 0x9, 0x1, 0x1}, 0x48) bpf$MAP_UPDATE_BATCH(0x1a, &(0x7f0000000200)={0x0, 0x0, &(0x7f0000000300), &(0x7f0000000240), 0x402, r13}, 0x38) (async) sendmsg$BATADV_CMD_TP_METER(r7, &(0x7f0000000dc0)={0x0, 0x0, &(0x7f0000000d80)={&(0x7f0000001600)=ANY=[@ANYRES32=r13, @ANYRESOCT=r5, @ANYRESHEX, @ANYRESOCT, @ANYBLOB="f9717c515ea6e206df2423e1320d8ef87d776903646e5b2044583ba74da88965ee9194854b0aee5aba3fd2b86bf6ebad9d42b66bb6c135d0b436711cb3690b635027b1dd2f12889f0dd6e40511f6ba3b260082108d51b001f73a95974d05716b3515e64a83c6153b5959aabc5f0b3eb6939d33e583a9fb82067c107eb6000000000001008f56f3538992ca89c1", @ANYRESOCT=r4, @ANYRES8=r2], 0x30}, 0x1, 0x0, 0x0, 0x44001}, 0x0) ioctl(r2, 0x1ff, &(0x7f0000001200)="7c9a49b8cc36400ca9cdadea21e864f3574f2e942119581485939b79729790c0e67eb19dd8df790517dbcb250b15b773eec5d6ea734c9611158bd22ed51f73bd8ecbaaa741c20a909a6862075cec84df8d") (async) ioctl$BTRFS_IOC_RESIZE(r3, 0x50009403, &(0x7f0000000080)=ANY=[@ANYRES32=r2, @ANYBLOB='\x00\x00\x00\x00', @ANYRESDEC=r6, @ANYBLOB="3a06303030308473070fe37487fe303030303030333165"]) (async) ioctl$BTRFS_IOC_SUBVOL_CREATE_V2(r0, 0x50009418, &(0x7f00000001c0)={{r1}, 0x0, 0x1e, @inherit={0x50, &(0x7f0000000000)={0x1, 0x1, 0x401, 0x2, {0xc, 0x9, 0x3ff, 0x4, 0x2}, [0x8]}}, @devid=r6}) (async) ioctl$SIOCSIFHWADDR(r1, 0x8914, &(0x7f0000000180)={'syzkaller1\x00', @link_local}) close(r0) [ 2882.249589][T23223] validate_nla: 12 callbacks suppressed [ 2882.249615][T23223] netlink: 'syz-executor.1': attribute type 1 has an invalid length. [ 2882.346876][T23223] bond801: entered promiscuous mode [ 2882.352688][T23223] 8021q: adding VLAN 0 to HW filter on device bond801 17:04:07 executing program 3: r0 = socket$netlink(0x10, 0x3, 0x0) socket(0x0, 0x0, 0x0) (async) socket(0x0, 0x0, 0x0) r1 = bpf$ITER_CREATE(0x21, &(0x7f0000000000), 0x8) sendmsg$TIPC_NL_PUBL_GET(r1, &(0x7f00000000c0)={&(0x7f0000000040)={0x10, 0x0, 0x0, 0x14000}, 0xc, &(0x7f0000000080)={&(0x7f0000000280)={0x1c4, 0x0, 0x400, 0x70bd28, 0x25dfdbfc, {}, [@TIPC_NLA_NET={0x34, 0x7, 0x0, 0x1, [@TIPC_NLA_NET_NODEID_W1={0xc, 0x4, 0x8}, @TIPC_NLA_NET_ADDR={0x8, 0x2, 0x8ecf}, @TIPC_NLA_NET_ADDR={0x8, 0x2, 0x9}, @TIPC_NLA_NET_ID={0x8, 0x1, 0x401}, @TIPC_NLA_NET_NODEID_W1={0xc, 0x4, 0x7f}]}, @TIPC_NLA_NODE={0xac, 0x6, 0x0, 0x1, [@TIPC_NLA_NODE_REKEYING={0x8, 0x6, 0x2}, @TIPC_NLA_NODE_KEY={0x41, 0x4, {'gcm(aes)\x00', 0x19, "261383bcca45d9d47656848d3559ed7f8d476b61111ee60688"}}, @TIPC_NLA_NODE_KEY_MASTER={0x4}, @TIPC_NLA_NODE_KEY_MASTER={0x4}, @TIPC_NLA_NODE_KEY={0x49, 0x4, {'gcm(aes)\x00', 0x21, "a7976d909e4ed9b773a6152d6888221afdac97d32b4b45c800860802a56909c4a4"}}, @TIPC_NLA_NODE_ADDR={0x8, 0x1, 0x200}]}, @TIPC_NLA_NET={0x64, 0x7, 0x0, 0x1, [@TIPC_NLA_NET_NODEID_W1={0xc, 0x4, 0x9}, @TIPC_NLA_NET_NODEID_W1={0xc}, @TIPC_NLA_NET_NODEID={0xc, 0x3, 0x7}, @TIPC_NLA_NET_NODEID={0xc, 0x3, 0x80000001}, @TIPC_NLA_NET_ID={0x8, 0x1, 0x3ff}, @TIPC_NLA_NET_ID={0x8, 0x1, 0x3}, @TIPC_NLA_NET_NODEID={0xc, 0x3, 0x4}, @TIPC_NLA_NET_ADDR={0x8, 0x2, 0x1000}, @TIPC_NLA_NET_NODEID_W1={0xc, 0x4, 0x6}]}, @TIPC_NLA_PUBL={0x24, 0x3, 0x0, 0x1, [@TIPC_NLA_PUBL_TYPE={0x8, 0x1, 0x80000000}, @TIPC_NLA_PUBL_LOWER={0x8, 0x2, 0x7ff}, @TIPC_NLA_PUBL_LOWER={0x8, 0x2, 0x7}, @TIPC_NLA_PUBL_UPPER={0x8, 0x3, 0x5}]}, @TIPC_NLA_BEARER={0x3c, 0x1, 0x0, 0x1, [@TIPC_NLA_BEARER_UDP_OPTS={0x2c, 0x4, {{0x14, 0x1, @in={0x2, 0x4e21, 
@rand_addr=0x64010102}}, {0x14, 0x2, @in={0x2, 0x4e23, @multicast1}}}}, @TIPC_NLA_BEARER_PROP={0xc, 0x2, 0x0, 0x1, [@TIPC_NLA_PROP_PRIO={0x8, 0x1, 0x6}]}]}, @TIPC_NLA_NET={0xc, 0x7, 0x0, 0x1, [@TIPC_NLA_NET_ADDR={0x8, 0x2, 0x5}]}]}, 0x1c4}, 0x1, 0x0, 0x0, 0x20008880}, 0x2000c008) (async) sendmsg$TIPC_NL_PUBL_GET(r1, &(0x7f00000000c0)={&(0x7f0000000040)={0x10, 0x0, 0x0, 0x14000}, 0xc, &(0x7f0000000080)={&(0x7f0000000280)={0x1c4, 0x0, 0x400, 0x70bd28, 0x25dfdbfc, {}, [@TIPC_NLA_NET={0x34, 0x7, 0x0, 0x1, [@TIPC_NLA_NET_NODEID_W1={0xc, 0x4, 0x8}, @TIPC_NLA_NET_ADDR={0x8, 0x2, 0x8ecf}, @TIPC_NLA_NET_ADDR={0x8, 0x2, 0x9}, @TIPC_NLA_NET_ID={0x8, 0x1, 0x401}, @TIPC_NLA_NET_NODEID_W1={0xc, 0x4, 0x7f}]}, @TIPC_NLA_NODE={0xac, 0x6, 0x0, 0x1, [@TIPC_NLA_NODE_REKEYING={0x8, 0x6, 0x2}, @TIPC_NLA_NODE_KEY={0x41, 0x4, {'gcm(aes)\x00', 0x19, "261383bcca45d9d47656848d3559ed7f8d476b61111ee60688"}}, @TIPC_NLA_NODE_KEY_MASTER={0x4}, @TIPC_NLA_NODE_KEY_MASTER={0x4}, @TIPC_NLA_NODE_KEY={0x49, 0x4, {'gcm(aes)\x00', 0x21, "a7976d909e4ed9b773a6152d6888221afdac97d32b4b45c800860802a56909c4a4"}}, @TIPC_NLA_NODE_ADDR={0x8, 0x1, 0x200}]}, @TIPC_NLA_NET={0x64, 0x7, 0x0, 0x1, [@TIPC_NLA_NET_NODEID_W1={0xc, 0x4, 0x9}, @TIPC_NLA_NET_NODEID_W1={0xc}, @TIPC_NLA_NET_NODEID={0xc, 0x3, 0x7}, @TIPC_NLA_NET_NODEID={0xc, 0x3, 0x80000001}, @TIPC_NLA_NET_ID={0x8, 0x1, 0x3ff}, @TIPC_NLA_NET_ID={0x8, 0x1, 0x3}, @TIPC_NLA_NET_NODEID={0xc, 0x3, 0x4}, @TIPC_NLA_NET_ADDR={0x8, 0x2, 0x1000}, @TIPC_NLA_NET_NODEID_W1={0xc, 0x4, 0x6}]}, @TIPC_NLA_PUBL={0x24, 0x3, 0x0, 0x1, [@TIPC_NLA_PUBL_TYPE={0x8, 0x1, 0x80000000}, @TIPC_NLA_PUBL_LOWER={0x8, 0x2, 0x7ff}, @TIPC_NLA_PUBL_LOWER={0x8, 0x2, 0x7}, @TIPC_NLA_PUBL_UPPER={0x8, 0x3, 0x5}]}, @TIPC_NLA_BEARER={0x3c, 0x1, 0x0, 0x1, [@TIPC_NLA_BEARER_UDP_OPTS={0x2c, 0x4, {{0x14, 0x1, @in={0x2, 0x4e21, @rand_addr=0x64010102}}, {0x14, 0x2, @in={0x2, 0x4e23, @multicast1}}}}, @TIPC_NLA_BEARER_PROP={0xc, 0x2, 0x0, 0x1, [@TIPC_NLA_PROP_PRIO={0x8, 0x1, 0x6}]}]}, @TIPC_NLA_NET={0xc, 0x7, 0x0, 0x1, [@TIPC_NLA_NET_ADDR={0x8, 0x2, 0x5}]}]}, 0x1c4}, 0x1, 0x0, 0x0, 0x20008880}, 0x2000c008) sendmsg$nl_route(0xffffffffffffffff, 0x0, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000100)=@newlink={0x48, 0x10, 0xffffff1f, 0x0, 0x0, {}, [@IFLA_LINKINFO={0x28, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x18, 0x2, 0x0, 0x1, [@IFLA_BR_MULTI_BOOLOPT={0xc, 0x2e, {0x0, 0x1}}, @IFLA_BR_MCAST_STATS_ENABLED={0x5}]}}}]}, 0x48}}, 0x0) 17:04:07 executing program 1: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x28a, 0xa, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb, 0x7}, {0x4}}}, @IFLA_MASTER={0x8}]}, 0x3c}}, 0x0) [ 2882.545233][T23227] netlink: 'syz-executor.5': attribute type 1 has an invalid length. 
17:04:07 executing program 3: r0 = socket$netlink(0x10, 0x3, 0x0) sendmsg$nl_route(r0, &(0x7f0000000280)={&(0x7f00000000c0)={0x10, 0x0, 0x0, 0x44}, 0xc, &(0x7f0000000200)={&(0x7f00000002c0)=ANY=[@ANYBLOB="d80000002000100025bd7000ffdbdf250a208001000400020000000014000200fe8800a100000000000000000000010114000200fc02c63e00000000009d000000000001140002002001000000000000000000000000000014000100fe80000000faff0000000000000000bb14000100000000000000000000000000ffffffffffffff7f010000000000000000000000000001140002000114000200fc02000000000000000000000000000014400200ff020000000000000000000000000001080017004e224e2100e9bed25bfe83aea6d551300268a4ddd444254cc9e68ad52c1f198c1d050a814e28723595799e535a3d124a63ae570aa0e9bb48163354b16694ca66bb3165969a48e0629ede420b8463dfb26eea420585d60f2065df0799b7d98c9d7b92d0474c4a564d1b23b6fb770b1165e6b17e36c8aba3669f"], 0xd8}}, 0x4040000) socket(0x0, 0x0, 0x0) sendmsg$nl_route(0xffffffffffffffff, 0x0, 0x0) pipe(&(0x7f0000000100)={0xffffffffffffffff, 0xffffffffffffffff}) ioctl$sock_ipv4_tunnel_SIOCGETTUNNEL(r1, 0x89f0, &(0x7f00000001c0)={'tunl0\x00', &(0x7f0000000400)=ANY=[@ANYBLOB='tunl0\x00'/16, @ANYRES32=0x0, @ANYBLOB="00200040000000015aa2cf37406d00400067000009299078e0000002e0000002441c0e81e0000001000000020000000000000009e000000100000000830f52ac1414bb7f000001ffffffff00dd32b5a4ef5be1b0823942fe3883f7bcaa516d5ddcf8b4a9bf4af9419e8edc5999bcefa1502e9c7f26b8bc7cc468f686d76d9b546595c9e05150f0a295dee238080fa65d3fd104f8818b1c566fabe25785ac10ed98fcda2df3e0c41fb342edee98f76dcbb11f3d925b5a4920a16e34451b1ceb0dc4132532cf547b2644cbac8b00daa0846fa4d68f8421cfc743e0a544b4b0fd0591a761917e0470452b1792a2ce47fb6fea80b784e167e6a82c2b827c649063a6fa514dd69e24e0879ec523a96f2a4400002b241e333de83eeb063107e9964a8b375ca818de3f9ff9bf6cf21cd8804d102fc6e4c286046cc40c1446dc8fa191326a234b3f008fb6c9a3bfc7e3ab1de02ffc3c9e10885c97eac02c045ae5da7d0a7e6bf7155a3780e6b713ecd4803b1338b9c832949f9cda0d1871379b93"]}) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000000)=ANY=[@ANYBLOB="2c737171c5038a2b663348000000100b1fff0000000000000000000000004e22572932776f1316958ad0cd10ebd3a56620f8495b2f6854643f1f9c56a4b3a90a937fb0eee522041dc9d82ef5bb8d001ca3b9a7fb95447ffc885cd1a87b6ee0ed7381f6701ac472d9774911b3ecff44dc8d4a785af0e78b0809dfad0eacafb29b58e10d1779a55f4a", @ANYRES32=0x0, @ANYBLOB="0000000000000000280012800b0001006272696467650000180002800c002e00000000000100000005002a0000000000"], 0x48}}, 0x20040800) [ 2882.747757][T23227] bond1110: entered promiscuous mode [ 2882.757800][T23227] 8021q: adding VLAN 0 to HW filter on device bond1110 17:04:07 executing program 3: r0 = socket$netlink(0x10, 0x3, 0x0) sendmsg$nl_route(r0, &(0x7f0000000280)={&(0x7f00000000c0)={0x10, 0x0, 0x0, 0x44}, 0xc, &(0x7f0000000200)={&(0x7f00000002c0)=ANY=[@ANYBLOB="d80000002000100025bd7000ffdbdf250a208001000400020000000014000200fe8800a100000000000000000000010114000200fc02c63e00000000009d000000000001140002002001000000000000000000000000000014000100fe80000000faff0000000000000000bb14000100000000000000000000000000ffffffffffffff7f010000000000000000000000000001140002000114000200fc02000000000000000000000000000014400200ff020000000000000000000000000001080017004e224e2100e9bed25bfe83aea6d551300268a4ddd444254cc9e68ad52c1f198c1d050a814e28723595799e535a3d124a63ae570aa0e9bb48163354b16694ca66bb3165969a48e0629ede420b8463dfb26eea420585d60f2065df0799b7d98c9d7b92d0474c4a564d1b23b6fb770b1165e6b17e36c8aba3669f"], 0xd8}}, 0x4040000) (async) socket(0x0, 0x0, 0x0) sendmsg$nl_route(0xffffffffffffffff, 0x0, 0x0) 
pipe(&(0x7f0000000100)={0xffffffffffffffff, 0xffffffffffffffff}) ioctl$sock_ipv4_tunnel_SIOCGETTUNNEL(r1, 0x89f0, &(0x7f00000001c0)={'tunl0\x00', &(0x7f0000000400)=ANY=[@ANYBLOB='tunl0\x00'/16, @ANYRES32=0x0, @ANYBLOB="00200040000000015aa2cf37406d00400067000009299078e0000002e0000002441c0e81e0000001000000020000000000000009e000000100000000830f52ac1414bb7f000001ffffffff00dd32b5a4ef5be1b0823942fe3883f7bcaa516d5ddcf8b4a9bf4af9419e8edc5999bcefa1502e9c7f26b8bc7cc468f686d76d9b546595c9e05150f0a295dee238080fa65d3fd104f8818b1c566fabe25785ac10ed98fcda2df3e0c41fb342edee98f76dcbb11f3d925b5a4920a16e34451b1ceb0dc4132532cf547b2644cbac8b00daa0846fa4d68f8421cfc743e0a544b4b0fd0591a761917e0470452b1792a2ce47fb6fea80b784e167e6a82c2b827c649063a6fa514dd69e24e0879ec523a96f2a4400002b241e333de83eeb063107e9964a8b375ca818de3f9ff9bf6cf21cd8804d102fc6e4c286046cc40c1446dc8fa191326a234b3f008fb6c9a3bfc7e3ab1de02ffc3c9e10885c97eac02c045ae5da7d0a7e6bf7155a3780e6b713ecd4803b1338b9c832949f9cda0d1871379b93"]}) (async) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000000)=ANY=[@ANYBLOB="2c737171c5038a2b663348000000100b1fff0000000000000000000000004e22572932776f1316958ad0cd10ebd3a56620f8495b2f6854643f1f9c56a4b3a90a937fb0eee522041dc9d82ef5bb8d001ca3b9a7fb95447ffc885cd1a87b6ee0ed7381f6701ac472d9774911b3ecff44dc8d4a785af0e78b0809dfad0eacafb29b58e10d1779a55f4a", @ANYRES32=0x0, @ANYBLOB="0000000000000000280012800b0001006272696467650000180002800c002e00000000000100000005002a0000000000"], 0x48}}, 0x20040800) [ 2882.887847][T23228] bond1110: (slave bridge1136): making interface the new active one [ 2882.897573][T23228] bridge1136: entered promiscuous mode [ 2882.935485][T23228] bond1110: (slave bridge1136): Enslaving as an active interface with an up link 17:04:08 executing program 5: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x3c6, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) [ 2883.044745][T23232] netlink: 'syz-executor.4': attribute type 1 has an invalid length. 
17:04:08 executing program 3: socket$netlink(0x10, 0x3, 0x0) (async) r0 = socket$netlink(0x10, 0x3, 0x0) sendmsg$nl_route(r0, &(0x7f0000000280)={&(0x7f00000000c0)={0x10, 0x0, 0x0, 0x44}, 0xc, &(0x7f0000000200)={&(0x7f00000002c0)=ANY=[@ANYBLOB="d80000002000100025bd7000ffdbdf250a208001000400020000000014000200fe8800a100000000000000000000010114000200fc02c63e00000000009d000000000001140002002001000000000000000000000000000014000100fe80000000faff0000000000000000bb14000100000000000000000000000000ffffffffffffff7f010000000000000000000000000001140002000114000200fc02000000000000000000000000000014400200ff020000000000000000000000000001080017004e224e2100e9bed25bfe83aea6d551300268a4ddd444254cc9e68ad52c1f198c1d050a814e28723595799e535a3d124a63ae570aa0e9bb48163354b16694ca66bb3165969a48e0629ede420b8463dfb26eea420585d60f2065df0799b7d98c9d7b92d0474c4a564d1b23b6fb770b1165e6b17e36c8aba3669f"], 0xd8}}, 0x4040000) socket(0x0, 0x0, 0x0) sendmsg$nl_route(0xffffffffffffffff, 0x0, 0x0) pipe(&(0x7f0000000100)) (async) pipe(&(0x7f0000000100)={0xffffffffffffffff, 0xffffffffffffffff}) ioctl$sock_ipv4_tunnel_SIOCGETTUNNEL(r1, 0x89f0, &(0x7f00000001c0)={'tunl0\x00', &(0x7f0000000400)=ANY=[@ANYBLOB='tunl0\x00'/16, @ANYRES32=0x0, @ANYBLOB="00200040000000015aa2cf37406d00400067000009299078e0000002e0000002441c0e81e0000001000000020000000000000009e000000100000000830f52ac1414bb7f000001ffffffff00dd32b5a4ef5be1b0823942fe3883f7bcaa516d5ddcf8b4a9bf4af9419e8edc5999bcefa1502e9c7f26b8bc7cc468f686d76d9b546595c9e05150f0a295dee238080fa65d3fd104f8818b1c566fabe25785ac10ed98fcda2df3e0c41fb342edee98f76dcbb11f3d925b5a4920a16e34451b1ceb0dc4132532cf547b2644cbac8b00daa0846fa4d68f8421cfc743e0a544b4b0fd0591a761917e0470452b1792a2ce47fb6fea80b784e167e6a82c2b827c649063a6fa514dd69e24e0879ec523a96f2a4400002b241e333de83eeb063107e9964a8b375ca818de3f9ff9bf6cf21cd8804d102fc6e4c286046cc40c1446dc8fa191326a234b3f008fb6c9a3bfc7e3ab1de02ffc3c9e10885c97eac02c045ae5da7d0a7e6bf7155a3780e6b713ecd4803b1338b9c832949f9cda0d1871379b93"]}) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000000)=ANY=[@ANYBLOB="2c737171c5038a2b663348000000100b1fff0000000000000000000000004e22572932776f1316958ad0cd10ebd3a56620f8495b2f6854643f1f9c56a4b3a90a937fb0eee522041dc9d82ef5bb8d001ca3b9a7fb95447ffc885cd1a87b6ee0ed7381f6701ac472d9774911b3ecff44dc8d4a785af0e78b0809dfad0eacafb29b58e10d1779a55f4a", @ANYRES32=0x0, @ANYBLOB="0000000000000000280012800b0001006272696467650000180002800c002e00000000000100000005002a0000000000"], 0x48}}, 0x20040800) 17:04:08 executing program 4: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x28a, 0x0, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0x23}]}, 0x3c}}, 0x0) [ 2883.058017][T23232] workqueue: Failed to create a rescuer kthread for wq "bond679": -EINTR [ 2883.195089][T23237] netlink: 'syz-executor.2': attribute type 1 has an invalid length. 
17:04:08 executing program 3: r0 = socket$netlink(0x10, 0x3, 0x0) connect$pppl2tp(0xffffffffffffffff, &(0x7f0000000000)=@pppol2tp={0x18, 0x1, {0x0, r0, {0x2, 0x4e20, @empty}, 0x3, 0x4, 0x0, 0x1}}, 0x26) socket(0x0, 0x0, 0x0) sendmsg$nl_route(0xffffffffffffffff, 0x0, 0x0) r1 = syz_init_net_socket$802154_dgram(0x24, 0x2, 0x0) ioctl$BTRFS_IOC_GET_SUBVOL_INFO(r1, 0x81f8943c, &(0x7f0000000280)) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000100)=@newlink={0x48, 0x10, 0xffffff1f, 0x0, 0x0, {}, [@IFLA_LINKINFO={0x28, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x18, 0x2, 0x0, 0x1, [@IFLA_BR_MULTI_BOOLOPT={0xc, 0x2e, {0x0, 0x1}}, @IFLA_BR_MCAST_STATS_ENABLED={0x5}]}}}]}, 0x48}}, 0x0) [ 2883.233533][T23237] workqueue: Failed to create a rescuer kthread for wq "bond1038": -EINTR [ 2883.350025][T23252] netlink: 'syz-executor.1': attribute type 1 has an invalid length. 17:04:08 executing program 2: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x3c, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) 17:04:08 executing program 1: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x28a, 0xa, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb, 0x300}, {0x4}}}, @IFLA_MASTER={0x8}]}, 0x3c}}, 0x0) [ 2883.394929][T23252] workqueue: Failed to create a rescuer kthread for wq "bond802": -EINTR [ 2883.467666][T23279] netlink: 'syz-executor.5': attribute type 1 has an invalid length. [ 2883.575880][T23279] bond1111: entered promiscuous mode [ 2883.583234][T23279] 8021q: adding VLAN 0 to HW filter on device bond1111 [ 2883.681602][T23286] bond1111: (slave bridge1137): making interface the new active one [ 2883.691591][T23286] bridge1137: entered promiscuous mode [ 2883.704864][T23286] bond1111: (slave bridge1137): Enslaving as an active interface with an up link [ 2883.715596][T23292] netlink: 'syz-executor.4': attribute type 1 has an invalid length. 
17:04:08 executing program 5: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x3ca, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) [ 2883.831427][T23292] bond679: entered promiscuous mode [ 2883.837330][T23292] 8021q: adding VLAN 0 to HW filter on device bond679 17:04:08 executing program 0: r0 = socket$nl_route(0x10, 0x3, 0x0) r1 = socket(0x10, 0x803, 0x0) r2 = syz_genetlink_get_family_id$ipvs(&(0x7f00000003c0), r1) getsockname$packet(r1, &(0x7f0000000100)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000200)=0x14) ioctl$sock_kcm_SIOCKCMCLONE(r1, 0x89e2, &(0x7f0000000340)={r0}) ioctl$ifreq_SIOCGIFINDEX_batadv_mesh(r1, 0x8933, &(0x7f0000000440)={'batadv0\x00', 0x0}) sendmsg$DEVLINK_CMD_SB_POOL_SET(r4, &(0x7f0000000a00)={&(0x7f0000000700)={0x10, 0x0, 0x0, 0x86000000}, 0xc, &(0x7f00000009c0)={&(0x7f0000000a40)=ANY=[@ANYBLOB="4402000026188e34299792486b0eaba82b63b3b1d21df48036fd83f15bf40064c4348b02395dba5a65c573e0bc89fed8e26b0862903dd291ae8fc60c225d59f616a098d4ebc61007aa8bf27a816218d98beab1389c8cc05d072e0dc3862ca00b9be1b2eac84516c58008b43882", @ANYRES16=0x0, @ANYBLOB="200028bd7000fedbdf25100000000e0001006e657464657673696d0000000f0002006e657464657673696d30000008000b000100000006001100ff0700000800130000f8ffff05001400000000000e0001006e657464657673696d0000000f0002006e657464657673696d30000008000b0002000000060011000200000008001300040000000500140001000000080001007063690011000200303030303a30303a31302e300000000008000b00afc100000600110001000000080013008100000005001400010000000e0001006e657464657673696d0000000f0002006e657464657673696d30000008000b00000000000600110000000000080013000000008005001400010000000e0001006e657464657673696d0000000f0002006e657464657673696d30000008000b0000100000060011007a00000008001300ff00000005001400000000000e0001006e657464657673696d0000000f0002006e657464657673696d30000008000b00b604000006001100fdff000008001300050000000500140000000000080001007063690011000200303030303a30303a31302e300000000008000b00aabc0000060011000600000008001300060000000500140000000000080001007063690011000200303030303a30303a31302e300000000008000b0001000000060011008000000008001300ffffffff0500140000000000080001007063690011000200303030303a30303a31302e300000000008000b0009000000060011000557000008001300000800000500140000000000"], 0x244}, 0x1, 0x0, 0x0, 0x4000040}, 0x24048004) sendmsg$nl_route(r4, &(0x7f0000000580)={&(0x7f0000000400)={0x10, 0x0, 0x0, 0x10000}, 0xc, &(0x7f0000000480)={&(0x7f0000000500)=@mpls_getnetconf={0x44, 0x52, 0x10, 0x70bd2d, 0x25dfdbfc, {}, [@IGNORE_ROUTES_WITH_LINKDOWN={0x8}, @IGNORE_ROUTES_WITH_LINKDOWN={0x8, 0x6, 0x7ff}, @NETCONFA_IFINDEX={0x8, 0x1, r3}, @NETCONFA_IFINDEX={0x8, 0x1, r5}, @NETCONFA_IFINDEX={0x8, 0x1, r3}, @IGNORE_ROUTES_WITH_LINKDOWN={0x8}]}, 0x44}, 0x1, 0x0, 0x0, 0x4}, 0x10) sendmsg$nl_route(r0, &(0x7f0000000180)={0x0, 0x0, 
&(0x7f00000000c0)={&(0x7f0000000380)=ANY=[@ANYBLOB="3c0000001000010400000400ec00000000000000", @ANYRES32=r3, @ANYBLOB="00000000000000001c0012000b000100627269646765"], 0x3c}}, 0x0) r6 = openat$cgroup_ro(0xffffffffffffffff, &(0x7f0000000140)='freezer.parent_freezing\x00', 0x0, 0x0) getsockname$packet(r6, &(0x7f0000000240)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @multicast}, &(0x7f0000000300)=0x14) r7 = socket$netlink(0x10, 0x3, 0x0) getsockname$packet(r1, &(0x7f00000001c0)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f00000004c0)=0x14) sendmsg$nl_route(r7, &(0x7f0000000080)={0x0, 0x0, &(0x7f0000000040)={&(0x7f00000000c0)=@newlink={0x28, 0x10, 0x825, 0x0, 0x0, {0xa, 0x0, 0x0, r8}, [@IFLA_PROTO_DOWN={0x8, 0xa, 0xf}]}, 0x28}}, 0x0) r9 = socket$nl_route(0x10, 0x3, 0x0) r10 = socket$packet(0x11, 0x3, 0x300) getsockname$packet(r10, &(0x7f0000000100)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000200)) setsockopt$inet_group_source_req(r4, 0x0, 0x2c, &(0x7f00000005c0)={0x10000, {{0x2, 0x4e21, @multicast2}}, {{0x2, 0x4e23, @empty}}}, 0x108) sendmsg$nl_route(r9, &(0x7f00000002c0)={0x0, 0x0, &(0x7f0000000280)={&(0x7f0000000000)=ANY=[@ANYRES8=r11, @ANYRES32=r11, @ANYRESDEC=r2], 0x20}}, 0x0) 17:04:08 executing program 4: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x28a, 0x0, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0x24}]}, 0x3c}}, 0x0) 17:04:08 executing program 3: r0 = socket$netlink(0x10, 0x3, 0x0) connect$pppl2tp(0xffffffffffffffff, &(0x7f0000000000)=@pppol2tp={0x18, 0x1, {0x0, r0, {0x2, 0x4e20, @empty}, 0x3, 0x4, 0x0, 0x1}}, 0x26) socket(0x0, 0x0, 0x0) (async) sendmsg$nl_route(0xffffffffffffffff, 0x0, 0x0) (async) r1 = syz_init_net_socket$802154_dgram(0x24, 0x2, 0x0) ioctl$BTRFS_IOC_GET_SUBVOL_INFO(r1, 0x81f8943c, &(0x7f0000000280)) (async) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000100)=@newlink={0x48, 0x10, 0xffffff1f, 0x0, 0x0, {}, [@IFLA_LINKINFO={0x28, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x18, 0x2, 0x0, 0x1, [@IFLA_BR_MULTI_BOOLOPT={0xc, 0x2e, {0x0, 0x1}}, @IFLA_BR_MCAST_STATS_ENABLED={0x5}]}}}]}, 0x48}}, 0x0) [ 2883.938203][T23300] netlink: 'syz-executor.2': attribute type 1 has an invalid length. 
[ 2884.057364][T23300] bond1038: entered promiscuous mode [ 2884.084717][T23300] 8021q: adding VLAN 0 to HW filter on device bond1038 [ 2884.183998][T23304] bond1038: (slave bridge1104): making interface the new active one [ 2884.194167][T23304] bridge1104: entered promiscuous mode [ 2884.215707][T23304] bond1038: (slave bridge1104): Enslaving as an active interface with an up link 17:04:09 executing program 2: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x48, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) [ 2884.240918][T23306] netlink: 'syz-executor.1': attribute type 1 has an invalid length. [ 2884.295364][T23306] bond802: entered promiscuous mode [ 2884.302507][T23306] 8021q: adding VLAN 0 to HW filter on device bond802 [ 2884.316034][T23310] netlink: 'syz-executor.5': attribute type 1 has an invalid length. 17:04:09 executing program 1: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x28a, 0xa, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8}]}, 0x3c}}, 0x0) [ 2884.397972][T23310] bond1112: entered promiscuous mode [ 2884.405962][T23310] 8021q: adding VLAN 0 to HW filter on device bond1112 [ 2884.474506][T23311] bond1112: (slave bridge1138): making interface the new active one [ 2884.501096][T23311] bridge1138: entered promiscuous mode [ 2884.514677][T23311] bond1112: (slave bridge1138): Enslaving as an active interface with an up link 17:04:09 executing program 5: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x3d2, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, 
@bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) [ 2884.603313][T23314] bond680: entered promiscuous mode [ 2884.615175][T23314] 8021q: adding VLAN 0 to HW filter on device bond680 17:04:09 executing program 3: r0 = socket$netlink(0x10, 0x3, 0x0) connect$pppl2tp(0xffffffffffffffff, &(0x7f0000000000)=@pppol2tp={0x18, 0x1, {0x0, r0, {0x2, 0x4e20, @empty}, 0x3, 0x4, 0x0, 0x1}}, 0x26) socket(0x0, 0x0, 0x0) (async) socket(0x0, 0x0, 0x0) sendmsg$nl_route(0xffffffffffffffff, 0x0, 0x0) r1 = syz_init_net_socket$802154_dgram(0x24, 0x2, 0x0) ioctl$BTRFS_IOC_GET_SUBVOL_INFO(r1, 0x81f8943c, &(0x7f0000000280)) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000100)=@newlink={0x48, 0x10, 0xffffff1f, 0x0, 0x0, {}, [@IFLA_LINKINFO={0x28, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x18, 0x2, 0x0, 0x1, [@IFLA_BR_MULTI_BOOLOPT={0xc, 0x2e, {0x0, 0x1}}, @IFLA_BR_MCAST_STATS_ENABLED={0x5}]}}}]}, 0x48}}, 0x0) (async) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000100)=@newlink={0x48, 0x10, 0xffffff1f, 0x0, 0x0, {}, [@IFLA_LINKINFO={0x28, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x18, 0x2, 0x0, 0x1, [@IFLA_BR_MULTI_BOOLOPT={0xc, 0x2e, {0x0, 0x1}}, @IFLA_BR_MCAST_STATS_ENABLED={0x5}]}}}]}, 0x48}}, 0x0) [ 2884.701217][T23323] netlink: 12 bytes leftover after parsing attributes in process `syz-executor.0'. 17:04:09 executing program 4: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x28a, 0x0, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0x25}]}, 0x3c}}, 0x0) 17:04:09 executing program 0: r0 = socket$nl_route(0x10, 0x3, 0x0) (async) r1 = socket(0x10, 0x803, 0x0) r2 = syz_genetlink_get_family_id$ipvs(&(0x7f00000003c0), r1) (async) getsockname$packet(r1, &(0x7f0000000100)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000200)=0x14) (async) ioctl$sock_kcm_SIOCKCMCLONE(r1, 0x89e2, &(0x7f0000000340)={r0}) (async) ioctl$ifreq_SIOCGIFINDEX_batadv_mesh(r1, 0x8933, &(0x7f0000000440)={'batadv0\x00', 0x0}) sendmsg$DEVLINK_CMD_SB_POOL_SET(r4, &(0x7f0000000a00)={&(0x7f0000000700)={0x10, 0x0, 0x0, 0x86000000}, 0xc, &(0x7f00000009c0)={&(0x7f0000000a40)=ANY=[@ANYBLOB="4402000026188e34299792486b0eaba82b63b3b1d21df48036fd83f15bf40064c4348b02395dba5a65c573e0bc89fed8e26b0862903dd291ae8fc60c225d59f616a098d4ebc61007aa8bf27a816218d98beab1389c8cc05d072e0dc3862ca00b9be1b2eac84516c58008b43882", @ANYRES16=0x0, 
@ANYBLOB="200028bd7000fedbdf25100000000e0001006e657464657673696d0000000f0002006e657464657673696d30000008000b000100000006001100ff0700000800130000f8ffff05001400000000000e0001006e657464657673696d0000000f0002006e657464657673696d30000008000b0002000000060011000200000008001300040000000500140001000000080001007063690011000200303030303a30303a31302e300000000008000b00afc100000600110001000000080013008100000005001400010000000e0001006e657464657673696d0000000f0002006e657464657673696d30000008000b00000000000600110000000000080013000000008005001400010000000e0001006e657464657673696d0000000f0002006e657464657673696d30000008000b0000100000060011007a00000008001300ff00000005001400000000000e0001006e657464657673696d0000000f0002006e657464657673696d30000008000b00b604000006001100fdff000008001300050000000500140000000000080001007063690011000200303030303a30303a31302e300000000008000b00aabc0000060011000600000008001300060000000500140000000000080001007063690011000200303030303a30303a31302e300000000008000b0001000000060011008000000008001300ffffffff0500140000000000080001007063690011000200303030303a30303a31302e300000000008000b0009000000060011000557000008001300000800000500140000000000"], 0x244}, 0x1, 0x0, 0x0, 0x4000040}, 0x24048004) (async) sendmsg$nl_route(r4, &(0x7f0000000580)={&(0x7f0000000400)={0x10, 0x0, 0x0, 0x10000}, 0xc, &(0x7f0000000480)={&(0x7f0000000500)=@mpls_getnetconf={0x44, 0x52, 0x10, 0x70bd2d, 0x25dfdbfc, {}, [@IGNORE_ROUTES_WITH_LINKDOWN={0x8}, @IGNORE_ROUTES_WITH_LINKDOWN={0x8, 0x6, 0x7ff}, @NETCONFA_IFINDEX={0x8, 0x1, r3}, @NETCONFA_IFINDEX={0x8, 0x1, r5}, @NETCONFA_IFINDEX={0x8, 0x1, r3}, @IGNORE_ROUTES_WITH_LINKDOWN={0x8}]}, 0x44}, 0x1, 0x0, 0x0, 0x4}, 0x10) sendmsg$nl_route(r0, &(0x7f0000000180)={0x0, 0x0, &(0x7f00000000c0)={&(0x7f0000000380)=ANY=[@ANYBLOB="3c0000001000010400000400ec00000000000000", @ANYRES32=r3, @ANYBLOB="00000000000000001c0012000b000100627269646765"], 0x3c}}, 0x0) (async) r6 = openat$cgroup_ro(0xffffffffffffffff, &(0x7f0000000140)='freezer.parent_freezing\x00', 0x0, 0x0) getsockname$packet(r6, &(0x7f0000000240)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @multicast}, &(0x7f0000000300)=0x14) (async) r7 = socket$netlink(0x10, 0x3, 0x0) (async) getsockname$packet(r1, &(0x7f00000001c0)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f00000004c0)=0x14) sendmsg$nl_route(r7, &(0x7f0000000080)={0x0, 0x0, &(0x7f0000000040)={&(0x7f00000000c0)=@newlink={0x28, 0x10, 0x825, 0x0, 0x0, {0xa, 0x0, 0x0, r8}, [@IFLA_PROTO_DOWN={0x8, 0xa, 0xf}]}, 0x28}}, 0x0) (async) r9 = socket$nl_route(0x10, 0x3, 0x0) (async) r10 = socket$packet(0x11, 0x3, 0x300) getsockname$packet(r10, &(0x7f0000000100)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000200)) setsockopt$inet_group_source_req(r4, 0x0, 0x2c, &(0x7f00000005c0)={0x10000, {{0x2, 0x4e21, @multicast2}}, {{0x2, 0x4e23, @empty}}}, 0x108) (async) sendmsg$nl_route(r9, &(0x7f00000002c0)={0x0, 0x0, &(0x7f0000000280)={&(0x7f0000000000)=ANY=[@ANYRES8=r11, @ANYRES32=r11, @ANYRESDEC=r2], 0x20}}, 0x0) [ 2884.933851][T23324] bond0: (slave bridge906): Enslaving as an active interface with an up link 17:04:10 executing program 0: r0 = socket$nl_route(0x10, 0x3, 0x0) (async, rerun: 64) r1 = socket(0x10, 0x803, 0x0) (rerun: 64) r2 = syz_genetlink_get_family_id$ipvs(&(0x7f00000003c0), r1) (async) getsockname$packet(r1, &(0x7f0000000100)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000200)=0x14) (async) ioctl$sock_kcm_SIOCKCMCLONE(r1, 0x89e2, &(0x7f0000000340)={r0}) ioctl$ifreq_SIOCGIFINDEX_batadv_mesh(r1, 0x8933, &(0x7f0000000440)={'batadv0\x00', 0x0}) (async) 
sendmsg$DEVLINK_CMD_SB_POOL_SET(r4, &(0x7f0000000a00)={&(0x7f0000000700)={0x10, 0x0, 0x0, 0x86000000}, 0xc, &(0x7f00000009c0)={&(0x7f0000000a40)=ANY=[@ANYBLOB="4402000026188e34299792486b0eaba82b63b3b1d21df48036fd83f15bf40064c4348b02395dba5a65c573e0bc89fed8e26b0862903dd291ae8fc60c225d59f616a098d4ebc61007aa8bf27a816218d98beab1389c8cc05d072e0dc3862ca00b9be1b2eac84516c58008b43882", @ANYRES16=0x0, @ANYBLOB="200028bd7000fedbdf25100000000e0001006e657464657673696d0000000f0002006e657464657673696d30000008000b000100000006001100ff0700000800130000f8ffff05001400000000000e0001006e657464657673696d0000000f0002006e657464657673696d30000008000b0002000000060011000200000008001300040000000500140001000000080001007063690011000200303030303a30303a31302e300000000008000b00afc100000600110001000000080013008100000005001400010000000e0001006e657464657673696d0000000f0002006e657464657673696d30000008000b00000000000600110000000000080013000000008005001400010000000e0001006e657464657673696d0000000f0002006e657464657673696d30000008000b0000100000060011007a00000008001300ff00000005001400000000000e0001006e657464657673696d0000000f0002006e657464657673696d30000008000b00b604000006001100fdff000008001300050000000500140000000000080001007063690011000200303030303a30303a31302e300000000008000b00aabc0000060011000600000008001300060000000500140000000000080001007063690011000200303030303a30303a31302e300000000008000b0001000000060011008000000008001300ffffffff0500140000000000080001007063690011000200303030303a30303a31302e300000000008000b0009000000060011000557000008001300000800000500140000000000"], 0x244}, 0x1, 0x0, 0x0, 0x4000040}, 0x24048004) sendmsg$nl_route(r4, &(0x7f0000000580)={&(0x7f0000000400)={0x10, 0x0, 0x0, 0x10000}, 0xc, &(0x7f0000000480)={&(0x7f0000000500)=@mpls_getnetconf={0x44, 0x52, 0x10, 0x70bd2d, 0x25dfdbfc, {}, [@IGNORE_ROUTES_WITH_LINKDOWN={0x8}, @IGNORE_ROUTES_WITH_LINKDOWN={0x8, 0x6, 0x7ff}, @NETCONFA_IFINDEX={0x8, 0x1, r3}, @NETCONFA_IFINDEX={0x8, 0x1, r5}, @NETCONFA_IFINDEX={0x8, 0x1, r3}, @IGNORE_ROUTES_WITH_LINKDOWN={0x8}]}, 0x44}, 0x1, 0x0, 0x0, 0x4}, 0x10) sendmsg$nl_route(r0, &(0x7f0000000180)={0x0, 0x0, &(0x7f00000000c0)={&(0x7f0000000380)=ANY=[@ANYBLOB="3c0000001000010400000400ec00000000000000", @ANYRES32=r3, @ANYBLOB="00000000000000001c0012000b000100627269646765"], 0x3c}}, 0x0) (async) r6 = openat$cgroup_ro(0xffffffffffffffff, &(0x7f0000000140)='freezer.parent_freezing\x00', 0x0, 0x0) getsockname$packet(r6, &(0x7f0000000240)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @multicast}, &(0x7f0000000300)=0x14) r7 = socket$netlink(0x10, 0x3, 0x0) (async) getsockname$packet(r1, &(0x7f00000001c0)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f00000004c0)=0x14) sendmsg$nl_route(r7, &(0x7f0000000080)={0x0, 0x0, &(0x7f0000000040)={&(0x7f00000000c0)=@newlink={0x28, 0x10, 0x825, 0x0, 0x0, {0xa, 0x0, 0x0, r8}, [@IFLA_PROTO_DOWN={0x8, 0xa, 0xf}]}, 0x28}}, 0x0) (async, rerun: 32) r9 = socket$nl_route(0x10, 0x3, 0x0) (async, rerun: 32) r10 = socket$packet(0x11, 0x3, 0x300) getsockname$packet(r10, &(0x7f0000000100)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000200)) setsockopt$inet_group_source_req(r4, 0x0, 0x2c, &(0x7f00000005c0)={0x10000, {{0x2, 0x4e21, @multicast2}}, {{0x2, 0x4e23, @empty}}}, 0x108) (async, rerun: 64) sendmsg$nl_route(r9, &(0x7f00000002c0)={0x0, 0x0, &(0x7f0000000280)={&(0x7f0000000000)=ANY=[@ANYRES8=r11, @ANYRES32=r11, @ANYRESDEC=r2], 0x20}}, 0x0) (rerun: 64) [ 2885.042183][T23329] bond1039: entered promiscuous mode [ 2885.051202][T23329] 8021q: adding VLAN 0 to HW filter on device bond1039 17:04:10 executing program 
2: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x4a, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) [ 2885.146082][T23330] bond1039: (slave bridge1105): making interface the new active one [ 2885.155800][T23330] bridge1105: entered promiscuous mode [ 2885.167579][T23330] bond1039: (slave bridge1105): Enslaving as an active interface with an up link [ 2885.282701][T23332] bond803: entered promiscuous mode [ 2885.289182][T23332] 8021q: adding VLAN 0 to HW filter on device bond803 17:04:10 executing program 1: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x28a, 0xa, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4, 0x2, 0x0, 0x0}}}, @IFLA_MASTER={0x8}]}, 0x3c}}, 0x0) [ 2885.431213][T23336] bond1113: entered promiscuous mode [ 2885.437026][T23336] 8021q: adding VLAN 0 to HW filter on device bond1113 [ 2885.511268][T23337] bond1113: (slave bridge1139): making interface the new active one [ 2885.521486][T23337] bridge1139: entered promiscuous mode [ 2885.542165][T23337] bond1113: (slave bridge1139): Enslaving as an active interface with an up link 17:04:10 executing program 5: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x3da, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) 17:04:10 executing program 3: r0 = socket$netlink(0x10, 0x3, 0x0) socket(0x0, 0x0, 0x0) sendmsg$nl_route(0xffffffffffffffff, 0x0, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000000)=@newlink={0x4c, 0x10, 0xffffff1f, 0x0, 
0x0, {0x0, 0x0, 0x0, 0x0, 0x0, 0x4d12}, [@IFLA_LINKINFO={0x2c, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x1c, 0x2, 0x0, 0x1, [@IFLA_BR_MULTI_BOOLOPT={0xc, 0x2e, {0x0, 0x1}}, @IFLA_BR_MCAST_LAST_MEMBER_INTVL={0xc, 0x1e, 0x101}]}}}]}, 0x4c}}, 0x0) [ 2885.694691][T23346] bond681: entered promiscuous mode [ 2885.704812][T23346] 8021q: adding VLAN 0 to HW filter on device bond681 17:04:10 executing program 4: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x28a, 0x0, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0x26}]}, 0x3c}}, 0x0) 17:04:10 executing program 0: sendmsg$nl_route_sched(0xffffffffffffffff, &(0x7f0000000200)={0x0, 0x0, &(0x7f0000000180)={0x0, 0x38}}, 0x0) r0 = socket(0x1000000010, 0x80002, 0x0) sendmsg$nl_route(0xffffffffffffffff, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000040)={&(0x7f0000000100)=@newlink={0x20}, 0x20}}, 0x0) sendmsg$can_bcm(0xffffffffffffffff, &(0x7f0000000180)={&(0x7f0000000000), 0x10, 0x0}, 0x0) sendmsg$nl_route(0xffffffffffffffff, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000300)={&(0x7f0000000000)=ANY=[@ANYBLOB="5400000010001fff00"/20, @ANYRES32=0x0, @ANYBLOB="00000000000000003400128009000100626f6e64000000002400028008000700002b020005000e000000000008001f"], 0x54}}, 0x0) sendmmsg$alg(r0, &(0x7f0000000200), 0x10efe10675dec16, 0x0) r1 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000140)='memory.events\x00', 0x7a05, 0x1700) r2 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) write$cgroup_int(r1, &(0x7f0000000200), 0xf000) sendfile(r1, r2, 0x0, 0xf03b0000) r3 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) sendfile(r3, r2, &(0x7f00000002c0)=0x335773c3, 0x8) sendmsg$IPVS_CMD_GET_DEST(r3, &(0x7f0000000140)={&(0x7f0000000080), 0xc, &(0x7f00000000c0)={&(0x7f0000000340)={0x11c, 0x0, 0x1, 0x70bd27, 0x25dfdbff, {}, [@IPVS_CMD_ATTR_SERVICE={0x48, 0x1, 0x0, 0x1, [@IPVS_SVC_ATTR_FWMARK={0x8}, @IPVS_SVC_ATTR_TIMEOUT={0x8, 0x8, 0xff}, @IPVS_SVC_ATTR_TIMEOUT={0x8, 0x8, 0x4}, @IPVS_SVC_ATTR_NETMASK={0x8, 0x9, 0x4a}, @IPVS_SVC_ATTR_PORT={0x6, 0x4, 0x4e24}, @IPVS_SVC_ATTR_AF={0x6, 0x1, 0x14}, @IPVS_SVC_ATTR_ADDR={0x14, 0x3, @ipv4=@loopback}]}, @IPVS_CMD_ATTR_DAEMON={0x5c, 0x3, 0x0, 0x1, [@IPVS_DAEMON_ATTR_MCAST_IFN={0x14, 0x2, 'gre0\x00'}, @IPVS_DAEMON_ATTR_MCAST_PORT={0x6, 0x7, 0x4e20}, @IPVS_DAEMON_ATTR_SYNC_ID={0x8}, @IPVS_DAEMON_ATTR_MCAST_TTL={0x5, 0x8, 0xc0}, @IPVS_DAEMON_ATTR_STATE={0x8, 0x1, 0x2}, @IPVS_DAEMON_ATTR_SYNC_MAXLEN={0x6, 0x4, 0x4}, @IPVS_DAEMON_ATTR_MCAST_GROUP6={0x14, 0x6, @local}, @IPVS_DAEMON_ATTR_SYNC_ID={0x8, 0x3, 0x2}]}, @IPVS_CMD_ATTR_DEST={0x2c, 0x2, 0x0, 0x1, [@IPVS_DEST_ATTR_L_THRESH={0x8, 0x6, 0x4}, @IPVS_DEST_ATTR_TUN_PORT={0x6, 0xe, 0x4e23}, @IPVS_DEST_ATTR_FWD_METHOD={0x8}, @IPVS_DEST_ATTR_L_THRESH={0x8, 0x6, 0x3}, @IPVS_DEST_ATTR_WEIGHT={0x8, 0x4, 0x4}]}, @IPVS_CMD_ATTR_TIMEOUT_TCP_FIN={0x8, 
0x5, 0x3}, @IPVS_CMD_ATTR_DAEMON={0x14, 0x3, 0x0, 0x1, [@IPVS_DAEMON_ATTR_SYNC_MAXLEN={0x6, 0x4, 0x1}, @IPVS_DAEMON_ATTR_STATE={0x8, 0x1, 0x1}]}, @IPVS_CMD_ATTR_DEST={0x14, 0x2, 0x0, 0x1, [@IPVS_DEST_ATTR_ACTIVE_CONNS={0x8, 0x7, 0x3ff}, @IPVS_DEST_ATTR_TUN_TYPE={0x5}]}, @IPVS_CMD_ATTR_TIMEOUT_TCP_FIN={0x8, 0x5, 0x81}]}, 0x11c}, 0x1, 0x0, 0x0, 0x4004000}, 0x10) [ 2885.808413][T23363] bond1040: entered promiscuous mode [ 2885.814304][T23363] 8021q: adding VLAN 0 to HW filter on device bond1040 [ 2885.952944][T23364] bond1040: (slave bridge1106): making interface the new active one [ 2885.963433][T23364] bridge1106: entered promiscuous mode [ 2885.982203][T23364] bond1040: (slave bridge1106): Enslaving as an active interface with an up link 17:04:11 executing program 2: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x4c, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) [ 2886.094508][T23368] bond804: entered promiscuous mode [ 2886.105746][T23368] 8021q: adding VLAN 0 to HW filter on device bond804 17:04:11 executing program 1: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x28a, 0xa, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8}]}, 0xf}}, 0x0) [ 2886.257345][T23373] bond1114: entered promiscuous mode [ 2886.274753][T23373] 8021q: adding VLAN 0 to HW filter on device bond1114 [ 2886.383016][T23375] bond1114: (slave bridge1140): making interface the new active one [ 2886.395018][T23375] bridge1140: entered promiscuous mode [ 2886.411456][T23375] bond1114: (slave bridge1140): Enslaving as an active interface with an up link 17:04:11 executing program 5: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 
0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x3e2, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) 17:04:11 executing program 3: r0 = socket$netlink(0x10, 0x3, 0x0) (async) socket(0x0, 0x0, 0x0) (async) sendmsg$nl_route(0xffffffffffffffff, 0x0, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000000)=@newlink={0x4c, 0x10, 0xffffff1f, 0x0, 0x0, {0x0, 0x0, 0x0, 0x0, 0x0, 0x4d12}, [@IFLA_LINKINFO={0x2c, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x1c, 0x2, 0x0, 0x1, [@IFLA_BR_MULTI_BOOLOPT={0xc, 0x2e, {0x0, 0x1}}, @IFLA_BR_MCAST_LAST_MEMBER_INTVL={0xc, 0x1e, 0x101}]}}}]}, 0x4c}}, 0x0) [ 2886.464143][T23381] netlink: 8 bytes leftover after parsing attributes in process `syz-executor.0'. [ 2886.489108][T23381] (unnamed net_device) (uninitialized): Removing last ns target with arp_interval on 17:04:11 executing program 0: sendmsg$nl_route_sched(0xffffffffffffffff, &(0x7f0000000200)={0x0, 0x0, &(0x7f0000000180)={0x0, 0x38}}, 0x0) (async) r0 = socket(0x1000000010, 0x80002, 0x0) sendmsg$nl_route(0xffffffffffffffff, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000040)={&(0x7f0000000100)=@newlink={0x20}, 0x20}}, 0x0) (async) sendmsg$can_bcm(0xffffffffffffffff, &(0x7f0000000180)={&(0x7f0000000000), 0x10, 0x0}, 0x0) (async) sendmsg$nl_route(0xffffffffffffffff, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000300)={&(0x7f0000000000)=ANY=[@ANYBLOB="5400000010001fff00"/20, @ANYRES32=0x0, @ANYBLOB="00000000000000003400128009000100626f6e64000000002400028008000700002b020005000e000000000008001f"], 0x54}}, 0x0) (async) sendmmsg$alg(r0, &(0x7f0000000200), 0x10efe10675dec16, 0x0) (async) r1 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000140)='memory.events\x00', 0x7a05, 0x1700) r2 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) write$cgroup_int(r1, &(0x7f0000000200), 0xf000) (async) sendfile(r1, r2, 0x0, 0xf03b0000) (async) r3 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) sendfile(r3, r2, &(0x7f00000002c0)=0x335773c3, 0x8) sendmsg$IPVS_CMD_GET_DEST(r3, &(0x7f0000000140)={&(0x7f0000000080), 0xc, &(0x7f00000000c0)={&(0x7f0000000340)={0x11c, 0x0, 0x1, 0x70bd27, 0x25dfdbff, {}, [@IPVS_CMD_ATTR_SERVICE={0x48, 0x1, 0x0, 0x1, [@IPVS_SVC_ATTR_FWMARK={0x8}, @IPVS_SVC_ATTR_TIMEOUT={0x8, 0x8, 0xff}, @IPVS_SVC_ATTR_TIMEOUT={0x8, 0x8, 0x4}, @IPVS_SVC_ATTR_NETMASK={0x8, 0x9, 0x4a}, @IPVS_SVC_ATTR_PORT={0x6, 0x4, 0x4e24}, @IPVS_SVC_ATTR_AF={0x6, 0x1, 0x14}, @IPVS_SVC_ATTR_ADDR={0x14, 0x3, @ipv4=@loopback}]}, @IPVS_CMD_ATTR_DAEMON={0x5c, 0x3, 0x0, 0x1, [@IPVS_DAEMON_ATTR_MCAST_IFN={0x14, 0x2, 'gre0\x00'}, @IPVS_DAEMON_ATTR_MCAST_PORT={0x6, 0x7, 0x4e20}, @IPVS_DAEMON_ATTR_SYNC_ID={0x8}, @IPVS_DAEMON_ATTR_MCAST_TTL={0x5, 0x8, 0xc0}, @IPVS_DAEMON_ATTR_STATE={0x8, 0x1, 0x2}, @IPVS_DAEMON_ATTR_SYNC_MAXLEN={0x6, 0x4, 0x4}, @IPVS_DAEMON_ATTR_MCAST_GROUP6={0x14, 0x6, @local}, @IPVS_DAEMON_ATTR_SYNC_ID={0x8, 0x3, 0x2}]}, @IPVS_CMD_ATTR_DEST={0x2c, 0x2, 0x0, 0x1, [@IPVS_DEST_ATTR_L_THRESH={0x8, 0x6, 0x4}, @IPVS_DEST_ATTR_TUN_PORT={0x6, 0xe, 0x4e23}, @IPVS_DEST_ATTR_FWD_METHOD={0x8}, @IPVS_DEST_ATTR_L_THRESH={0x8, 0x6, 0x3}, @IPVS_DEST_ATTR_WEIGHT={0x8, 0x4, 0x4}]}, @IPVS_CMD_ATTR_TIMEOUT_TCP_FIN={0x8, 0x5, 0x3}, @IPVS_CMD_ATTR_DAEMON={0x14, 0x3, 0x0, 0x1, [@IPVS_DAEMON_ATTR_SYNC_MAXLEN={0x6, 0x4, 0x1}, @IPVS_DAEMON_ATTR_STATE={0x8, 0x1, 0x1}]}, @IPVS_CMD_ATTR_DEST={0x14, 0x2, 0x0, 0x1, 
[@IPVS_DEST_ATTR_ACTIVE_CONNS={0x8, 0x7, 0x3ff}, @IPVS_DEST_ATTR_TUN_TYPE={0x5}]}, @IPVS_CMD_ATTR_TIMEOUT_TCP_FIN={0x8, 0x5, 0x81}]}, 0x11c}, 0x1, 0x0, 0x0, 0x4004000}, 0x10) [ 2886.688298][T23385] bond682: entered promiscuous mode [ 2886.694513][T23385] 8021q: adding VLAN 0 to HW filter on device bond682 17:04:11 executing program 4: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x28a, 0x0, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0x27}]}, 0x3c}}, 0x0) [ 2886.828510][T23390] bond1041: entered promiscuous mode [ 2886.849863][T23390] 8021q: adding VLAN 0 to HW filter on device bond1041 [ 2886.998334][T23393] bond1041: (slave bridge1107): making interface the new active one [ 2887.023090][T23393] bridge1107: entered promiscuous mode [ 2887.048152][T23393] bond1041: (slave bridge1107): Enslaving as an active interface with an up link 17:04:12 executing program 2: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x60, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) [ 2887.137660][T23396] bond805: entered promiscuous mode [ 2887.150243][T23396] 8021q: adding VLAN 0 to HW filter on device bond805 17:04:12 executing program 1: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x28a, 0xa, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8}]}, 0x10}}, 0x0) [ 2887.304657][T23401] bond1115: entered promiscuous mode [ 2887.321676][T23401] 8021q: adding VLAN 0 to HW filter on device bond1115 [ 2887.422878][T23413] bond1115: (slave bridge1141): making 
interface the new active one [ 2887.432644][T23413] bridge1141: entered promiscuous mode [ 2887.446987][T23413] bond1115: (slave bridge1141): Enslaving as an active interface with an up link 17:04:12 executing program 5: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x3ea, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) 17:04:12 executing program 3: r0 = socket$netlink(0x10, 0x3, 0x0) socket(0x0, 0x0, 0x0) (async) sendmsg$nl_route(0xffffffffffffffff, 0x0, 0x0) (async) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000000)=@newlink={0x4c, 0x10, 0xffffff1f, 0x0, 0x0, {0x0, 0x0, 0x0, 0x0, 0x0, 0x4d12}, [@IFLA_LINKINFO={0x2c, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x1c, 0x2, 0x0, 0x1, [@IFLA_BR_MULTI_BOOLOPT={0xc, 0x2e, {0x0, 0x1}}, @IFLA_BR_MCAST_LAST_MEMBER_INTVL={0xc, 0x1e, 0x101}]}}}]}, 0x4c}}, 0x0) [ 2887.513060][T23410] netlink: 8 bytes leftover after parsing attributes in process `syz-executor.0'. [ 2887.532287][T23410] (unnamed net_device) (uninitialized): Removing last ns target with arp_interval on 17:04:12 executing program 0: sendmsg$nl_route_sched(0xffffffffffffffff, &(0x7f0000000200)={0x0, 0x0, &(0x7f0000000180)={0x0, 0x38}}, 0x0) r0 = socket(0x1000000010, 0x80002, 0x0) sendmsg$nl_route(0xffffffffffffffff, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000040)={&(0x7f0000000100)=@newlink={0x20}, 0x20}}, 0x0) sendmsg$can_bcm(0xffffffffffffffff, &(0x7f0000000180)={&(0x7f0000000000), 0x10, 0x0}, 0x0) sendmsg$nl_route(0xffffffffffffffff, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000300)={&(0x7f0000000000)=ANY=[@ANYBLOB="5400000010001fff00"/20, @ANYRES32=0x0, @ANYBLOB="00000000000000003400128009000100626f6e64000000002400028008000700002b020005000e000000000008001f"], 0x54}}, 0x0) (async) sendmmsg$alg(r0, &(0x7f0000000200), 0x10efe10675dec16, 0x0) r1 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000140)='memory.events\x00', 0x7a05, 0x1700) r2 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) write$cgroup_int(r1, &(0x7f0000000200), 0xf000) (async, rerun: 64) sendfile(r1, r2, 0x0, 0xf03b0000) (rerun: 64) r3 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) sendfile(r3, r2, &(0x7f00000002c0)=0x335773c3, 0x8) (async) sendmsg$IPVS_CMD_GET_DEST(r3, &(0x7f0000000140)={&(0x7f0000000080), 0xc, &(0x7f00000000c0)={&(0x7f0000000340)={0x11c, 0x0, 0x1, 0x70bd27, 0x25dfdbff, {}, [@IPVS_CMD_ATTR_SERVICE={0x48, 0x1, 0x0, 0x1, [@IPVS_SVC_ATTR_FWMARK={0x8}, @IPVS_SVC_ATTR_TIMEOUT={0x8, 0x8, 0xff}, @IPVS_SVC_ATTR_TIMEOUT={0x8, 0x8, 0x4}, @IPVS_SVC_ATTR_NETMASK={0x8, 0x9, 0x4a}, @IPVS_SVC_ATTR_PORT={0x6, 0x4, 0x4e24}, @IPVS_SVC_ATTR_AF={0x6, 0x1, 0x14}, @IPVS_SVC_ATTR_ADDR={0x14, 0x3, @ipv4=@loopback}]}, @IPVS_CMD_ATTR_DAEMON={0x5c, 0x3, 0x0, 0x1, [@IPVS_DAEMON_ATTR_MCAST_IFN={0x14, 0x2, 'gre0\x00'}, 
@IPVS_DAEMON_ATTR_MCAST_PORT={0x6, 0x7, 0x4e20}, @IPVS_DAEMON_ATTR_SYNC_ID={0x8}, @IPVS_DAEMON_ATTR_MCAST_TTL={0x5, 0x8, 0xc0}, @IPVS_DAEMON_ATTR_STATE={0x8, 0x1, 0x2}, @IPVS_DAEMON_ATTR_SYNC_MAXLEN={0x6, 0x4, 0x4}, @IPVS_DAEMON_ATTR_MCAST_GROUP6={0x14, 0x6, @local}, @IPVS_DAEMON_ATTR_SYNC_ID={0x8, 0x3, 0x2}]}, @IPVS_CMD_ATTR_DEST={0x2c, 0x2, 0x0, 0x1, [@IPVS_DEST_ATTR_L_THRESH={0x8, 0x6, 0x4}, @IPVS_DEST_ATTR_TUN_PORT={0x6, 0xe, 0x4e23}, @IPVS_DEST_ATTR_FWD_METHOD={0x8}, @IPVS_DEST_ATTR_L_THRESH={0x8, 0x6, 0x3}, @IPVS_DEST_ATTR_WEIGHT={0x8, 0x4, 0x4}]}, @IPVS_CMD_ATTR_TIMEOUT_TCP_FIN={0x8, 0x5, 0x3}, @IPVS_CMD_ATTR_DAEMON={0x14, 0x3, 0x0, 0x1, [@IPVS_DAEMON_ATTR_SYNC_MAXLEN={0x6, 0x4, 0x1}, @IPVS_DAEMON_ATTR_STATE={0x8, 0x1, 0x1}]}, @IPVS_CMD_ATTR_DEST={0x14, 0x2, 0x0, 0x1, [@IPVS_DEST_ATTR_ACTIVE_CONNS={0x8, 0x7, 0x3ff}, @IPVS_DEST_ATTR_TUN_TYPE={0x5}]}, @IPVS_CMD_ATTR_TIMEOUT_TCP_FIN={0x8, 0x5, 0x81}]}, 0x11c}, 0x1, 0x0, 0x0, 0x4004000}, 0x10) [ 2887.550681][T23410] workqueue: Failed to create a rescuer kthread for wq "bond510": -EINTR [ 2887.662656][T23419] validate_nla: 12 callbacks suppressed [ 2887.662681][T23419] netlink: 'syz-executor.4': attribute type 1 has an invalid length. [ 2887.696630][T23419] workqueue: Failed to create a rescuer kthread for wq "bond683": -EINTR [ 2887.771353][T23420] netlink: 'syz-executor.4': attribute type 39 has an invalid length. 17:04:12 executing program 4: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x28a, 0x0, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0x28}]}, 0x3c}}, 0x0) [ 2887.837895][T23424] netlink: 'syz-executor.2': attribute type 1 has an invalid length. 
[ 2887.970945][T23424] bond1042: entered promiscuous mode [ 2887.976978][T23424] 8021q: adding VLAN 0 to HW filter on device bond1042 [ 2888.162435][T23425] bond1042: (slave bridge1108): making interface the new active one [ 2888.172083][T23425] bridge1108: entered promiscuous mode [ 2888.194306][T23425] bond1042: (slave bridge1108): Enslaving as an active interface with an up link 17:04:13 executing program 2: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x62, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) [ 2888.284386][T23428] netlink: 'syz-executor.1': attribute type 1 has an invalid length. 17:04:13 executing program 1: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x28a, 0xa, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8}]}, 0x140}}, 0x0) [ 2888.315318][T23428] workqueue: Failed to create a rescuer kthread for wq "bond806": -EINTR [ 2888.383474][T23433] netlink: 'syz-executor.5': attribute type 1 has an invalid length. [ 2888.451273][T23433] bond1116: entered promiscuous mode [ 2888.461925][T23433] 8021q: adding VLAN 0 to HW filter on device bond1116 [ 2888.483777][T23458] netlink: 'syz-executor.1': attribute type 1 has an invalid length. 
[ 2888.527647][T23458] bond806: entered promiscuous mode [ 2888.534820][T23458] 8021q: adding VLAN 0 to HW filter on device bond806 17:04:13 executing program 5: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x3ee, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) [ 2888.596568][T23437] bond1116: (slave bridge1142): making interface the new active one [ 2888.605661][T23437] bridge1142: entered promiscuous mode [ 2888.619608][T23437] bond1116: (slave bridge1142): Enslaving as an active interface with an up link [ 2888.683611][T23445] netlink: 8 bytes leftover after parsing attributes in process `syz-executor.0'. [ 2888.696845][T23445] (unnamed net_device) (uninitialized): Removing last ns target with arp_interval on 17:04:13 executing program 3: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket(0x0, 0x0, 0x0) sendmsg$nl_route(0xffffffffffffffff, 0x0, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000100)=@newlink={0x48, 0x10, 0xffffff1f, 0x0, 0x0, {}, [@IFLA_LINKINFO={0x28, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x18, 0x2, 0x0, 0x1, [@IFLA_BR_MULTI_BOOLOPT={0xc, 0x2e, {0x0, 0x1}}, @IFLA_BR_MCAST_STATS_ENABLED={0x5}]}}}]}, 0x48}}, 0x0) r2 = accept(r1, &(0x7f0000000000)=@l2={0x1f, 0x0, @none}, &(0x7f0000000080)=0x80) ioctl$sock_ipv6_tunnel_SIOCGET6RD(r1, 0x89f8, &(0x7f0000000200)={'sit0\x00', &(0x7f0000000180)={'syztnl2\x00', 0x0, 0x80, 0x700, 0x2, 0x5, {{0xc, 0x4, 0x1, 0x11, 0x30, 0x66, 0x0, 0xc8, 0x2f, 0x0, @loopback, @multicast2, {[@generic={0x82, 0xd, "72fbee9453606defd218bd"}, @ssrr={0x89, 0xf, 0xd4, [@loopback, @empty, @loopback]}]}}}}}) sendmsg$ETHTOOL_MSG_EEE_SET(r2, &(0x7f0000000340)={&(0x7f00000000c0)={0x10, 0x0, 0x0, 0x800000}, 0xc, &(0x7f0000000300)={&(0x7f0000000280)={0x5c, 0x0, 0x400, 0x70bd28, 0x25dfdbfc, {}, [@ETHTOOL_A_EEE_TX_LPI_ENABLED={0x5}, @ETHTOOL_A_EEE_HEADER={0x30, 0x1, 0x0, 0x1, [@ETHTOOL_A_HEADER_DEV_INDEX={0x8, 0x1, r3}, @ETHTOOL_A_HEADER_FLAGS={0x8, 0x3, 0x1}, @ETHTOOL_A_HEADER_FLAGS={0x8}, @ETHTOOL_A_HEADER_DEV_NAME={0x14, 0x2, 'syzkaller1\x00'}]}, @ETHTOOL_A_EEE_TX_LPI_TIMER={0x8, 0x7, 0x8000}, @ETHTOOL_A_EEE_TX_LPI_TIMER={0x8, 0x7, 0x9}]}, 0x5c}, 0x1, 0x0, 0x0, 0x8000}, 0x4008814) 17:04:13 executing program 0: r0 = socket$nl_generic(0x10, 0x3, 0x10) r1 = syz_genetlink_get_family_id$SEG6(&(0x7f0000000480), 0xffffffffffffffff) sendmsg$SEG6_CMD_SETHMAC(r0, &(0x7f0000000100)={0x0, 0xe00, &(0x7f00000000c0)={&(0x7f0000000040)={0x34, r1, 0x1, 0x0, 0x0, {}, [@SEG6_ATTR_HMACKEYID={0x8, 0x3, 0x1f}, @SEG6_ATTR_SECRETLEN={0x5, 0x5, 0x4}, @SEG6_ATTR_SECRET={0x8, 0x4, [0x0]}, @SEG6_ATTR_ALGID={0x5}]}, 0x34}}, 0x0) r2 = socket$rxrpc(0x21, 0x2, 0xa) r3 = syz_genetlink_get_family_id$batadv(&(0x7f0000007580), 0xffffffffffffffff) sendmsg$BATADV_CMD_GET_GATEWAYS(0xffffffffffffffff, &(0x7f0000007680)={0x0, 0x0, 
&(0x7f0000007640)={&(0x7f0000000500)=ANY=[@ANYBLOB="46040000", @ANYRES16=r3, @ANYBLOB="ff830500000000000000ae9c9d8bc9a20b8715cce8354b0f063cfc63c18a38872af52a08caafc31cf4dfb53fbf8fec53cd829e88e802007c0f2178e0e1f079724ef3ab530e2f16ceeff21901a3eea32bf0b57f118700000000000000000000000000099a317797dca8a0dcb01375e4efdc7f3b22d435", @ANYRES32=r2], 0x4}}, 0x0) r4 = socket$igmp(0x2, 0x3, 0x2) r5 = bpf$ITER_CREATE(0x21, &(0x7f00000002c0), 0x8) socket$rxrpc(0x21, 0x2, 0xa) r6 = syz_init_net_socket$bt_hci(0x1f, 0x3, 0x1) r7 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) r8 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000140)='memory.events\x00', 0x7a05, 0x1700) openat$cgroup_ro(r7, &(0x7f00000006c0)='rdma.current\x00', 0x0, 0x0) write$cgroup_int(0xffffffffffffffff, &(0x7f0000000680)=0x40000005, 0x12) sendfile(r8, r7, 0x0, 0x800000000000c) r9 = syz_genetlink_get_family_id$devlink(&(0x7f0000000080), 0xffffffffffffffff) r10 = gettid() r11 = socket$inet6(0xa, 0x1, 0x8010000000000084) setsockopt$inet_sctp_SCTP_AUTH_ACTIVE_KEY(r8, 0x84, 0x18, &(0x7f0000000580)={0x0, 0x57}, 0x8) bind$inet6(r11, &(0x7f00000000c0)={0xa, 0x4e21, 0x0, @empty}, 0x1c) connect$inet6(r11, &(0x7f0000000000)={0xa, 0x4e21, 0x0, @ipv4={'\x00', '\xff\xff', @dev={0xac, 0x14, 0x14, 0x39}}}, 0x1c) setsockopt$inet_sctp6_SCTP_PEER_ADDR_PARAMS(r11, 0x84, 0x9, &(0x7f0000000300)={0x0, @in6={{0xa, 0x0, 0x0, @empty}}, 0x0, 0x0, 0x300, 0x0, 0xb3550aa4ba878394}, 0x9c) write$binfmt_misc(r6, &(0x7f00000005c0)=ANY=[@ANYRESHEX=r9, @ANYRES32=r8, @ANYRES32=r8, @ANYRES64=r10, @ANYRESHEX=r10, @ANYRESOCT=r9], 0x4) sendmsg$DEVLINK_CMD_PORT_GET(r5, &(0x7f00000004c0)={&(0x7f0000000300)={0x10, 0x0, 0x0, 0x2000001}, 0xc, &(0x7f0000000440)={&(0x7f0000000340)={0xf0, r9, 0x800, 0x70bd29, 0x25dfdbfd, {}, [{{@pci={{0x8}, {0x11}}, {0x8}}}, {{@pci={{0x8}, {0x11}}, {0x8}}}, {{@pci={{0x8}, {0x11}}, {0x8, 0x3, 0x3}}}, {{@nsim={{0xe}, {0xf, 0x2, {'netdevsim', 0x0}}}, {0x8, 0x3, 0x1}}}, {{@pci={{0x8}, {0x11}}, {0x8, 0x3, 0x2}}}, {{@pci={{0x8}, {0x11}}, {0x8, 0x3, 0x1}}}]}, 0xf0}, 0x1, 0x0, 0x0, 0x10}, 0x0) setsockopt$IP_VS_SO_SET_ADD(r4, 0x0, 0x482, &(0x7f0000000080)={0x84, @rand_addr, 0x0, 0xff00, 'wrr\x00', 0x0, 0x7fffffff, 0x36}, 0x2c) r12 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) sendfile(0xffffffffffffffff, r12, 0x0, 0x8000000000004) openat$cgroup_ro(r12, &(0x7f00000001c0)='cpuacct.usage_percpu_user\x00', 0x0, 0x0) bind$bt_hci(r12, &(0x7f0000000640)={0x1f, 0xffffffffffffffff, 0x1}, 0x6) sendmsg$BATADV_CMD_GET_HARDIF(r0, &(0x7f0000000180)={&(0x7f0000000000)={0x10, 0x0, 0x0, 0x20}, 0xc, &(0x7f0000000140)={&(0x7f00000001c0)=ANY=[@ANYBLOB="5e06cd51664089fad1b9e65ee9928f9a6de36af22582223e699c22e45c93016f23b8bfc4d6cf6361099b1ff484e6e1a3ff1b82d02b215260a85ee5c5b9f8eff25ef0c1b6138f398a48a7acd63c382b73be0f6e947a826538e69109b4a2224dad1a84122ad73917d2ee481d53f88c85b2a489d86bbde28053d5941e47dc5e9510c6c22007409cd4b65ba7aa96c667ed5a5d", @ANYRES16=r3, @ANYBLOB="000826bd7000fbdbdf250500000005002a00010000000500370000000000050033000200000008002c0003000000"], 0x34}}, 0x21) [ 2888.715069][T23445] workqueue: Failed to create a rescuer kthread for wq "bond510": -EINTR [ 2888.800246][T23450] netlink: 'syz-executor.4': attribute type 1 has an invalid length. 
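Program 0 above also calls syz_genetlink_get_family_id$SEG6 / $batadv / $devlink before issuing SEG6_CMD_SETHMAC, BATADV_CMD_GET_* and DEVLINK_CMD_PORT_GET. Those helpers correspond to the standard generic-netlink controller lookup: ask GENL_ID_CTRL with CTRL_CMD_GETFAMILY for a family name and read CTRL_ATTR_FAMILY_ID from the reply, which then becomes the nlmsg_type of the real request. The C sketch below is an approximation of that lookup (family name "SEG6" chosen as an example), not syzkaller's implementation.

```c
/* Sketch of a generic-netlink family-id lookup via the controller family. */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/netlink.h>
#include <linux/genetlink.h>

static int genl_family_id(const char *name)
{
    int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_GENERIC);
    if (fd < 0)
        return -1;

    struct {
        struct nlmsghdr nlh;
        struct genlmsghdr genl;
        char attrs[64];
    } req;
    memset(&req, 0, sizeof(req));
    req.nlh.nlmsg_type = GENL_ID_CTRL;          /* controller family */
    req.nlh.nlmsg_flags = NLM_F_REQUEST;
    req.genl.cmd = CTRL_CMD_GETFAMILY;
    req.genl.version = 1;

    /* CTRL_ATTR_FAMILY_NAME = "<name>\0" */
    struct nlattr *na = (struct nlattr *)req.attrs;
    na->nla_type = CTRL_ATTR_FAMILY_NAME;
    na->nla_len = NLA_HDRLEN + strlen(name) + 1;
    memcpy((char *)na + NLA_HDRLEN, name, strlen(name) + 1);
    req.nlh.nlmsg_len = NLMSG_LENGTH(GENL_HDRLEN) + NLA_ALIGN(na->nla_len);

    int id = -1;
    char buf[4096];
    if (send(fd, &req, req.nlh.nlmsg_len, 0) >= 0) {
        ssize_t n = recv(fd, buf, sizeof(buf), 0);
        struct nlmsghdr *nlh = (struct nlmsghdr *)buf;
        /* An NLMSG_ERROR reply (unknown family) leaves id at -1. */
        if (n >= (ssize_t)NLMSG_LENGTH(GENL_HDRLEN) && nlh->nlmsg_type == GENL_ID_CTRL) {
            char *p = (char *)NLMSG_DATA(nlh) + GENL_HDRLEN;
            char *end = (char *)nlh + nlh->nlmsg_len;
            while (p + NLA_HDRLEN <= end) {
                struct nlattr *a = (struct nlattr *)p;
                if (a->nla_len < NLA_HDRLEN)
                    break;
                if (a->nla_type == CTRL_ATTR_FAMILY_ID) {
                    unsigned short fid;
                    memcpy(&fid, (char *)a + NLA_HDRLEN, sizeof(fid));
                    id = fid;
                }
                p += NLA_ALIGN(a->nla_len);
            }
        }
    }
    close(fd);
    return id;
}

int main(void)
{
    printf("SEG6 genetlink family id: %d\n", genl_family_id("SEG6"));
    return 0;
}
```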
17:04:13 executing program 4: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x28a, 0x0, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0x29}]}, 0x3c}}, 0x0) [ 2888.831815][T23450] workqueue: Failed to create a rescuer kthread for wq "bond683": -EINTR [ 2888.915677][T23454] netlink: 'syz-executor.2': attribute type 1 has an invalid length. [ 2889.033552][T23454] bond1043: entered promiscuous mode [ 2889.041240][T23454] 8021q: adding VLAN 0 to HW filter on device bond1043 17:04:14 executing program 2: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x68, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) [ 2889.106755][T23455] bond1043: (slave bridge1109): making interface the new active one [ 2889.116001][T23455] bridge1109: entered promiscuous mode [ 2889.129836][T23455] bond1043: (slave bridge1109): Enslaving as an active interface with an up link 17:04:14 executing program 1: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x28a, 0xa, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8}]}, 0xec0}}, 0x0) [ 2889.284515][T23462] netlink: 'syz-executor.5': attribute type 1 has an invalid length. 
[ 2889.364827][T23462] bond1117: entered promiscuous mode [ 2889.374563][T23462] 8021q: adding VLAN 0 to HW filter on device bond1117 17:04:14 executing program 5: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x3f0, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) [ 2889.452032][T23465] bond1117: (slave bridge1143): making interface the new active one [ 2889.460357][T23465] bridge1143: entered promiscuous mode [ 2889.473450][T23465] bond1117: (slave bridge1143): Enslaving as an active interface with an up link [ 2889.535096][T23476] netlink: 'syz-executor.4': attribute type 1 has an invalid length. 17:04:14 executing program 3: socket$netlink(0x10, 0x3, 0x0) (async) r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket(0x0, 0x0, 0x0) sendmsg$nl_route(0xffffffffffffffff, 0x0, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000100)=@newlink={0x48, 0x10, 0xffffff1f, 0x0, 0x0, {}, [@IFLA_LINKINFO={0x28, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x18, 0x2, 0x0, 0x1, [@IFLA_BR_MULTI_BOOLOPT={0xc, 0x2e, {0x0, 0x1}}, @IFLA_BR_MCAST_STATS_ENABLED={0x5}]}}}]}, 0x48}}, 0x0) r2 = accept(r1, &(0x7f0000000000)=@l2={0x1f, 0x0, @none}, &(0x7f0000000080)=0x80) ioctl$sock_ipv6_tunnel_SIOCGET6RD(r1, 0x89f8, &(0x7f0000000200)={'sit0\x00', &(0x7f0000000180)={'syztnl2\x00', 0x0, 0x80, 0x700, 0x2, 0x5, {{0xc, 0x4, 0x1, 0x11, 0x30, 0x66, 0x0, 0xc8, 0x2f, 0x0, @loopback, @multicast2, {[@generic={0x82, 0xd, "72fbee9453606defd218bd"}, @ssrr={0x89, 0xf, 0xd4, [@loopback, @empty, @loopback]}]}}}}}) (async) ioctl$sock_ipv6_tunnel_SIOCGET6RD(r1, 0x89f8, &(0x7f0000000200)={'sit0\x00', &(0x7f0000000180)={'syztnl2\x00', 0x0, 0x80, 0x700, 0x2, 0x5, {{0xc, 0x4, 0x1, 0x11, 0x30, 0x66, 0x0, 0xc8, 0x2f, 0x0, @loopback, @multicast2, {[@generic={0x82, 0xd, "72fbee9453606defd218bd"}, @ssrr={0x89, 0xf, 0xd4, [@loopback, @empty, @loopback]}]}}}}}) sendmsg$ETHTOOL_MSG_EEE_SET(r2, &(0x7f0000000340)={&(0x7f00000000c0)={0x10, 0x0, 0x0, 0x800000}, 0xc, &(0x7f0000000300)={&(0x7f0000000280)={0x5c, 0x0, 0x400, 0x70bd28, 0x25dfdbfc, {}, [@ETHTOOL_A_EEE_TX_LPI_ENABLED={0x5}, @ETHTOOL_A_EEE_HEADER={0x30, 0x1, 0x0, 0x1, [@ETHTOOL_A_HEADER_DEV_INDEX={0x8, 0x1, r3}, @ETHTOOL_A_HEADER_FLAGS={0x8, 0x3, 0x1}, @ETHTOOL_A_HEADER_FLAGS={0x8}, @ETHTOOL_A_HEADER_DEV_NAME={0x14, 0x2, 'syzkaller1\x00'}]}, @ETHTOOL_A_EEE_TX_LPI_TIMER={0x8, 0x7, 0x8000}, @ETHTOOL_A_EEE_TX_LPI_TIMER={0x8, 0x7, 0x9}]}, 0x5c}, 0x1, 0x0, 0x0, 0x8000}, 0x4008814) [ 2889.620132][T23476] bond683: entered promiscuous mode [ 2889.626065][T23476] 8021q: adding VLAN 0 to HW filter on device bond683 [ 2889.744971][T23482] bond1044: entered promiscuous mode [ 2889.753760][T23482] 8021q: adding VLAN 0 to HW filter on device bond1044 17:04:14 executing program 4: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) 
sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x28a, 0x0, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0x2a}]}, 0x3c}}, 0x0) 17:04:14 executing program 0: r0 = socket$nl_generic(0x10, 0x3, 0x10) syz_genetlink_get_family_id$SEG6(&(0x7f0000000480), 0xffffffffffffffff) (async) r1 = syz_genetlink_get_family_id$SEG6(&(0x7f0000000480), 0xffffffffffffffff) sendmsg$SEG6_CMD_SETHMAC(r0, &(0x7f0000000100)={0x0, 0xe00, &(0x7f00000000c0)={&(0x7f0000000040)={0x34, r1, 0x1, 0x0, 0x0, {}, [@SEG6_ATTR_HMACKEYID={0x8, 0x3, 0x1f}, @SEG6_ATTR_SECRETLEN={0x5, 0x5, 0x4}, @SEG6_ATTR_SECRET={0x8, 0x4, [0x0]}, @SEG6_ATTR_ALGID={0x5}]}, 0x34}}, 0x0) r2 = socket$rxrpc(0x21, 0x2, 0xa) syz_genetlink_get_family_id$batadv(&(0x7f0000007580), 0xffffffffffffffff) (async) r3 = syz_genetlink_get_family_id$batadv(&(0x7f0000007580), 0xffffffffffffffff) sendmsg$BATADV_CMD_GET_GATEWAYS(0xffffffffffffffff, &(0x7f0000007680)={0x0, 0x0, &(0x7f0000007640)={&(0x7f0000000500)=ANY=[@ANYBLOB="46040000", @ANYRES16=r3, @ANYBLOB="ff830500000000000000ae9c9d8bc9a20b8715cce8354b0f063cfc63c18a38872af52a08caafc31cf4dfb53fbf8fec53cd829e88e802007c0f2178e0e1f079724ef3ab530e2f16ceeff21901a3eea32bf0b57f118700000000000000000000000000099a317797dca8a0dcb01375e4efdc7f3b22d435", @ANYRES32=r2], 0x4}}, 0x0) socket$igmp(0x2, 0x3, 0x2) (async) r4 = socket$igmp(0x2, 0x3, 0x2) r5 = bpf$ITER_CREATE(0x21, &(0x7f00000002c0), 0x8) socket$rxrpc(0x21, 0x2, 0xa) r6 = syz_init_net_socket$bt_hci(0x1f, 0x3, 0x1) r7 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) r8 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000140)='memory.events\x00', 0x7a05, 0x1700) openat$cgroup_ro(r7, &(0x7f00000006c0)='rdma.current\x00', 0x0, 0x0) write$cgroup_int(0xffffffffffffffff, &(0x7f0000000680)=0x40000005, 0x12) sendfile(r8, r7, 0x0, 0x800000000000c) (async) sendfile(r8, r7, 0x0, 0x800000000000c) r9 = syz_genetlink_get_family_id$devlink(&(0x7f0000000080), 0xffffffffffffffff) r10 = gettid() r11 = socket$inet6(0xa, 0x1, 0x8010000000000084) setsockopt$inet_sctp_SCTP_AUTH_ACTIVE_KEY(r8, 0x84, 0x18, &(0x7f0000000580)={0x0, 0x57}, 0x8) (async) setsockopt$inet_sctp_SCTP_AUTH_ACTIVE_KEY(r8, 0x84, 0x18, &(0x7f0000000580)={0x0, 0x57}, 0x8) bind$inet6(r11, &(0x7f00000000c0)={0xa, 0x4e21, 0x0, @empty}, 0x1c) connect$inet6(r11, &(0x7f0000000000)={0xa, 0x4e21, 0x0, @ipv4={'\x00', '\xff\xff', @dev={0xac, 0x14, 0x14, 0x39}}}, 0x1c) setsockopt$inet_sctp6_SCTP_PEER_ADDR_PARAMS(r11, 0x84, 0x9, &(0x7f0000000300)={0x0, @in6={{0xa, 0x0, 0x0, @empty}}, 0x0, 0x0, 0x300, 0x0, 0xb3550aa4ba878394}, 0x9c) write$binfmt_misc(r6, &(0x7f00000005c0)=ANY=[@ANYRESHEX=r9, @ANYRES32=r8, @ANYRES32=r8, @ANYRES64=r10, @ANYRESHEX=r10, @ANYRESOCT=r9], 0x4) (async) write$binfmt_misc(r6, &(0x7f00000005c0)=ANY=[@ANYRESHEX=r9, @ANYRES32=r8, @ANYRES32=r8, @ANYRES64=r10, @ANYRESHEX=r10, @ANYRESOCT=r9], 0x4) sendmsg$DEVLINK_CMD_PORT_GET(r5, &(0x7f00000004c0)={&(0x7f0000000300)={0x10, 0x0, 0x0, 0x2000001}, 0xc, 
&(0x7f0000000440)={&(0x7f0000000340)={0xf0, r9, 0x800, 0x70bd29, 0x25dfdbfd, {}, [{{@pci={{0x8}, {0x11}}, {0x8}}}, {{@pci={{0x8}, {0x11}}, {0x8}}}, {{@pci={{0x8}, {0x11}}, {0x8, 0x3, 0x3}}}, {{@nsim={{0xe}, {0xf, 0x2, {'netdevsim', 0x0}}}, {0x8, 0x3, 0x1}}}, {{@pci={{0x8}, {0x11}}, {0x8, 0x3, 0x2}}}, {{@pci={{0x8}, {0x11}}, {0x8, 0x3, 0x1}}}]}, 0xf0}, 0x1, 0x0, 0x0, 0x10}, 0x0) (async) sendmsg$DEVLINK_CMD_PORT_GET(r5, &(0x7f00000004c0)={&(0x7f0000000300)={0x10, 0x0, 0x0, 0x2000001}, 0xc, &(0x7f0000000440)={&(0x7f0000000340)={0xf0, r9, 0x800, 0x70bd29, 0x25dfdbfd, {}, [{{@pci={{0x8}, {0x11}}, {0x8}}}, {{@pci={{0x8}, {0x11}}, {0x8}}}, {{@pci={{0x8}, {0x11}}, {0x8, 0x3, 0x3}}}, {{@nsim={{0xe}, {0xf, 0x2, {'netdevsim', 0x0}}}, {0x8, 0x3, 0x1}}}, {{@pci={{0x8}, {0x11}}, {0x8, 0x3, 0x2}}}, {{@pci={{0x8}, {0x11}}, {0x8, 0x3, 0x1}}}]}, 0xf0}, 0x1, 0x0, 0x0, 0x10}, 0x0) setsockopt$IP_VS_SO_SET_ADD(r4, 0x0, 0x482, &(0x7f0000000080)={0x84, @rand_addr, 0x0, 0xff00, 'wrr\x00', 0x0, 0x7fffffff, 0x36}, 0x2c) r12 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) sendfile(0xffffffffffffffff, r12, 0x0, 0x8000000000004) (async) sendfile(0xffffffffffffffff, r12, 0x0, 0x8000000000004) openat$cgroup_ro(r12, &(0x7f00000001c0)='cpuacct.usage_percpu_user\x00', 0x0, 0x0) bind$bt_hci(r12, &(0x7f0000000640)={0x1f, 0xffffffffffffffff, 0x1}, 0x6) (async) bind$bt_hci(r12, &(0x7f0000000640)={0x1f, 0xffffffffffffffff, 0x1}, 0x6) sendmsg$BATADV_CMD_GET_HARDIF(r0, &(0x7f0000000180)={&(0x7f0000000000)={0x10, 0x0, 0x0, 0x20}, 0xc, &(0x7f0000000140)={&(0x7f00000001c0)=ANY=[@ANYBLOB="5e06cd51664089fad1b9e65ee9928f9a6de36af22582223e699c22e45c93016f23b8bfc4d6cf6361099b1ff484e6e1a3ff1b82d02b215260a85ee5c5b9f8eff25ef0c1b6138f398a48a7acd63c382b73be0f6e947a826538e69109b4a2224dad1a84122ad73917d2ee481d53f88c85b2a489d86bbde28053d5941e47dc5e9510c6c22007409cd4b65ba7aa96c667ed5a5d", @ANYRES16=r3, @ANYBLOB="000826bd7000fbdbdf250500000005002a00010000000500370000000000050033000200000008002c0003000000"], 0x34}}, 0x21) (async) sendmsg$BATADV_CMD_GET_HARDIF(r0, &(0x7f0000000180)={&(0x7f0000000000)={0x10, 0x0, 0x0, 0x20}, 0xc, &(0x7f0000000140)={&(0x7f00000001c0)=ANY=[@ANYBLOB="5e06cd51664089fad1b9e65ee9928f9a6de36af22582223e699c22e45c93016f23b8bfc4d6cf6361099b1ff484e6e1a3ff1b82d02b215260a85ee5c5b9f8eff25ef0c1b6138f398a48a7acd63c382b73be0f6e947a826538e69109b4a2224dad1a84122ad73917d2ee481d53f88c85b2a489d86bbde28053d5941e47dc5e9510c6c22007409cd4b65ba7aa96c667ed5a5d", @ANYRES16=r3, @ANYBLOB="000826bd7000fbdbdf250500000005002a00010000000500370000000000050033000200000008002c0003000000"], 0x34}}, 0x21) [ 2889.843968][T23483] bond1044: (slave bridge1110): making interface the new active one [ 2889.853607][T23483] bridge1110: entered promiscuous mode [ 2889.867763][T23483] bond1044: (slave bridge1110): Enslaving as an active interface with an up link 17:04:14 executing program 2: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, 
&(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x6c, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) 17:04:15 executing program 0: r0 = socket$nl_generic(0x10, 0x3, 0x10) (async) r1 = syz_genetlink_get_family_id$SEG6(&(0x7f0000000480), 0xffffffffffffffff) sendmsg$SEG6_CMD_SETHMAC(r0, &(0x7f0000000100)={0x0, 0xe00, &(0x7f00000000c0)={&(0x7f0000000040)={0x34, r1, 0x1, 0x0, 0x0, {}, [@SEG6_ATTR_HMACKEYID={0x8, 0x3, 0x1f}, @SEG6_ATTR_SECRETLEN={0x5, 0x5, 0x4}, @SEG6_ATTR_SECRET={0x8, 0x4, [0x0]}, @SEG6_ATTR_ALGID={0x5}]}, 0x34}}, 0x0) (async) r2 = socket$rxrpc(0x21, 0x2, 0xa) r3 = syz_genetlink_get_family_id$batadv(&(0x7f0000007580), 0xffffffffffffffff) sendmsg$BATADV_CMD_GET_GATEWAYS(0xffffffffffffffff, &(0x7f0000007680)={0x0, 0x0, &(0x7f0000007640)={&(0x7f0000000500)=ANY=[@ANYBLOB="46040000", @ANYRES16=r3, @ANYBLOB="ff830500000000000000ae9c9d8bc9a20b8715cce8354b0f063cfc63c18a38872af52a08caafc31cf4dfb53fbf8fec53cd829e88e802007c0f2178e0e1f079724ef3ab530e2f16ceeff21901a3eea32bf0b57f118700000000000000000000000000099a317797dca8a0dcb01375e4efdc7f3b22d435", @ANYRES32=r2], 0x4}}, 0x0) (async) r4 = socket$igmp(0x2, 0x3, 0x2) (async) r5 = bpf$ITER_CREATE(0x21, &(0x7f00000002c0), 0x8) socket$rxrpc(0x21, 0x2, 0xa) r6 = syz_init_net_socket$bt_hci(0x1f, 0x3, 0x1) (async) r7 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) (async) r8 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000140)='memory.events\x00', 0x7a05, 0x1700) openat$cgroup_ro(r7, &(0x7f00000006c0)='rdma.current\x00', 0x0, 0x0) (async) write$cgroup_int(0xffffffffffffffff, &(0x7f0000000680)=0x40000005, 0x12) (async) sendfile(r8, r7, 0x0, 0x800000000000c) r9 = syz_genetlink_get_family_id$devlink(&(0x7f0000000080), 0xffffffffffffffff) (async) r10 = gettid() (async) r11 = socket$inet6(0xa, 0x1, 0x8010000000000084) setsockopt$inet_sctp_SCTP_AUTH_ACTIVE_KEY(r8, 0x84, 0x18, &(0x7f0000000580)={0x0, 0x57}, 0x8) (async) bind$inet6(r11, &(0x7f00000000c0)={0xa, 0x4e21, 0x0, @empty}, 0x1c) connect$inet6(r11, &(0x7f0000000000)={0xa, 0x4e21, 0x0, @ipv4={'\x00', '\xff\xff', @dev={0xac, 0x14, 0x14, 0x39}}}, 0x1c) (async) setsockopt$inet_sctp6_SCTP_PEER_ADDR_PARAMS(r11, 0x84, 0x9, &(0x7f0000000300)={0x0, @in6={{0xa, 0x0, 0x0, @empty}}, 0x0, 0x0, 0x300, 0x0, 0xb3550aa4ba878394}, 0x9c) (async) write$binfmt_misc(r6, &(0x7f00000005c0)=ANY=[@ANYRESHEX=r9, @ANYRES32=r8, @ANYRES32=r8, @ANYRES64=r10, @ANYRESHEX=r10, @ANYRESOCT=r9], 0x4) (async) sendmsg$DEVLINK_CMD_PORT_GET(r5, &(0x7f00000004c0)={&(0x7f0000000300)={0x10, 0x0, 0x0, 0x2000001}, 0xc, &(0x7f0000000440)={&(0x7f0000000340)={0xf0, r9, 0x800, 0x70bd29, 0x25dfdbfd, {}, [{{@pci={{0x8}, {0x11}}, {0x8}}}, {{@pci={{0x8}, {0x11}}, {0x8}}}, {{@pci={{0x8}, {0x11}}, {0x8, 0x3, 0x3}}}, {{@nsim={{0xe}, {0xf, 0x2, {'netdevsim', 0x0}}}, {0x8, 0x3, 0x1}}}, {{@pci={{0x8}, {0x11}}, {0x8, 0x3, 0x2}}}, {{@pci={{0x8}, {0x11}}, {0x8, 0x3, 0x1}}}]}, 0xf0}, 0x1, 0x0, 0x0, 0x10}, 0x0) setsockopt$IP_VS_SO_SET_ADD(r4, 0x0, 0x482, &(0x7f0000000080)={0x84, @rand_addr, 0x0, 0xff00, 'wrr\x00', 0x0, 0x7fffffff, 0x36}, 0x2c) (async) r12 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) sendfile(0xffffffffffffffff, r12, 0x0, 0x8000000000004) openat$cgroup_ro(r12, &(0x7f00000001c0)='cpuacct.usage_percpu_user\x00', 0x0, 0x0) (async) bind$bt_hci(r12, &(0x7f0000000640)={0x1f, 0xffffffffffffffff, 0x1}, 0x6) (async) sendmsg$BATADV_CMD_GET_HARDIF(r0, 
&(0x7f0000000180)={&(0x7f0000000000)={0x10, 0x0, 0x0, 0x20}, 0xc, &(0x7f0000000140)={&(0x7f00000001c0)=ANY=[@ANYBLOB="5e06cd51664089fad1b9e65ee9928f9a6de36af22582223e699c22e45c93016f23b8bfc4d6cf6361099b1ff484e6e1a3ff1b82d02b215260a85ee5c5b9f8eff25ef0c1b6138f398a48a7acd63c382b73be0f6e947a826538e69109b4a2224dad1a84122ad73917d2ee481d53f88c85b2a489d86bbde28053d5941e47dc5e9510c6c22007409cd4b65ba7aa96c667ed5a5d", @ANYRES16=r3, @ANYBLOB="000826bd7000fbdbdf250500000005002a00010000000500370000000000050033000200000008002c0003000000"], 0x34}}, 0x21) [ 2889.994284][T23486] bond807: entered promiscuous mode [ 2890.000388][T23486] 8021q: adding VLAN 0 to HW filter on device bond807 [ 2890.117004][T23491] bond1118: entered promiscuous mode [ 2890.123303][T23491] 8021q: adding VLAN 0 to HW filter on device bond1118 17:04:15 executing program 0: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x28a, 0x0, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0x29}]}, 0x3c}}, 0x0) 17:04:15 executing program 3: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket(0x0, 0x0, 0x0) sendmsg$nl_route(0xffffffffffffffff, 0x0, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000100)=@newlink={0x48, 0x10, 0xffffff1f, 0x0, 0x0, {}, [@IFLA_LINKINFO={0x28, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x18, 0x2, 0x0, 0x1, [@IFLA_BR_MULTI_BOOLOPT={0xc, 0x2e, {0x0, 0x1}}, @IFLA_BR_MCAST_STATS_ENABLED={0x5}]}}}]}, 0x48}}, 0x0) r2 = accept(r1, &(0x7f0000000000)=@l2={0x1f, 0x0, @none}, &(0x7f0000000080)=0x80) ioctl$sock_ipv6_tunnel_SIOCGET6RD(r1, 0x89f8, &(0x7f0000000200)={'sit0\x00', &(0x7f0000000180)={'syztnl2\x00', 0x0, 0x80, 0x700, 0x2, 0x5, {{0xc, 0x4, 0x1, 0x11, 0x30, 0x66, 0x0, 0xc8, 0x2f, 0x0, @loopback, @multicast2, {[@generic={0x82, 0xd, "72fbee9453606defd218bd"}, @ssrr={0x89, 0xf, 0xd4, [@loopback, @empty, @loopback]}]}}}}}) (async) ioctl$sock_ipv6_tunnel_SIOCGET6RD(r1, 0x89f8, &(0x7f0000000200)={'sit0\x00', &(0x7f0000000180)={'syztnl2\x00', 0x0, 0x80, 0x700, 0x2, 0x5, {{0xc, 0x4, 0x1, 0x11, 0x30, 0x66, 0x0, 0xc8, 0x2f, 0x0, @loopback, @multicast2, {[@generic={0x82, 0xd, "72fbee9453606defd218bd"}, @ssrr={0x89, 0xf, 0xd4, [@loopback, @empty, @loopback]}]}}}}}) sendmsg$ETHTOOL_MSG_EEE_SET(r2, &(0x7f0000000340)={&(0x7f00000000c0)={0x10, 0x0, 0x0, 0x800000}, 0xc, &(0x7f0000000300)={&(0x7f0000000280)={0x5c, 0x0, 0x400, 0x70bd28, 0x25dfdbfc, {}, [@ETHTOOL_A_EEE_TX_LPI_ENABLED={0x5}, @ETHTOOL_A_EEE_HEADER={0x30, 0x1, 0x0, 0x1, [@ETHTOOL_A_HEADER_DEV_INDEX={0x8, 0x1, r3}, @ETHTOOL_A_HEADER_FLAGS={0x8, 0x3, 0x1}, @ETHTOOL_A_HEADER_FLAGS={0x8}, @ETHTOOL_A_HEADER_DEV_NAME={0x14, 0x2, 'syzkaller1\x00'}]}, @ETHTOOL_A_EEE_TX_LPI_TIMER={0x8, 0x7, 0x8000}, @ETHTOOL_A_EEE_TX_LPI_TIMER={0x8, 0x7, 0x9}]}, 0x5c}, 0x1, 0x0, 0x0, 0x8000}, 0x4008814) (async) sendmsg$ETHTOOL_MSG_EEE_SET(r2, &(0x7f0000000340)={&(0x7f00000000c0)={0x10, 0x0, 0x0, 0x800000}, 
0xc, &(0x7f0000000300)={&(0x7f0000000280)={0x5c, 0x0, 0x400, 0x70bd28, 0x25dfdbfc, {}, [@ETHTOOL_A_EEE_TX_LPI_ENABLED={0x5}, @ETHTOOL_A_EEE_HEADER={0x30, 0x1, 0x0, 0x1, [@ETHTOOL_A_HEADER_DEV_INDEX={0x8, 0x1, r3}, @ETHTOOL_A_HEADER_FLAGS={0x8, 0x3, 0x1}, @ETHTOOL_A_HEADER_FLAGS={0x8}, @ETHTOOL_A_HEADER_DEV_NAME={0x14, 0x2, 'syzkaller1\x00'}]}, @ETHTOOL_A_EEE_TX_LPI_TIMER={0x8, 0x7, 0x8000}, @ETHTOOL_A_EEE_TX_LPI_TIMER={0x8, 0x7, 0x9}]}, 0x5c}, 0x1, 0x0, 0x0, 0x8000}, 0x4008814) [ 2890.180323][T23493] bond1118: (slave bridge1144): making interface the new active one [ 2890.189417][T23493] bridge1144: entered promiscuous mode [ 2890.204000][T23493] bond1118: (slave bridge1144): Enslaving as an active interface with an up link 17:04:15 executing program 1: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x28a, 0xa, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8}]}, 0x33fe0}}, 0x0) 17:04:15 executing program 5: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x3f2, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) [ 2890.374517][T23503] bond684: entered promiscuous mode [ 2890.382538][T23503] 8021q: adding VLAN 0 to HW filter on device bond684 [ 2890.459904][T23534] bond1119: entered promiscuous mode [ 2890.466030][T23534] 8021q: adding VLAN 0 to HW filter on device bond1119 17:04:15 executing program 4: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x28a, 0x0, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0x2b}]}, 0x3c}}, 0x0) [ 
2890.587895][T23511] bond1045: entered promiscuous mode [ 2890.603478][T23511] 8021q: adding VLAN 0 to HW filter on device bond1045 [ 2890.675646][T23518] bond1045: (slave bridge1111): making interface the new active one [ 2890.683918][T23518] bridge1111: entered promiscuous mode [ 2890.697404][T23518] bond1045: (slave bridge1111): Enslaving as an active interface with an up link 17:04:15 executing program 2: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x74, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) 17:04:15 executing program 3: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket(0x0, 0x0, 0x0) sendmsg$nl_route(0xffffffffffffffff, 0x0, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f00000000c0)=ANY=[@ANYBLOB="5800000010001fff00"/20, @ANYRES32=0x0, @ANYBLOB="000000000000000075ff12800b0001006261746164760000280002800c0001004241544d414e5f56040001004241544d414e5f560c0001004241544d414e5f56acb28478fbaa1ab308f4b2eff3f36558dbec1c0c16ff78dfbd29c263e0c8a05b551d40933dcaaf26a4e704"], 0x58}}, 0x0) ioctl$sock_kcm_SIOCKCMCLONE(r1, 0x89e2, &(0x7f0000000000)={0xffffffffffffffff}) getsockname$packet(r2, &(0x7f0000000040)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @link_local}, &(0x7f0000000080)=0x14) [ 2890.907657][T23532] bond808: entered promiscuous mode [ 2890.923321][T23532] 8021q: adding VLAN 0 to HW filter on device bond808 [ 2891.003757][T23530] bond510: entered promiscuous mode [ 2891.010593][T23530] 8021q: adding VLAN 0 to HW filter on device bond510 17:04:16 executing program 1: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x28a, 0xa, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8}]}, 0x3c}, 0x2}, 0x0) 17:04:16 executing program 0: r0 = socket$packet(0x11, 0x2, 0x300) setsockopt$packet_tx_ring(r0, 0x107, 0x5, &(0x7f0000000100)=@req3={0x8000, 0x6, 0x8000, 0x6}, 0x1c) sendto$packet(r0, &(0x7f0000000580)="572e6ce9426bb12116f72baef9cd5c7f31e018", 0x13, 0x10, &(0x7f00000006c0)={0x11, 0x1, 0x0, 0x1, 0x4, 0x6, @random="f396d2d0759e"}, 0x14) r1 = socket$inet6(0xa, 0x2, 0x0) r2 = socket$nl_route(0x10, 0x3, 0x0) r3 = socket(0x26, 0x80000, 0x0) syz_genetlink_get_family_id$devlink(&(0x7f0000000240), r3) 
getsockname$packet(r3, &(0x7f0000000100)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f00000000c0)=0x14) getsockname$packet(r3, &(0x7f0000000280)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000200)=0x14) sendmsg$nl_route(r2, &(0x7f0000000080)={0x0, 0x0, &(0x7f0000000500)={&(0x7f0000000180)=ANY=[@ANYBLOB="7c0000001000390400"/20, @ANYRES32=r5, @ANYBLOB="83080000000000005c0012800b000100697036746e6c00004c00028008000700ffff000014000300", @ANYRES32=r4], 0x7c}}, 0x0) getsockopt$PNPIPE_IFINDEX(0xffffffffffffffff, 0x113, 0x2, &(0x7f0000000c40)=0x0, &(0x7f0000000c80)=0x4) r7 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000140)='memory.events\x00', 0x7a05, 0x1700) r8 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) write$cgroup_int(r7, &(0x7f0000000200), 0xf000) sendfile(r7, r8, 0x0, 0xf03b0000) r9 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) sendfile(r9, r8, &(0x7f00000002c0)=0x335773c3, 0x8) bind$packet(r9, &(0x7f0000000540)={0x11, 0x1a, r4, 0x1, 0x6, 0x6, @random="73e9ab7fb3d6"}, 0x14) sendmmsg$inet(r1, &(0x7f0000000d80)=[{{&(0x7f0000000040)={0x2, 0x63, @remote}, 0x10, 0x0, 0x0, &(0x7f0000000000)=[@ip_pktinfo={{0x1c, 0x0, 0x8, {r5, @empty, @initdev={0xac, 0x1e, 0x0, 0x0}}}}], 0x20}}, {{0x0, 0x0, 0x0, 0x0, 0xfffffffffffffffc}}, {{&(0x7f0000000140)={0x2, 0x4e21, @initdev={0xac, 0x1e, 0x0, 0x0}}, 0x10, &(0x7f0000000f40)=[{&(0x7f00000002c0)="dc87ae05385c19d6e8ed2d5958c96165d8ab1a829a3cea158ff38ec71a1978f56531e0f25371cccf61b6700d1c9cc25ea72d3c24f0690df682b643222f4497125903e59147029069b74f63d35f24e307a77ebd42683eff8fdc98bd250930429ce6a0c3c5da7dee48c39b80038079d34d1ca0e2eb2a6dd682117e50249fa873add9f2946c24f62a1f98fd58e18d71029657e30e92c85f02e35b6321e799a38d06928a99affd91c706a903108bcec86bf20300ccaa20809fead1ce2a50bac9ee436fc900212feebdd56836b0d2143a5f615b3c2792a942cc217507ec191299e523ac289b3e0dfbb2a6dd3bcc78d16b28", 0xef}, {&(0x7f0000000fc0)="b39c1f56d08b488343fcccfa0c8fbbe27259dbbf04aa9b2613943c72272bc9ef7142fd774c551413f961b33a4bf9d6d690de08e031b27327b3319bef0373d269a18f063d2ad2cdf8b1c6c7047e350f578663cc3e12bca7d301e181fa6dcf4486d2af15d6b00b6f0afe2c9a520d5c9e9bfc5b387432d1254a98132509fc", 0x7d}, {&(0x7f0000000440)="ad61563d14c5e0fed504a2ec73618a9015f3e912ba0a2ae092082a2e1940a8780e3f4f97d7765f7516763f8fa6d2fd03a3b72b7903717e7e89776acf8bb423ec3f16fc370ec5844e560f434bb2c1cdebe592eba037852b252de0663eb45e28725c924b3771292da6070b51ceb564fabbe5d0f73e637ca014161d", 0x7a}, {&(0x7f0000000ec0)="c0217b9b9a52455554742a7fb0a53399b53a9e3b98f3a292f3391620bd2fc204a7d7d489e6c43186fb0e7004c5eadc3392077bcbf7e2a807591fcfebc5c03a2b8556158367090868c5b2dc603a03aee0d1c6f0d88ceb8f205885f71a678b8e9f667ea019ea3a46039d526d", 0x6b}, {&(0x7f00000005c0)="333425b8bf13b81ce96d57f28b0a0c2ead98f99c6357f6fcb7e507b81d9697228a99efec5f95c6d94ca345a5d35246d0724d2ad197acb1b548bae8474fd0fc1620b1d083cba852d761f1da5cf0bd1806a846f535d162c89a3221822fc1e7c776faac237149f66f59991eae894a29d4fe8c0f4e41d06b5e6638bdd86816509ab7e85032ae4c110506f81064bd99cafd357067b3bcdc4956d28d96468a33dd751f2103cfe108153f2858d8dfee23271178abdd9dc5c56c0404e5a592c0e437a15d1a94d3d1288e53d507a04ffb309ff382c0446a42d98f065644044481b1792d92e2b0337270271c4d2db49061f1e95becaa8c1c9e3ec243b4c1a2960b21", 0xfd}, {&(0x7f00000004c0)="16d722aa7b6d13f7c902e510ab49dab4a7ce5b384dfc86e048b35cc83588825884c1abf2fe179142f68ebb98972677c9d4fec1a31efa84", 0x37}], 0x6}}, {{&(0x7f0000000740)={0x2, 0x4e22, @loopback}, 0x10, 
&(0x7f00000008c0)=[{&(0x7f0000000780)="ea57116eaa1b0f0fd2c885d2654ca2e3a7a677ad0c0594c9121851f4626bb5d5f6d0bc7e8bf95cd17257449e1faa4cad", 0x30}, {&(0x7f00000007c0)="ad5e4ecbc071ed99de56c354712042e8b8fb1f7bc9d129f8abbc8c609cdd6e6fdad4100a8cdc4364b6609374d0d5ff14c413aa4845abd526bb211f1176361e5c2e45336b5c2a6dab88aa5758a43b6bd1b8edc49ddf1178bc452ebad98a4730cef6023bf1c138243e2528aa8744bac0a45897ceb6d4fea2d2ae0e0a7962a28129bae0a28f8f865fc6badad6b5e647b5dcac2cdf46333cbdaef6336132457dc51056147033b6e83ad5d60344df2ee7a1d32cb3fb49bd840a18c17665209aaabb4198f8d22925a9a6c4b27baae614eaa5a304019c651e100abfe7f58299f83388a97f8a5a194343f78045e51d698c51fe9902b7183442620bb5b44c59686cbbfe", 0xff}], 0x2}}, {{&(0x7f0000000900)={0x2, 0x4e20, @dev={0xac, 0x14, 0x14, 0x25}}, 0x10, &(0x7f0000000bc0)=[{&(0x7f0000000940)="9bbe318b08799c2b09893789f703", 0xe}, {&(0x7f0000000980)="adc3540f64c28db8cb5f98ec16b2d3d2783519282da6f1582fd6acdb2042f1ce4770b19079ab95e5a7a6571931f86c78ae7cf409be906214d3e709887b86304426095733c1d19aabee67c000716129ece11a6f7e7c15da004ad7360ceed8d7cd84dac44d32d23a826f4ce2f11a3b6d24258b27deda9b8bd976c0e3ce3d60c70f8fd68586363ae6ccc141f608f3f3fa527daf66099b23332b2285a5e18e2b6db87c88", 0xa2}, {&(0x7f0000000a40)="4d4f3a1c328254bed4b1c5450374bf5fa8b680b1c51b73d37cf632b43a54097963fa199855c8640fc0119f30cd416bfdb13fadc7cf8b3a71b9040dfeb3cbdd9c5efb779620008cecf1e52d05639eb8b0d1b85d9a3aad8f16c37e547756903c3864f43c7834539906076400741210f7f30b680465d7652cb629cb24c6dba1cefe2477aed6bad008ad939a356d661afe3ee60a949206f523daa2e50557053f497adfd34acccf0684524caf29f0e395b8cf9e40", 0xb2}, {&(0x7f0000001280)="78cb8d4da48da241aee34e5214efc55e817e5299aefe89a5fad5c369f097b0b4804e4e45a819add0e37f8316065d92648481ae4fd98455bd905e107d6bb4c2c47ce7c92c183978a55aed4e55e3d784b9545cb58dce3f668a9362f555cf78f5fcd411e4453377801fd92aa5a65d922ddea461a71039a429e79b874ef6290b6fadc6fc1af7fbe7763412209ae55212d97bcad4f7f5c31ccf7418ec7746193c0f8ff22ab19698aadf810d85cd9b765c619cd612ae26cd3d3c6cd92a346ced9ebc028e6cdc4affe5455a307983440a02071def9a6e183580d91235093ab9220088ebdeecef19f6ded9a14d9e988e9a0f6792aa1561e83da436e20db9e6a6e85d8155c73b20cdee5ced44f3fa7a5d570b6a1eaf8e7d601f1dc3bb6adb8a3faa44c5713aa2c14468f56e3b367381594f983e07cdb146babe5795a1daa110e52ad9449bec0007de93ff8dacf15130ab29591c83c4ed48f218f954e724d60328562eafbbc642348d5b521a2ad7c63530d73ec5d8eeef4c526a5902a8fdb4c53180d35211ace2c26e20c97aae6ed2c135c3ea4c65b6baef154a7997dc3dae44cd1175977f5c59ff64ad40a5b707cb3ac681356f92e3b1fe635cddad54406d2d831b28217c10aa8fa45981779175528e2f8686f384669e2deeea2fb286c52e2bc3f2d1d19257605372263333a5f0d1673630b9208877caddd546fb42775afd6f50eb35491422c0d8df3db5fbc71e461edae26091d5efcd4385cd636ffd3be606e362bce2cf4a66b84885e6f9658968ff01ee81ff967ae8365b79d55c8939745a8bede19e332aa7532cc0155cd5eb5480e32ff7a94b203e4db70784e32364c1f070b5af1c944e513bed80bb3a02cb722917181b8d313918dce28be4f815009b3026c787f345263c34f56b172ae62880cacc8e1338d7935a319bf375860b0d7f502cea86aec94d7fe6084fc21ef1ca8c0427a5952eebdb12e6cda414ca381afd21093f773244a3c7f632daddfc877fe6148b164906c6deb7bac84982691318c448b73bf634ba80819528d56063c5a760137ed5cc4c9c674e56168ff5a84c91681881733f4813b22961757f267c4ffc2798e6ad4dd21f27d0e7a61d7921d4f252b30b799e622041cb18426a12bd9f77be2b10e56ad9d26ab97eedc85ef92e6ec4212201766ddd826a012c3a5377a927d203f59a547449498c9091d0f7f76a8ed1fa6841b14d7086400b3c13f4c8d61edf1e477c1fcecc23ccb25b0a4476a4de55b2d45cde8dc8d3fb5c44d8fa1f2f8881f47fbe058ec1f66df6f24c00b904873965f5a6d3ae24d79a39c59b8122785d3d2a7986060a72b79572fe01a3aa43c8cdb45a565450f42e0f9f4120be3fa2fe4f2df848044f2034
f01a618bdd86f93b4518955f695bb2a8a026da92c331d7e5daf90ca1875bea747097dfa16d3da173eb2e24f36d4ba9f567b3035734acb018cb2f99b6e2d22bc4b18cbd522d7abdbd2aaab40ecec9e2c9b6754027950a9726808239916d23c8be6eb902721d9983cda3821de1b7fc3132bd2ca421fc03eaffd6fb51db22ffe5a8603f48277ccad1a79722ef2ae469960a226ccd46cbbe15b1acacddc8d2863c3ecac6d99f83f204bd24c381045fef1832dc15374104a9000cde56a0555b7408b00a56d61a348359a4e30de2605d39b9694999b607a9d5a559071c95ec4f1ece63245141497ea486394daee029a39b59c4f283f3b93ba675b5f1e5cd76e4858a0d0af22a2619d3403d2e75f5803b4a765c68e58578b6c3ae7a0611a7a89740ee0dd93c195019ce3e563507e3924f51a8695a79e6a2c05f072d7813d3ba215926bbdb345818216cc7ae39d46f11e82775f3bbe3679b4eb4faa4d2b5c1fe1595a899847fbec391a8235b6f37bd2fc527a44a54978f63d6e241e4e12157ea2e53647f88d4bb9aca48955317576e912335b20f09d5bf6ea4ccc7f83f7ecf3cfc039166f1c88947679fb16864b8257c2af3c807560d30eb5d226ac3e834a9ebde3159d2b1c9de963b3217c52e005816c339f672b137902991deab2ba7d8a837d5a68345f9db6aadff13647654705e4739bfa50f6e6065361255c26658952b00f770be26764d8e3730e085ca68c541ab04ac5985694fca2545485cc132ae33fb0ffc40e3535b318632ba4be7e0fbb77b57bb1889479fd89a72912b7c81ae8dea9dc2083edc597a8a4c0327d5e2d26a270b1688365a1d551657c36b16dc321c7361085b660d1c6e452148f03fbd67142a59c73e49bf5f1182f5688dfd557bd8448bb09b15e13339fca077562d120614c535d748cb338c2bef477b606082b10e51634ffa639bc728ece96189f1a3f48d9e06decd47dc32f01e51ffc531ea9e4a92a0f3b136c0597f4e85e0f00d45061660e80f6417fe56ebda9a2532eec07fdb7b16a9e156b1f95f0df46286434e96038ca06bb0a5b1c721356c15c51122313830cd75fd6fd64800e8281612e72ad99a8aef10a39a3bad8f92e031ed6321081b9d1f0c5c64dc406affa89fec40cf150e9035c8658a7d0a63da5bd713530475e80699261e3cdb88e8563ff9ca792a62d83185f686fac1d012b630c1b919baaf494ead9624932985436eea97019a8bd1b258bdd765c44c93952409cfc3f6506510ab6b48d51885d9af2a5033bfc204390e2896341af68ec7d5f95ca7a5cf40e5c1361f5c9224008ba54d5c0e299935f1c3f779d942b546036277a1e3065ca664f2a234ae7852c79bf681c6745a3568a8ce10b9453d05ce4fbd5631c7c5d22e9df045b34c8fedebb9f0b6232f7949462f11ffb0c8b8ea2c716803d9bb5bc6c11ed3ff9031d1a26e3b0e89d51d7e2aca47ae6323967c547d07345d0b93d9fa1cd3c478bc49e354641a43cac4f8f61b637a1fdfd6157e0e9b6d6afcde54ca9319774ce437b80e5f65e7b863eec04e67c362bc21c63f368d9d40bffaab025f220f5562d0a1441cfd484f074396b68c21dffc5a4dfb071b8de2f3aa89b747593448bfcdc254febbb8207d19c8df54b7a3f208e0686784d21cb75e7321bb7cb041719223abeb9e73f64f085122fd837c88a2aa0dcc4da793ed4155a2fc11c82405cc52652400d44050d2b54883c09123c3c3bb8f4c6a72f737b1d81debea5472b0b041ed5ac0555a1f39074c5dcd88f4d7d94363261dfc17c37917a937318390fbfb5480754eaa2b53b1545d516563adfeba4fbcdade5e79aed55e5a60889b4f69c814da4f57d5cc3daefd55b93efca01f013334ac882060e1aa50f5ddf0f7e6b79b3ff53688b76c3008fa7a5e5f6497192a1c04ed4fe85ec026871d2478b3d27199fb938a97c4ae0cd59d29e810bee98afba9e052e144de58aad7e2e8748419ae2afa997a59576bf7ce4df35328a38729495750028bba8838c3835875a0f3a7bc19da0e798e86c1ada59fb5b31a86f6daa34ac3a9a2729935c662bf82e3b79d3737ebf9bb62713bbc7285c48d941363b0d67eaa23fb65998ce367da525383877328d466fb95408b7d1160036e1faa1776e9bf6a838eda4ce9e60f00a805735734bd9a7a8282af21174e1e6406455a1dc5edbe4bf94e9160725713ccfe356bbf5363adf1f18856b04049cbe021d5e3d0b7119724949495db9a8d6b53868103cfcfbcf178e8f8180d4b9b9a06180d69412de6ad3ea6fe40a299a170fe531aba2a5937fe1b951e218bccfc3510f242f6dd9cb9485fb72014192b1309fb357beab8055dc0229d9bc4994373eea2a7e06ba07d284ada708fd76606653849d2a7f337a1a1963b88ba9919ee207be74735ee5b6b1fd6fcd5b9c5145eee649a61b57aa90530061a65edbe6fe285cea5563870d97c339238f4c27eff1db01d0e7cd4ee47ab70a440fe21b71eb90afc2fb9a70a26797c083cce7f675c42db8
482beeacfeb88080d6a552202d19cf23831fe2c82fbf9dfca67c72a159c636df11b75cf695fe91753e34cce8ee13227a0968c90582f830ef2fa5c63808e611866b6cb75479381f9fef1e4e9eeb414a2d9c9210e75daa8131f1795e40fdb876245d1379c9d653c8167228d512a9453eeffc263b7c7b9e5a8f4d8faf397cf602b5695998d915fa6b09397b68ded52c82869c339321c09b938926cabbefba5439368cc939ec7223bbc1e806227fc076e00df96a8363e0c62bda5838a70195a40753d9f5ffb075ff466b260949967d1eafc184caa1f9e04a4af1158fbaacc171aba67b61ada521e0377cd0c924c2d76cd6ba470361f5c179103c0a6ccb9aaf0e13d6272cfad4fc640548b82cb54401402e291379c45e1aa8fd3ccb70dc8bb3797feb1bb15b5c9b933e4d985ca3965d20fe8327eb088321a3668c276deb1681ac59902f2e90f17cc575875ed8f9ceefe9fc21cd4d6758cbd7d40648180be35d79b23a382ce9466aa97438da29a4506272aa6c74c1691b96253fbcd61711eb19dec67bafef7cf771a1c929ae56151a3d8182b0c776b7c5e49703dd5272b4b6888df337cef443d01ebc6abb17a64c3cc4741de9980d9fddff409e8fbc47fa3c995a9dffe33b06261974460d93ba1abfbe656768630e256d9a4183dc66a81a8b293e5f4f74dc795d8a46265d2aff4c0430f8a085d0e97857acf474e513d9c69b233db05b476172a57fdb8273e496165d398105c24aaa7be08bae51651603b90699e02667cf2ed17eee6db95ee31a5cb379ebd66a407f3858e46edc61dce48493883822cf6ae539ed0dd8adaa9c09896d02224c2c9bc9af43691486095436df7f931da2444dabfa56115c68babb9a3828fd1c9d355a1af50905f8a62aa55cdafe269951bc13c1b80f1a37aa6de192f0000e3c58cfe478e50fefa290f8df749e2df04f17fe2d7edb20fdf426e2eb9101a007d2ab9fd9dfeba18aafee6c2465f749f53aa86afd11d76631c76700b8916aa381d84a8a262ea771c46403839d32e397b5b11ac0c1fbd74b58054abd47cf76ffa7abc4eb83180e39f92dcf5d17d6df06587a3f2d4f0ac5ea7b68c66ceb46599712134bb9b5dad5636b9bb5eabc9b97ac0140e4dfc84a6af1efd219877e5faa362539b1bb327118b1c9396edeee9dc71e35158241cbee145a492bdbbd2af1a75c006ee269d2dcdb0b661e14b81757a4286b26087349969e9bb76e6d3e36098ad0bfb136b5ce120a050f1e99342b8825fba27d07b8f6a57780ac44463e8758f3281f151c79a71c54d9b280c3c1dbdcb971ac57dc77fe857a61c56529efc1c9338e07fc70d54b94d79708d1fa6f0af7846f29ba0cd3ccb316464b0ed8c885c8b659ccafa72d2c663d7d9b61744d17ca7c74724853ea69a31629238d9db1d9a24d11294871e24779b1bbd26bd2ad300c85ff7daae383efc6f82cf653d8d6f36413937a7653d73aeee8c53b09f882b4e582f2f1b0133ee4cc00eee7a1a94994d75b560168a947edfda7f53363ae702f88ead37474793687abce9307ef8689ce2aa1f816d5ab533b4435c15ece00ff86f19cda99b0ba39db45725ae6f891f94e2db4ddb960e8432dc5871a3a18d37341ff1014c0e628f5d49f80e872b86d7c4e784b9fd778d748282efe6bff88a613130bec7b6041e4875ee10952b2502b6fdb5539cb93f87478319dc8d3765b6e8e26a138c69baa30be8135eaf8f3718cb6093310818bc6ff66feb1cc2c4ef1398e1550c6dce31e6bbaeee47547a545df65e017ca7ab0cb0838534ea305cbc4766d2677d04d603136af6112aabfb0095ece3e41ac8dd2293acd53cb7872cce4b00d4cfaae08de22e140e912452a8ca21aa1141f8904db3c3a496d9c4dc552eb35467c886f01172", 0x1000}, {&(0x7f0000000b00)="3b539000a8e20f63fa678928f496ffc4e95e5b7c0c73c9e509d20fa625602ea6e012eb13a152afc45e088befde57a7004a0a8edf69ce34e94d5d9b61eae40b09d88b766ec439041d2ec25354e72915b66fd514a11d1875c1a159670b559caa3364c894787db35c114ca67ecea52bbb76439ec6f70c71842b78f8d94e1ac7bce1ec0298041e389a487a171f887c25ffec08b4afa078ca5b83e3f3ec30e937fe60c66427860852fb0d71888e5f20f8802d743be27d", 0xb4}], 0x5, &(0x7f0000000cc0)=[@ip_retopts={{0x24, 0x0, 0x7, {[@timestamp={0x44, 0x10, 0x8b, 0x0, 0x8, [0x2c78, 0x8, 0x7]}, @ra={0x94, 0x4, 0x7}]}}}, @ip_pktinfo={{0x1c, 0x0, 0x8, {r6, @local, @rand_addr=0x64010102}}}, @ip_ttl={{0x14, 0x0, 0x2, 0xfffffff8}}, @ip_pktinfo={{0x1c, 0x0, 0x8, {r5, @private=0xa010100, @broadcast}}}, @ip_tos_int={{0x14, 0x0, 0x1, 0x3}}, @ip_tos_u8={{0x11, 0x0, 0x1, 0x5}}], 0xb0}}], 0x5, 0x0) [ 2891.193668][T23541] bond1119: (slave bridge1145): 
making interface the new active one [ 2891.218233][T23541] bridge1145: entered promiscuous mode [ 2891.237913][T23541] bond1119: (slave bridge1145): Enslaving as an active interface with an up link 17:04:16 executing program 5: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x3f4, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) [ 2891.317442][T23544] bond685: entered promiscuous mode [ 2891.344232][T23544] 8021q: adding VLAN 0 to HW filter on device bond685 17:04:16 executing program 4: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x28a, 0x0, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0x2c}]}, 0x3c}}, 0x0) [ 2891.462748][T23548] bond1046: entered promiscuous mode [ 2891.468419][T23548] 8021q: adding VLAN 0 to HW filter on device bond1046 [ 2891.624671][T23554] bond1046: (slave bridge1112): making interface the new active one [ 2891.633825][T23554] bridge1112: entered promiscuous mode [ 2891.647291][T23554] bond1046: (slave bridge1112): Enslaving as an active interface with an up link [ 2891.656976][T23555] netlink: 56 bytes leftover after parsing attributes in process `syz-executor.3'. 
17:04:16 executing program 3: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket(0x0, 0x0, 0x0) (async) sendmsg$nl_route(0xffffffffffffffff, 0x0, 0x0) (async) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f00000000c0)=ANY=[@ANYBLOB="5800000010001fff00"/20, @ANYRES32=0x0, @ANYBLOB="000000000000000075ff12800b0001006261746164760000280002800c0001004241544d414e5f56040001004241544d414e5f560c0001004241544d414e5f56acb28478fbaa1ab308f4b2eff3f36558dbec1c0c16ff78dfbd29c263e0c8a05b551d40933dcaaf26a4e704"], 0x58}}, 0x0) ioctl$sock_kcm_SIOCKCMCLONE(r1, 0x89e2, &(0x7f0000000000)={0xffffffffffffffff}) getsockname$packet(r2, &(0x7f0000000040)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @link_local}, &(0x7f0000000080)=0x14) 17:04:16 executing program 2: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x7a, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) [ 2891.774755][T23558] bond809: entered promiscuous mode [ 2891.791443][T23558] 8021q: adding VLAN 0 to HW filter on device bond809 17:04:16 executing program 1: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x28a, 0xa, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8}]}, 0x3c}, 0x7}, 0x0) [ 2891.882584][T23564] netlink: 44 bytes leftover after parsing attributes in process `syz-executor.0'. 
17:04:16 executing program 0: r0 = socket$packet(0x11, 0x2, 0x300) setsockopt$packet_tx_ring(r0, 0x107, 0x5, &(0x7f0000000100)=@req3={0x8000, 0x6, 0x8000, 0x6}, 0x1c) sendto$packet(r0, &(0x7f0000000580)="572e6ce9426bb12116f72baef9cd5c7f31e018", 0x13, 0x10, &(0x7f00000006c0)={0x11, 0x1, 0x0, 0x1, 0x4, 0x6, @random="f396d2d0759e"}, 0x14) r1 = socket$inet6(0xa, 0x2, 0x0) r2 = socket$nl_route(0x10, 0x3, 0x0) r3 = socket(0x26, 0x80000, 0x0) syz_genetlink_get_family_id$devlink(&(0x7f0000000240), r3) getsockname$packet(r3, &(0x7f0000000100)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f00000000c0)=0x14) getsockname$packet(r3, &(0x7f0000000280)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000200)=0x14) sendmsg$nl_route(r2, &(0x7f0000000080)={0x0, 0x0, &(0x7f0000000500)={&(0x7f0000000180)=ANY=[@ANYBLOB="7c0000001000390400"/20, @ANYRES32=r5, @ANYBLOB="83080000000000005c0012800b000100697036746e6c00004c00028008000700ffff000014000300", @ANYRES32=r4], 0x7c}}, 0x0) getsockopt$PNPIPE_IFINDEX(0xffffffffffffffff, 0x113, 0x2, &(0x7f0000000c40)=0x0, &(0x7f0000000c80)=0x4) r7 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000140)='memory.events\x00', 0x7a05, 0x1700) r8 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) write$cgroup_int(r7, &(0x7f0000000200), 0xf000) sendfile(r7, r8, 0x0, 0xf03b0000) r9 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) sendfile(r9, r8, &(0x7f00000002c0)=0x335773c3, 0x8) bind$packet(r9, &(0x7f0000000540)={0x11, 0x1a, r4, 0x1, 0x6, 0x6, @random="73e9ab7fb3d6"}, 0x14) sendmmsg$inet(r1, &(0x7f0000000d80)=[{{&(0x7f0000000040)={0x2, 0x63, @remote}, 0x10, 0x0, 0x0, &(0x7f0000000000)=[@ip_pktinfo={{0x1c, 0x0, 0x8, {r5, @empty, @initdev={0xac, 0x1e, 0x0, 0x0}}}}], 0x20}}, {{0x0, 0x0, 0x0, 0x0, 0xfffffffffffffffc}}, {{&(0x7f0000000140)={0x2, 0x4e21, @initdev={0xac, 0x1e, 0x0, 0x0}}, 0x10, &(0x7f0000000f40)=[{&(0x7f00000002c0)="dc87ae05385c19d6e8ed2d5958c96165d8ab1a829a3cea158ff38ec71a1978f56531e0f25371cccf61b6700d1c9cc25ea72d3c24f0690df682b643222f4497125903e59147029069b74f63d35f24e307a77ebd42683eff8fdc98bd250930429ce6a0c3c5da7dee48c39b80038079d34d1ca0e2eb2a6dd682117e50249fa873add9f2946c24f62a1f98fd58e18d71029657e30e92c85f02e35b6321e799a38d06928a99affd91c706a903108bcec86bf20300ccaa20809fead1ce2a50bac9ee436fc900212feebdd56836b0d2143a5f615b3c2792a942cc217507ec191299e523ac289b3e0dfbb2a6dd3bcc78d16b28", 0xef}, {&(0x7f0000000fc0)="b39c1f56d08b488343fcccfa0c8fbbe27259dbbf04aa9b2613943c72272bc9ef7142fd774c551413f961b33a4bf9d6d690de08e031b27327b3319bef0373d269a18f063d2ad2cdf8b1c6c7047e350f578663cc3e12bca7d301e181fa6dcf4486d2af15d6b00b6f0afe2c9a520d5c9e9bfc5b387432d1254a98132509fc", 0x7d}, {&(0x7f0000000440)="ad61563d14c5e0fed504a2ec73618a9015f3e912ba0a2ae092082a2e1940a8780e3f4f97d7765f7516763f8fa6d2fd03a3b72b7903717e7e89776acf8bb423ec3f16fc370ec5844e560f434bb2c1cdebe592eba037852b252de0663eb45e28725c924b3771292da6070b51ceb564fabbe5d0f73e637ca014161d", 0x7a}, {&(0x7f0000000ec0)="c0217b9b9a52455554742a7fb0a53399b53a9e3b98f3a292f3391620bd2fc204a7d7d489e6c43186fb0e7004c5eadc3392077bcbf7e2a807591fcfebc5c03a2b8556158367090868c5b2dc603a03aee0d1c6f0d88ceb8f205885f71a678b8e9f667ea019ea3a46039d526d", 0x6b}, 
{&(0x7f00000005c0)="333425b8bf13b81ce96d57f28b0a0c2ead98f99c6357f6fcb7e507b81d9697228a99efec5f95c6d94ca345a5d35246d0724d2ad197acb1b548bae8474fd0fc1620b1d083cba852d761f1da5cf0bd1806a846f535d162c89a3221822fc1e7c776faac237149f66f59991eae894a29d4fe8c0f4e41d06b5e6638bdd86816509ab7e85032ae4c110506f81064bd99cafd357067b3bcdc4956d28d96468a33dd751f2103cfe108153f2858d8dfee23271178abdd9dc5c56c0404e5a592c0e437a15d1a94d3d1288e53d507a04ffb309ff382c0446a42d98f065644044481b1792d92e2b0337270271c4d2db49061f1e95becaa8c1c9e3ec243b4c1a2960b21", 0xfd}, {&(0x7f00000004c0)="16d722aa7b6d13f7c902e510ab49dab4a7ce5b384dfc86e048b35cc83588825884c1abf2fe179142f68ebb98972677c9d4fec1a31efa84", 0x37}], 0x6}}, {{&(0x7f0000000740)={0x2, 0x4e22, @loopback}, 0x10, &(0x7f00000008c0)=[{&(0x7f0000000780)="ea57116eaa1b0f0fd2c885d2654ca2e3a7a677ad0c0594c9121851f4626bb5d5f6d0bc7e8bf95cd17257449e1faa4cad", 0x30}, {&(0x7f00000007c0)="ad5e4ecbc071ed99de56c354712042e8b8fb1f7bc9d129f8abbc8c609cdd6e6fdad4100a8cdc4364b6609374d0d5ff14c413aa4845abd526bb211f1176361e5c2e45336b5c2a6dab88aa5758a43b6bd1b8edc49ddf1178bc452ebad98a4730cef6023bf1c138243e2528aa8744bac0a45897ceb6d4fea2d2ae0e0a7962a28129bae0a28f8f865fc6badad6b5e647b5dcac2cdf46333cbdaef6336132457dc51056147033b6e83ad5d60344df2ee7a1d32cb3fb49bd840a18c17665209aaabb4198f8d22925a9a6c4b27baae614eaa5a304019c651e100abfe7f58299f83388a97f8a5a194343f78045e51d698c51fe9902b7183442620bb5b44c59686cbbfe", 0xff}], 0x2}}, {{&(0x7f0000000900)={0x2, 0x4e20, @dev={0xac, 0x14, 0x14, 0x25}}, 0x10, &(0x7f0000000bc0)=[{&(0x7f0000000940)="9bbe318b08799c2b09893789f703", 0xe}, {&(0x7f0000000980)="adc3540f64c28db8cb5f98ec16b2d3d2783519282da6f1582fd6acdb2042f1ce4770b19079ab95e5a7a6571931f86c78ae7cf409be906214d3e709887b86304426095733c1d19aabee67c000716129ece11a6f7e7c15da004ad7360ceed8d7cd84dac44d32d23a826f4ce2f11a3b6d24258b27deda9b8bd976c0e3ce3d60c70f8fd68586363ae6ccc141f608f3f3fa527daf66099b23332b2285a5e18e2b6db87c88", 0xa2}, {&(0x7f0000000a40)="4d4f3a1c328254bed4b1c5450374bf5fa8b680b1c51b73d37cf632b43a54097963fa199855c8640fc0119f30cd416bfdb13fadc7cf8b3a71b9040dfeb3cbdd9c5efb779620008cecf1e52d05639eb8b0d1b85d9a3aad8f16c37e547756903c3864f43c7834539906076400741210f7f30b680465d7652cb629cb24c6dba1cefe2477aed6bad008ad939a356d661afe3ee60a949206f523daa2e50557053f497adfd34acccf0684524caf29f0e395b8cf9e40", 0xb2}, 
{&(0x7f0000001280)="78cb8d4da48da241aee34e5214efc55e817e5299aefe89a5fad5c369f097b0b4804e4e45a819add0e37f8316065d92648481ae4fd98455bd905e107d6bb4c2c47ce7c92c183978a55aed4e55e3d784b9545cb58dce3f668a9362f555cf78f5fcd411e4453377801fd92aa5a65d922ddea461a71039a429e79b874ef6290b6fadc6fc1af7fbe7763412209ae55212d97bcad4f7f5c31ccf7418ec7746193c0f8ff22ab19698aadf810d85cd9b765c619cd612ae26cd3d3c6cd92a346ced9ebc028e6cdc4affe5455a307983440a02071def9a6e183580d91235093ab9220088ebdeecef19f6ded9a14d9e988e9a0f6792aa1561e83da436e20db9e6a6e85d8155c73b20cdee5ced44f3fa7a5d570b6a1eaf8e7d601f1dc3bb6adb8a3faa44c5713aa2c14468f56e3b367381594f983e07cdb146babe5795a1daa110e52ad9449bec0007de93ff8dacf15130ab29591c83c4ed48f218f954e724d60328562eafbbc642348d5b521a2ad7c63530d73ec5d8eeef4c526a5902a8fdb4c53180d35211ace2c26e20c97aae6ed2c135c3ea4c65b6baef154a7997dc3dae44cd1175977f5c59ff64ad40a5b707cb3ac681356f92e3b1fe635cddad54406d2d831b28217c10aa8fa45981779175528e2f8686f384669e2deeea2fb286c52e2bc3f2d1d19257605372263333a5f0d1673630b9208877caddd546fb42775afd6f50eb35491422c0d8df3db5fbc71e461edae26091d5efcd4385cd636ffd3be606e362bce2cf4a66b84885e6f9658968ff01ee81ff967ae8365b79d55c8939745a8bede19e332aa7532cc0155cd5eb5480e32ff7a94b203e4db70784e32364c1f070b5af1c944e513bed80bb3a02cb722917181b8d313918dce28be4f815009b3026c787f345263c34f56b172ae62880cacc8e1338d7935a319bf375860b0d7f502cea86aec94d7fe6084fc21ef1ca8c0427a5952eebdb12e6cda414ca381afd21093f773244a3c7f632daddfc877fe6148b164906c6deb7bac84982691318c448b73bf634ba80819528d56063c5a760137ed5cc4c9c674e56168ff5a84c91681881733f4813b22961757f267c4ffc2798e6ad4dd21f27d0e7a61d7921d4f252b30b799e622041cb18426a12bd9f77be2b10e56ad9d26ab97eedc85ef92e6ec4212201766ddd826a012c3a5377a927d203f59a547449498c9091d0f7f76a8ed1fa6841b14d7086400b3c13f4c8d61edf1e477c1fcecc23ccb25b0a4476a4de55b2d45cde8dc8d3fb5c44d8fa1f2f8881f47fbe058ec1f66df6f24c00b904873965f5a6d3ae24d79a39c59b8122785d3d2a7986060a72b79572fe01a3aa43c8cdb45a565450f42e0f9f4120be3fa2fe4f2df848044f2034f01a618bdd86f93b4518955f695bb2a8a026da92c331d7e5daf90ca1875bea747097dfa16d3da173eb2e24f36d4ba9f567b3035734acb018cb2f99b6e2d22bc4b18cbd522d7abdbd2aaab40ecec9e2c9b6754027950a9726808239916d23c8be6eb902721d9983cda3821de1b7fc3132bd2ca421fc03eaffd6fb51db22ffe5a8603f48277ccad1a79722ef2ae469960a226ccd46cbbe15b1acacddc8d2863c3ecac6d99f83f204bd24c381045fef1832dc15374104a9000cde56a0555b7408b00a56d61a348359a4e30de2605d39b9694999b607a9d5a559071c95ec4f1ece63245141497ea486394daee029a39b59c4f283f3b93ba675b5f1e5cd76e4858a0d0af22a2619d3403d2e75f5803b4a765c68e58578b6c3ae7a0611a7a89740ee0dd93c195019ce3e563507e3924f51a8695a79e6a2c05f072d7813d3ba215926bbdb345818216cc7ae39d46f11e82775f3bbe3679b4eb4faa4d2b5c1fe1595a899847fbec391a8235b6f37bd2fc527a44a54978f63d6e241e4e12157ea2e53647f88d4bb9aca48955317576e912335b20f09d5bf6ea4ccc7f83f7ecf3cfc039166f1c88947679fb16864b8257c2af3c807560d30eb5d226ac3e834a9ebde3159d2b1c9de963b3217c52e005816c339f672b137902991deab2ba7d8a837d5a68345f9db6aadff13647654705e4739bfa50f6e6065361255c26658952b00f770be26764d8e3730e085ca68c541ab04ac5985694fca2545485cc132ae33fb0ffc40e3535b318632ba4be7e0fbb77b57bb1889479fd89a72912b7c81ae8dea9dc2083edc597a8a4c0327d5e2d26a270b1688365a1d551657c36b16dc321c7361085b660d1c6e452148f03fbd67142a59c73e49bf5f1182f5688dfd557bd8448bb09b15e13339fca077562d120614c535d748cb338c2bef477b606082b10e51634ffa639bc728ece96189f1a3f48d9e06decd47dc32f01e51ffc531ea9e4a92a0f3b136c0597f4e85e0f00d45061660e80f6417fe56ebda9a2532eec07fdb7b16a9e156b1f95f0df46286434e96038ca06bb0a5b1c721356c15c51122313830cd75fd6fd64800e8281612e72ad99a8aef10a39a3bad8f92e031e
d6321081b9d1f0c5c64dc406affa89fec40cf150e9035c8658a7d0a63da5bd713530475e80699261e3cdb88e8563ff9ca792a62d83185f686fac1d012b630c1b919baaf494ead9624932985436eea97019a8bd1b258bdd765c44c93952409cfc3f6506510ab6b48d51885d9af2a5033bfc204390e2896341af68ec7d5f95ca7a5cf40e5c1361f5c9224008ba54d5c0e299935f1c3f779d942b546036277a1e3065ca664f2a234ae7852c79bf681c6745a3568a8ce10b9453d05ce4fbd5631c7c5d22e9df045b34c8fedebb9f0b6232f7949462f11ffb0c8b8ea2c716803d9bb5bc6c11ed3ff9031d1a26e3b0e89d51d7e2aca47ae6323967c547d07345d0b93d9fa1cd3c478bc49e354641a43cac4f8f61b637a1fdfd6157e0e9b6d6afcde54ca9319774ce437b80e5f65e7b863eec04e67c362bc21c63f368d9d40bffaab025f220f5562d0a1441cfd484f074396b68c21dffc5a4dfb071b8de2f3aa89b747593448bfcdc254febbb8207d19c8df54b7a3f208e0686784d21cb75e7321bb7cb041719223abeb9e73f64f085122fd837c88a2aa0dcc4da793ed4155a2fc11c82405cc52652400d44050d2b54883c09123c3c3bb8f4c6a72f737b1d81debea5472b0b041ed5ac0555a1f39074c5dcd88f4d7d94363261dfc17c37917a937318390fbfb5480754eaa2b53b1545d516563adfeba4fbcdade5e79aed55e5a60889b4f69c814da4f57d5cc3daefd55b93efca01f013334ac882060e1aa50f5ddf0f7e6b79b3ff53688b76c3008fa7a5e5f6497192a1c04ed4fe85ec026871d2478b3d27199fb938a97c4ae0cd59d29e810bee98afba9e052e144de58aad7e2e8748419ae2afa997a59576bf7ce4df35328a38729495750028bba8838c3835875a0f3a7bc19da0e798e86c1ada59fb5b31a86f6daa34ac3a9a2729935c662bf82e3b79d3737ebf9bb62713bbc7285c48d941363b0d67eaa23fb65998ce367da525383877328d466fb95408b7d1160036e1faa1776e9bf6a838eda4ce9e60f00a805735734bd9a7a8282af21174e1e6406455a1dc5edbe4bf94e9160725713ccfe356bbf5363adf1f18856b04049cbe021d5e3d0b7119724949495db9a8d6b53868103cfcfbcf178e8f8180d4b9b9a06180d69412de6ad3ea6fe40a299a170fe531aba2a5937fe1b951e218bccfc3510f242f6dd9cb9485fb72014192b1309fb357beab8055dc0229d9bc4994373eea2a7e06ba07d284ada708fd76606653849d2a7f337a1a1963b88ba9919ee207be74735ee5b6b1fd6fcd5b9c5145eee649a61b57aa90530061a65edbe6fe285cea5563870d97c339238f4c27eff1db01d0e7cd4ee47ab70a440fe21b71eb90afc2fb9a70a26797c083cce7f675c42db8482beeacfeb88080d6a552202d19cf23831fe2c82fbf9dfca67c72a159c636df11b75cf695fe91753e34cce8ee13227a0968c90582f830ef2fa5c63808e611866b6cb75479381f9fef1e4e9eeb414a2d9c9210e75daa8131f1795e40fdb876245d1379c9d653c8167228d512a9453eeffc263b7c7b9e5a8f4d8faf397cf602b5695998d915fa6b09397b68ded52c82869c339321c09b938926cabbefba5439368cc939ec7223bbc1e806227fc076e00df96a8363e0c62bda5838a70195a40753d9f5ffb075ff466b260949967d1eafc184caa1f9e04a4af1158fbaacc171aba67b61ada521e0377cd0c924c2d76cd6ba470361f5c179103c0a6ccb9aaf0e13d6272cfad4fc640548b82cb54401402e291379c45e1aa8fd3ccb70dc8bb3797feb1bb15b5c9b933e4d985ca3965d20fe8327eb088321a3668c276deb1681ac59902f2e90f17cc575875ed8f9ceefe9fc21cd4d6758cbd7d40648180be35d79b23a382ce9466aa97438da29a4506272aa6c74c1691b96253fbcd61711eb19dec67bafef7cf771a1c929ae56151a3d8182b0c776b7c5e49703dd5272b4b6888df337cef443d01ebc6abb17a64c3cc4741de9980d9fddff409e8fbc47fa3c995a9dffe33b06261974460d93ba1abfbe656768630e256d9a4183dc66a81a8b293e5f4f74dc795d8a46265d2aff4c0430f8a085d0e97857acf474e513d9c69b233db05b476172a57fdb8273e496165d398105c24aaa7be08bae51651603b90699e02667cf2ed17eee6db95ee31a5cb379ebd66a407f3858e46edc61dce48493883822cf6ae539ed0dd8adaa9c09896d02224c2c9bc9af43691486095436df7f931da2444dabfa56115c68babb9a3828fd1c9d355a1af50905f8a62aa55cdafe269951bc13c1b80f1a37aa6de192f0000e3c58cfe478e50fefa290f8df749e2df04f17fe2d7edb20fdf426e2eb9101a007d2ab9fd9dfeba18aafee6c2465f749f53aa86afd11d76631c76700b8916aa381d84a8a262ea771c46403839d32e397b5b11ac0c1fbd74b58054abd47cf76ffa7abc4eb83180e39f92dcf5d17d6df06587a3f2d4f0ac5ea7b68c66ceb46599712134bb9b5dad5636b9bb
5eabc9b97ac0140e4dfc84a6af1efd219877e5faa362539b1bb327118b1c9396edeee9dc71e35158241cbee145a492bdbbd2af1a75c006ee269d2dcdb0b661e14b81757a4286b26087349969e9bb76e6d3e36098ad0bfb136b5ce120a050f1e99342b8825fba27d07b8f6a57780ac44463e8758f3281f151c79a71c54d9b280c3c1dbdcb971ac57dc77fe857a61c56529efc1c9338e07fc70d54b94d79708d1fa6f0af7846f29ba0cd3ccb316464b0ed8c885c8b659ccafa72d2c663d7d9b61744d17ca7c74724853ea69a31629238d9db1d9a24d11294871e24779b1bbd26bd2ad300c85ff7daae383efc6f82cf653d8d6f36413937a7653d73aeee8c53b09f882b4e582f2f1b0133ee4cc00eee7a1a94994d75b560168a947edfda7f53363ae702f88ead37474793687abce9307ef8689ce2aa1f816d5ab533b4435c15ece00ff86f19cda99b0ba39db45725ae6f891f94e2db4ddb960e8432dc5871a3a18d37341ff1014c0e628f5d49f80e872b86d7c4e784b9fd778d748282efe6bff88a613130bec7b6041e4875ee10952b2502b6fdb5539cb93f87478319dc8d3765b6e8e26a138c69baa30be8135eaf8f3718cb6093310818bc6ff66feb1cc2c4ef1398e1550c6dce31e6bbaeee47547a545df65e017ca7ab0cb0838534ea305cbc4766d2677d04d603136af6112aabfb0095ece3e41ac8dd2293acd53cb7872cce4b00d4cfaae08de22e140e912452a8ca21aa1141f8904db3c3a496d9c4dc552eb35467c886f01172", 0x1000}, {&(0x7f0000000b00)="3b539000a8e20f63fa678928f496ffc4e95e5b7c0c73c9e509d20fa625602ea6e012eb13a152afc45e088befde57a7004a0a8edf69ce34e94d5d9b61eae40b09d88b766ec439041d2ec25354e72915b66fd514a11d1875c1a159670b559caa3364c894787db35c114ca67ecea52bbb76439ec6f70c71842b78f8d94e1ac7bce1ec0298041e389a487a171f887c25ffec08b4afa078ca5b83e3f3ec30e937fe60c66427860852fb0d71888e5f20f8802d743be27d", 0xb4}], 0x5, &(0x7f0000000cc0)=[@ip_retopts={{0x24, 0x0, 0x7, {[@timestamp={0x44, 0x10, 0x8b, 0x0, 0x8, [0x2c78, 0x8, 0x7]}, @ra={0x94, 0x4, 0x7}]}}}, @ip_pktinfo={{0x1c, 0x0, 0x8, {r6, @local, @rand_addr=0x64010102}}}, @ip_ttl={{0x14, 0x0, 0x2, 0xfffffff8}}, @ip_pktinfo={{0x1c, 0x0, 0x8, {r5, @private=0xa010100, @broadcast}}}, @ip_tos_int={{0x14, 0x0, 0x1, 0x3}}, @ip_tos_u8={{0x11, 0x0, 0x1, 0x5}}], 0xb0}}], 0x5, 0x0) socket$packet(0x11, 0x2, 0x300) (async) setsockopt$packet_tx_ring(r0, 0x107, 0x5, &(0x7f0000000100)=@req3={0x8000, 0x6, 0x8000, 0x6}, 0x1c) (async) sendto$packet(r0, &(0x7f0000000580)="572e6ce9426bb12116f72baef9cd5c7f31e018", 0x13, 0x10, &(0x7f00000006c0)={0x11, 0x1, 0x0, 0x1, 0x4, 0x6, @random="f396d2d0759e"}, 0x14) (async) socket$inet6(0xa, 0x2, 0x0) (async) socket$nl_route(0x10, 0x3, 0x0) (async) socket(0x26, 0x80000, 0x0) (async) syz_genetlink_get_family_id$devlink(&(0x7f0000000240), r3) (async) getsockname$packet(r3, &(0x7f0000000100)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f00000000c0)=0x14) (async) getsockname$packet(r3, &(0x7f0000000280)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000200)=0x14) (async) sendmsg$nl_route(r2, &(0x7f0000000080)={0x0, 0x0, &(0x7f0000000500)={&(0x7f0000000180)=ANY=[@ANYBLOB="7c0000001000390400"/20, @ANYRES32=r5, @ANYBLOB="83080000000000005c0012800b000100697036746e6c00004c00028008000700ffff000014000300", @ANYRES32=r4], 0x7c}}, 0x0) (async) getsockopt$PNPIPE_IFINDEX(0xffffffffffffffff, 0x113, 0x2, &(0x7f0000000c40), &(0x7f0000000c80)=0x4) (async) openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000140)='memory.events\x00', 0x7a05, 0x1700) (async) openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) (async) write$cgroup_int(r7, &(0x7f0000000200), 0xf000) (async) sendfile(r7, r8, 0x0, 0xf03b0000) (async) openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) (async) sendfile(r9, r8, &(0x7f00000002c0)=0x335773c3, 0x8) (async) bind$packet(r9, &(0x7f0000000540)={0x11, 0x1a, r4, 0x1, 0x6, 
0x6, @random="73e9ab7fb3d6"}, 0x14) (async) sendmmsg$inet(r1, &(0x7f0000000d80)=[{{&(0x7f0000000040)={0x2, 0x63, @remote}, 0x10, 0x0, 0x0, &(0x7f0000000000)=[@ip_pktinfo={{0x1c, 0x0, 0x8, {r5, @empty, @initdev={0xac, 0x1e, 0x0, 0x0}}}}], 0x20}}, {{0x0, 0x0, 0x0, 0x0, 0xfffffffffffffffc}}, {{&(0x7f0000000140)={0x2, 0x4e21, @initdev={0xac, 0x1e, 0x0, 0x0}}, 0x10, &(0x7f0000000f40)=[{&(0x7f00000002c0)="dc87ae05385c19d6e8ed2d5958c96165d8ab1a829a3cea158ff38ec71a1978f56531e0f25371cccf61b6700d1c9cc25ea72d3c24f0690df682b643222f4497125903e59147029069b74f63d35f24e307a77ebd42683eff8fdc98bd250930429ce6a0c3c5da7dee48c39b80038079d34d1ca0e2eb2a6dd682117e50249fa873add9f2946c24f62a1f98fd58e18d71029657e30e92c85f02e35b6321e799a38d06928a99affd91c706a903108bcec86bf20300ccaa20809fead1ce2a50bac9ee436fc900212feebdd56836b0d2143a5f615b3c2792a942cc217507ec191299e523ac289b3e0dfbb2a6dd3bcc78d16b28", 0xef}, {&(0x7f0000000fc0)="b39c1f56d08b488343fcccfa0c8fbbe27259dbbf04aa9b2613943c72272bc9ef7142fd774c551413f961b33a4bf9d6d690de08e031b27327b3319bef0373d269a18f063d2ad2cdf8b1c6c7047e350f578663cc3e12bca7d301e181fa6dcf4486d2af15d6b00b6f0afe2c9a520d5c9e9bfc5b387432d1254a98132509fc", 0x7d}, {&(0x7f0000000440)="ad61563d14c5e0fed504a2ec73618a9015f3e912ba0a2ae092082a2e1940a8780e3f4f97d7765f7516763f8fa6d2fd03a3b72b7903717e7e89776acf8bb423ec3f16fc370ec5844e560f434bb2c1cdebe592eba037852b252de0663eb45e28725c924b3771292da6070b51ceb564fabbe5d0f73e637ca014161d", 0x7a}, {&(0x7f0000000ec0)="c0217b9b9a52455554742a7fb0a53399b53a9e3b98f3a292f3391620bd2fc204a7d7d489e6c43186fb0e7004c5eadc3392077bcbf7e2a807591fcfebc5c03a2b8556158367090868c5b2dc603a03aee0d1c6f0d88ceb8f205885f71a678b8e9f667ea019ea3a46039d526d", 0x6b}, {&(0x7f00000005c0)="333425b8bf13b81ce96d57f28b0a0c2ead98f99c6357f6fcb7e507b81d9697228a99efec5f95c6d94ca345a5d35246d0724d2ad197acb1b548bae8474fd0fc1620b1d083cba852d761f1da5cf0bd1806a846f535d162c89a3221822fc1e7c776faac237149f66f59991eae894a29d4fe8c0f4e41d06b5e6638bdd86816509ab7e85032ae4c110506f81064bd99cafd357067b3bcdc4956d28d96468a33dd751f2103cfe108153f2858d8dfee23271178abdd9dc5c56c0404e5a592c0e437a15d1a94d3d1288e53d507a04ffb309ff382c0446a42d98f065644044481b1792d92e2b0337270271c4d2db49061f1e95becaa8c1c9e3ec243b4c1a2960b21", 0xfd}, {&(0x7f00000004c0)="16d722aa7b6d13f7c902e510ab49dab4a7ce5b384dfc86e048b35cc83588825884c1abf2fe179142f68ebb98972677c9d4fec1a31efa84", 0x37}], 0x6}}, {{&(0x7f0000000740)={0x2, 0x4e22, @loopback}, 0x10, &(0x7f00000008c0)=[{&(0x7f0000000780)="ea57116eaa1b0f0fd2c885d2654ca2e3a7a677ad0c0594c9121851f4626bb5d5f6d0bc7e8bf95cd17257449e1faa4cad", 0x30}, {&(0x7f00000007c0)="ad5e4ecbc071ed99de56c354712042e8b8fb1f7bc9d129f8abbc8c609cdd6e6fdad4100a8cdc4364b6609374d0d5ff14c413aa4845abd526bb211f1176361e5c2e45336b5c2a6dab88aa5758a43b6bd1b8edc49ddf1178bc452ebad98a4730cef6023bf1c138243e2528aa8744bac0a45897ceb6d4fea2d2ae0e0a7962a28129bae0a28f8f865fc6badad6b5e647b5dcac2cdf46333cbdaef6336132457dc51056147033b6e83ad5d60344df2ee7a1d32cb3fb49bd840a18c17665209aaabb4198f8d22925a9a6c4b27baae614eaa5a304019c651e100abfe7f58299f83388a97f8a5a194343f78045e51d698c51fe9902b7183442620bb5b44c59686cbbfe", 0xff}], 0x2}}, {{&(0x7f0000000900)={0x2, 0x4e20, @dev={0xac, 0x14, 0x14, 0x25}}, 0x10, &(0x7f0000000bc0)=[{&(0x7f0000000940)="9bbe318b08799c2b09893789f703", 0xe}, 
{&(0x7f0000000980)="adc3540f64c28db8cb5f98ec16b2d3d2783519282da6f1582fd6acdb2042f1ce4770b19079ab95e5a7a6571931f86c78ae7cf409be906214d3e709887b86304426095733c1d19aabee67c000716129ece11a6f7e7c15da004ad7360ceed8d7cd84dac44d32d23a826f4ce2f11a3b6d24258b27deda9b8bd976c0e3ce3d60c70f8fd68586363ae6ccc141f608f3f3fa527daf66099b23332b2285a5e18e2b6db87c88", 0xa2}, {&(0x7f0000000a40)="4d4f3a1c328254bed4b1c5450374bf5fa8b680b1c51b73d37cf632b43a54097963fa199855c8640fc0119f30cd416bfdb13fadc7cf8b3a71b9040dfeb3cbdd9c5efb779620008cecf1e52d05639eb8b0d1b85d9a3aad8f16c37e547756903c3864f43c7834539906076400741210f7f30b680465d7652cb629cb24c6dba1cefe2477aed6bad008ad939a356d661afe3ee60a949206f523daa2e50557053f497adfd34acccf0684524caf29f0e395b8cf9e40", 0xb2}, {&(0x7f0000001280)="78cb8d4da48da241aee34e5214efc55e817e5299aefe89a5fad5c369f097b0b4804e4e45a819add0e37f8316065d92648481ae4fd98455bd905e107d6bb4c2c47ce7c92c183978a55aed4e55e3d784b9545cb58dce3f668a9362f555cf78f5fcd411e4453377801fd92aa5a65d922ddea461a71039a429e79b874ef6290b6fadc6fc1af7fbe7763412209ae55212d97bcad4f7f5c31ccf7418ec7746193c0f8ff22ab19698aadf810d85cd9b765c619cd612ae26cd3d3c6cd92a346ced9ebc028e6cdc4affe5455a307983440a02071def9a6e183580d91235093ab9220088ebdeecef19f6ded9a14d9e988e9a0f6792aa1561e83da436e20db9e6a6e85d8155c73b20cdee5ced44f3fa7a5d570b6a1eaf8e7d601f1dc3bb6adb8a3faa44c5713aa2c14468f56e3b367381594f983e07cdb146babe5795a1daa110e52ad9449bec0007de93ff8dacf15130ab29591c83c4ed48f218f954e724d60328562eafbbc642348d5b521a2ad7c63530d73ec5d8eeef4c526a5902a8fdb4c53180d35211ace2c26e20c97aae6ed2c135c3ea4c65b6baef154a7997dc3dae44cd1175977f5c59ff64ad40a5b707cb3ac681356f92e3b1fe635cddad54406d2d831b28217c10aa8fa45981779175528e2f8686f384669e2deeea2fb286c52e2bc3f2d1d19257605372263333a5f0d1673630b9208877caddd546fb42775afd6f50eb35491422c0d8df3db5fbc71e461edae26091d5efcd4385cd636ffd3be606e362bce2cf4a66b84885e6f9658968ff01ee81ff967ae8365b79d55c8939745a8bede19e332aa7532cc0155cd5eb5480e32ff7a94b203e4db70784e32364c1f070b5af1c944e513bed80bb3a02cb722917181b8d313918dce28be4f815009b3026c787f345263c34f56b172ae62880cacc8e1338d7935a319bf375860b0d7f502cea86aec94d7fe6084fc21ef1ca8c0427a5952eebdb12e6cda414ca381afd21093f773244a3c7f632daddfc877fe6148b164906c6deb7bac84982691318c448b73bf634ba80819528d56063c5a760137ed5cc4c9c674e56168ff5a84c91681881733f4813b22961757f267c4ffc2798e6ad4dd21f27d0e7a61d7921d4f252b30b799e622041cb18426a12bd9f77be2b10e56ad9d26ab97eedc85ef92e6ec4212201766ddd826a012c3a5377a927d203f59a547449498c9091d0f7f76a8ed1fa6841b14d7086400b3c13f4c8d61edf1e477c1fcecc23ccb25b0a4476a4de55b2d45cde8dc8d3fb5c44d8fa1f2f8881f47fbe058ec1f66df6f24c00b904873965f5a6d3ae24d79a39c59b8122785d3d2a7986060a72b79572fe01a3aa43c8cdb45a565450f42e0f9f4120be3fa2fe4f2df848044f2034f01a618bdd86f93b4518955f695bb2a8a026da92c331d7e5daf90ca1875bea747097dfa16d3da173eb2e24f36d4ba9f567b3035734acb018cb2f99b6e2d22bc4b18cbd522d7abdbd2aaab40ecec9e2c9b6754027950a9726808239916d23c8be6eb902721d9983cda3821de1b7fc3132bd2ca421fc03eaffd6fb51db22ffe5a8603f48277ccad1a79722ef2ae469960a226ccd46cbbe15b1acacddc8d2863c3ecac6d99f83f204bd24c381045fef1832dc15374104a9000cde56a0555b7408b00a56d61a348359a4e30de2605d39b9694999b607a9d5a559071c95ec4f1ece63245141497ea486394daee029a39b59c4f283f3b93ba675b5f1e5cd76e4858a0d0af22a2619d3403d2e75f5803b4a765c68e58578b6c3ae7a0611a7a89740ee0dd93c195019ce3e563507e3924f51a8695a79e6a2c05f072d7813d3ba215926bbdb345818216cc7ae39d46f11e82775f3bbe3679b4eb4faa4d2b5c1fe1595a899847fbec391a8235b6f37bd2fc527a44a54978f63d6e241e4e12157ea2e53647f88d4bb9aca48955317576e912335b20f09d5bf6ea4ccc7f83f7ecf3cfc039166f1c889476
79fb16864b8257c2af3c807560d30eb5d226ac3e834a9ebde3159d2b1c9de963b3217c52e005816c339f672b137902991deab2ba7d8a837d5a68345f9db6aadff13647654705e4739bfa50f6e6065361255c26658952b00f770be26764d8e3730e085ca68c541ab04ac5985694fca2545485cc132ae33fb0ffc40e3535b318632ba4be7e0fbb77b57bb1889479fd89a72912b7c81ae8dea9dc2083edc597a8a4c0327d5e2d26a270b1688365a1d551657c36b16dc321c7361085b660d1c6e452148f03fbd67142a59c73e49bf5f1182f5688dfd557bd8448bb09b15e13339fca077562d120614c535d748cb338c2bef477b606082b10e51634ffa639bc728ece96189f1a3f48d9e06decd47dc32f01e51ffc531ea9e4a92a0f3b136c0597f4e85e0f00d45061660e80f6417fe56ebda9a2532eec07fdb7b16a9e156b1f95f0df46286434e96038ca06bb0a5b1c721356c15c51122313830cd75fd6fd64800e8281612e72ad99a8aef10a39a3bad8f92e031ed6321081b9d1f0c5c64dc406affa89fec40cf150e9035c8658a7d0a63da5bd713530475e80699261e3cdb88e8563ff9ca792a62d83185f686fac1d012b630c1b919baaf494ead9624932985436eea97019a8bd1b258bdd765c44c93952409cfc3f6506510ab6b48d51885d9af2a5033bfc204390e2896341af68ec7d5f95ca7a5cf40e5c1361f5c9224008ba54d5c0e299935f1c3f779d942b546036277a1e3065ca664f2a234ae7852c79bf681c6745a3568a8ce10b9453d05ce4fbd5631c7c5d22e9df045b34c8fedebb9f0b6232f7949462f11ffb0c8b8ea2c716803d9bb5bc6c11ed3ff9031d1a26e3b0e89d51d7e2aca47ae6323967c547d07345d0b93d9fa1cd3c478bc49e354641a43cac4f8f61b637a1fdfd6157e0e9b6d6afcde54ca9319774ce437b80e5f65e7b863eec04e67c362bc21c63f368d9d40bffaab025f220f5562d0a1441cfd484f074396b68c21dffc5a4dfb071b8de2f3aa89b747593448bfcdc254febbb8207d19c8df54b7a3f208e0686784d21cb75e7321bb7cb041719223abeb9e73f64f085122fd837c88a2aa0dcc4da793ed4155a2fc11c82405cc52652400d44050d2b54883c09123c3c3bb8f4c6a72f737b1d81debea5472b0b041ed5ac0555a1f39074c5dcd88f4d7d94363261dfc17c37917a937318390fbfb5480754eaa2b53b1545d516563adfeba4fbcdade5e79aed55e5a60889b4f69c814da4f57d5cc3daefd55b93efca01f013334ac882060e1aa50f5ddf0f7e6b79b3ff53688b76c3008fa7a5e5f6497192a1c04ed4fe85ec026871d2478b3d27199fb938a97c4ae0cd59d29e810bee98afba9e052e144de58aad7e2e8748419ae2afa997a59576bf7ce4df35328a38729495750028bba8838c3835875a0f3a7bc19da0e798e86c1ada59fb5b31a86f6daa34ac3a9a2729935c662bf82e3b79d3737ebf9bb62713bbc7285c48d941363b0d67eaa23fb65998ce367da525383877328d466fb95408b7d1160036e1faa1776e9bf6a838eda4ce9e60f00a805735734bd9a7a8282af21174e1e6406455a1dc5edbe4bf94e9160725713ccfe356bbf5363adf1f18856b04049cbe021d5e3d0b7119724949495db9a8d6b53868103cfcfbcf178e8f8180d4b9b9a06180d69412de6ad3ea6fe40a299a170fe531aba2a5937fe1b951e218bccfc3510f242f6dd9cb9485fb72014192b1309fb357beab8055dc0229d9bc4994373eea2a7e06ba07d284ada708fd76606653849d2a7f337a1a1963b88ba9919ee207be74735ee5b6b1fd6fcd5b9c5145eee649a61b57aa90530061a65edbe6fe285cea5563870d97c339238f4c27eff1db01d0e7cd4ee47ab70a440fe21b71eb90afc2fb9a70a26797c083cce7f675c42db8482beeacfeb88080d6a552202d19cf23831fe2c82fbf9dfca67c72a159c636df11b75cf695fe91753e34cce8ee13227a0968c90582f830ef2fa5c63808e611866b6cb75479381f9fef1e4e9eeb414a2d9c9210e75daa8131f1795e40fdb876245d1379c9d653c8167228d512a9453eeffc263b7c7b9e5a8f4d8faf397cf602b5695998d915fa6b09397b68ded52c82869c339321c09b938926cabbefba5439368cc939ec7223bbc1e806227fc076e00df96a8363e0c62bda5838a70195a40753d9f5ffb075ff466b260949967d1eafc184caa1f9e04a4af1158fbaacc171aba67b61ada521e0377cd0c924c2d76cd6ba470361f5c179103c0a6ccb9aaf0e13d6272cfad4fc640548b82cb54401402e291379c45e1aa8fd3ccb70dc8bb3797feb1bb15b5c9b933e4d985ca3965d20fe8327eb088321a3668c276deb1681ac59902f2e90f17cc575875ed8f9ceefe9fc21cd4d6758cbd7d40648180be35d79b23a382ce9466aa97438da29a4506272aa6c74c1691b96253fbcd61711eb19dec67bafef7cf771a1c929ae56151a3d8182b0c776b7c5e49703dd5272b4b6888df337cef443d01
ebc6abb17a64c3cc4741de9980d9fddff409e8fbc47fa3c995a9dffe33b06261974460d93ba1abfbe656768630e256d9a4183dc66a81a8b293e5f4f74dc795d8a46265d2aff4c0430f8a085d0e97857acf474e513d9c69b233db05b476172a57fdb8273e496165d398105c24aaa7be08bae51651603b90699e02667cf2ed17eee6db95ee31a5cb379ebd66a407f3858e46edc61dce48493883822cf6ae539ed0dd8adaa9c09896d02224c2c9bc9af43691486095436df7f931da2444dabfa56115c68babb9a3828fd1c9d355a1af50905f8a62aa55cdafe269951bc13c1b80f1a37aa6de192f0000e3c58cfe478e50fefa290f8df749e2df04f17fe2d7edb20fdf426e2eb9101a007d2ab9fd9dfeba18aafee6c2465f749f53aa86afd11d76631c76700b8916aa381d84a8a262ea771c46403839d32e397b5b11ac0c1fbd74b58054abd47cf76ffa7abc4eb83180e39f92dcf5d17d6df06587a3f2d4f0ac5ea7b68c66ceb46599712134bb9b5dad5636b9bb5eabc9b97ac0140e4dfc84a6af1efd219877e5faa362539b1bb327118b1c9396edeee9dc71e35158241cbee145a492bdbbd2af1a75c006ee269d2dcdb0b661e14b81757a4286b26087349969e9bb76e6d3e36098ad0bfb136b5ce120a050f1e99342b8825fba27d07b8f6a57780ac44463e8758f3281f151c79a71c54d9b280c3c1dbdcb971ac57dc77fe857a61c56529efc1c9338e07fc70d54b94d79708d1fa6f0af7846f29ba0cd3ccb316464b0ed8c885c8b659ccafa72d2c663d7d9b61744d17ca7c74724853ea69a31629238d9db1d9a24d11294871e24779b1bbd26bd2ad300c85ff7daae383efc6f82cf653d8d6f36413937a7653d73aeee8c53b09f882b4e582f2f1b0133ee4cc00eee7a1a94994d75b560168a947edfda7f53363ae702f88ead37474793687abce9307ef8689ce2aa1f816d5ab533b4435c15ece00ff86f19cda99b0ba39db45725ae6f891f94e2db4ddb960e8432dc5871a3a18d37341ff1014c0e628f5d49f80e872b86d7c4e784b9fd778d748282efe6bff88a613130bec7b6041e4875ee10952b2502b6fdb5539cb93f87478319dc8d3765b6e8e26a138c69baa30be8135eaf8f3718cb6093310818bc6ff66feb1cc2c4ef1398e1550c6dce31e6bbaeee47547a545df65e017ca7ab0cb0838534ea305cbc4766d2677d04d603136af6112aabfb0095ece3e41ac8dd2293acd53cb7872cce4b00d4cfaae08de22e140e912452a8ca21aa1141f8904db3c3a496d9c4dc552eb35467c886f01172", 0x1000}, {&(0x7f0000000b00)="3b539000a8e20f63fa678928f496ffc4e95e5b7c0c73c9e509d20fa625602ea6e012eb13a152afc45e088befde57a7004a0a8edf69ce34e94d5d9b61eae40b09d88b766ec439041d2ec25354e72915b66fd514a11d1875c1a159670b559caa3364c894787db35c114ca67ecea52bbb76439ec6f70c71842b78f8d94e1ac7bce1ec0298041e389a487a171f887c25ffec08b4afa078ca5b83e3f3ec30e937fe60c66427860852fb0d71888e5f20f8802d743be27d", 0xb4}], 0x5, &(0x7f0000000cc0)=[@ip_retopts={{0x24, 0x0, 0x7, {[@timestamp={0x44, 0x10, 0x8b, 0x0, 0x8, [0x2c78, 0x8, 0x7]}, @ra={0x94, 0x4, 0x7}]}}}, @ip_pktinfo={{0x1c, 0x0, 0x8, {r6, @local, @rand_addr=0x64010102}}}, @ip_ttl={{0x14, 0x0, 0x2, 0xfffffff8}}, @ip_pktinfo={{0x1c, 0x0, 0x8, {r5, @private=0xa010100, @broadcast}}}, @ip_tos_int={{0x14, 0x0, 0x1, 0x3}}, @ip_tos_u8={{0x11, 0x0, 0x1, 0x5}}], 0xb0}}], 0x5, 0x0) (async) [ 2892.022526][T23568] bond1120: entered promiscuous mode [ 2892.028896][T23568] 8021q: adding VLAN 0 to HW filter on device bond1120 [ 2892.164254][T23569] bond1120: (slave bridge1146): making interface the new active one [ 2892.173768][T23569] bridge1146: entered promiscuous mode [ 2892.190650][T23569] bond1120: (slave bridge1146): Enslaving as an active interface with an up link 17:04:17 executing program 5: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", 
@ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x3f6, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) [ 2892.297955][T23572] bond686: entered promiscuous mode [ 2892.304977][T23572] 8021q: adding VLAN 0 to HW filter on device bond686 17:04:17 executing program 4: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x28a, 0x0, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0x2d}]}, 0x3c}}, 0x0) [ 2892.375695][T23577] bond1047: entered promiscuous mode [ 2892.382878][T23577] 8021q: adding VLAN 0 to HW filter on device bond1047 [ 2892.397031][T23582] netlink: 56 bytes leftover after parsing attributes in process `syz-executor.3'. 17:04:17 executing program 3: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket(0x0, 0x0, 0x0) sendmsg$nl_route(0xffffffffffffffff, 0x0, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f00000000c0)=ANY=[@ANYBLOB="5800000010001fff00"/20, @ANYRES32=0x0, @ANYBLOB="000000000000000075ff12800b0001006261746164760000280002800c0001004241544d414e5f56040001004241544d414e5f560c0001004241544d414e5f56acb28478fbaa1ab308f4b2eff3f36558dbec1c0c16ff78dfbd29c263e0c8a05b551d40933dcaaf26a4e704"], 0x58}}, 0x0) ioctl$sock_kcm_SIOCKCMCLONE(r1, 0x89e2, &(0x7f0000000000)={0xffffffffffffffff}) getsockname$packet(r2, &(0x7f0000000040)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @link_local}, &(0x7f0000000080)=0x14) socket$netlink(0x10, 0x3, 0x0) (async) socket(0x0, 0x0, 0x0) (async) sendmsg$nl_route(0xffffffffffffffff, 0x0, 0x0) (async) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f00000000c0)=ANY=[@ANYBLOB="5800000010001fff00"/20, @ANYRES32=0x0, @ANYBLOB="000000000000000075ff12800b0001006261746164760000280002800c0001004241544d414e5f56040001004241544d414e5f560c0001004241544d414e5f56acb28478fbaa1ab308f4b2eff3f36558dbec1c0c16ff78dfbd29c263e0c8a05b551d40933dcaaf26a4e704"], 0x58}}, 0x0) (async) ioctl$sock_kcm_SIOCKCMCLONE(r1, 0x89e2, &(0x7f0000000000)) (async) getsockname$packet(r2, &(0x7f0000000040)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @link_local}, &(0x7f0000000080)=0x14) (async) [ 2892.584195][T23584] bond1047: (slave bridge1113): making interface the new active one [ 2892.597221][T23584] bridge1113: entered promiscuous mode [ 2892.626392][T23584] bond1047: (slave bridge1113): Enslaving as an active interface with an up link 17:04:17 executing program 2: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, 
@broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x9e, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) [ 2892.712327][T23588] bond810: entered promiscuous mode 17:04:17 executing program 0: r0 = socket$packet(0x11, 0x2, 0x300) setsockopt$packet_tx_ring(r0, 0x107, 0x5, &(0x7f0000000100)=@req3={0x8000, 0x6, 0x8000, 0x6}, 0x1c) sendto$packet(r0, &(0x7f0000000580)="572e6ce9426bb12116f72baef9cd5c7f31e018", 0x13, 0x10, &(0x7f00000006c0)={0x11, 0x1, 0x0, 0x1, 0x4, 0x6, @random="f396d2d0759e"}, 0x14) (async) r1 = socket$inet6(0xa, 0x2, 0x0) (async) r2 = socket$nl_route(0x10, 0x3, 0x0) (async) r3 = socket(0x26, 0x80000, 0x0) syz_genetlink_get_family_id$devlink(&(0x7f0000000240), r3) (async) getsockname$packet(r3, &(0x7f0000000100)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f00000000c0)=0x14) getsockname$packet(r3, &(0x7f0000000280)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000200)=0x14) sendmsg$nl_route(r2, &(0x7f0000000080)={0x0, 0x0, &(0x7f0000000500)={&(0x7f0000000180)=ANY=[@ANYBLOB="7c0000001000390400"/20, @ANYRES32=r5, @ANYBLOB="83080000000000005c0012800b000100697036746e6c00004c00028008000700ffff000014000300", @ANYRES32=r4], 0x7c}}, 0x0) getsockopt$PNPIPE_IFINDEX(0xffffffffffffffff, 0x113, 0x2, &(0x7f0000000c40)=0x0, &(0x7f0000000c80)=0x4) (async) r7 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000140)='memory.events\x00', 0x7a05, 0x1700) (async) r8 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) write$cgroup_int(r7, &(0x7f0000000200), 0xf000) (async) sendfile(r7, r8, 0x0, 0xf03b0000) r9 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) sendfile(r9, r8, &(0x7f00000002c0)=0x335773c3, 0x8) (async) bind$packet(r9, &(0x7f0000000540)={0x11, 0x1a, r4, 0x1, 0x6, 0x6, @random="73e9ab7fb3d6"}, 0x14) sendmmsg$inet(r1, &(0x7f0000000d80)=[{{&(0x7f0000000040)={0x2, 0x63, @remote}, 0x10, 0x0, 0x0, &(0x7f0000000000)=[@ip_pktinfo={{0x1c, 0x0, 0x8, {r5, @empty, @initdev={0xac, 0x1e, 0x0, 0x0}}}}], 0x20}}, {{0x0, 0x0, 0x0, 0x0, 0xfffffffffffffffc}}, {{&(0x7f0000000140)={0x2, 0x4e21, @initdev={0xac, 0x1e, 0x0, 0x0}}, 0x10, &(0x7f0000000f40)=[{&(0x7f00000002c0)="dc87ae05385c19d6e8ed2d5958c96165d8ab1a829a3cea158ff38ec71a1978f56531e0f25371cccf61b6700d1c9cc25ea72d3c24f0690df682b643222f4497125903e59147029069b74f63d35f24e307a77ebd42683eff8fdc98bd250930429ce6a0c3c5da7dee48c39b80038079d34d1ca0e2eb2a6dd682117e50249fa873add9f2946c24f62a1f98fd58e18d71029657e30e92c85f02e35b6321e799a38d06928a99affd91c706a903108bcec86bf20300ccaa20809fead1ce2a50bac9ee436fc900212feebdd56836b0d2143a5f615b3c2792a942cc217507ec191299e523ac289b3e0dfbb2a6dd3bcc78d16b28", 0xef}, {&(0x7f0000000fc0)="b39c1f56d08b488343fcccfa0c8fbbe27259dbbf04aa9b2613943c72272bc9ef7142fd774c551413f961b33a4bf9d6d690de08e031b27327b3319bef0373d269a18f063d2ad2cdf8b1c6c7047e350f578663cc3e12bca7d301e181fa6dcf4486d2af15d6b00b6f0afe2c9a520d5c9e9bfc5b387432d1254a98132509fc", 0x7d}, 
{&(0x7f0000000440)="ad61563d14c5e0fed504a2ec73618a9015f3e912ba0a2ae092082a2e1940a8780e3f4f97d7765f7516763f8fa6d2fd03a3b72b7903717e7e89776acf8bb423ec3f16fc370ec5844e560f434bb2c1cdebe592eba037852b252de0663eb45e28725c924b3771292da6070b51ceb564fabbe5d0f73e637ca014161d", 0x7a}, {&(0x7f0000000ec0)="c0217b9b9a52455554742a7fb0a53399b53a9e3b98f3a292f3391620bd2fc204a7d7d489e6c43186fb0e7004c5eadc3392077bcbf7e2a807591fcfebc5c03a2b8556158367090868c5b2dc603a03aee0d1c6f0d88ceb8f205885f71a678b8e9f667ea019ea3a46039d526d", 0x6b}, {&(0x7f00000005c0)="333425b8bf13b81ce96d57f28b0a0c2ead98f99c6357f6fcb7e507b81d9697228a99efec5f95c6d94ca345a5d35246d0724d2ad197acb1b548bae8474fd0fc1620b1d083cba852d761f1da5cf0bd1806a846f535d162c89a3221822fc1e7c776faac237149f66f59991eae894a29d4fe8c0f4e41d06b5e6638bdd86816509ab7e85032ae4c110506f81064bd99cafd357067b3bcdc4956d28d96468a33dd751f2103cfe108153f2858d8dfee23271178abdd9dc5c56c0404e5a592c0e437a15d1a94d3d1288e53d507a04ffb309ff382c0446a42d98f065644044481b1792d92e2b0337270271c4d2db49061f1e95becaa8c1c9e3ec243b4c1a2960b21", 0xfd}, {&(0x7f00000004c0)="16d722aa7b6d13f7c902e510ab49dab4a7ce5b384dfc86e048b35cc83588825884c1abf2fe179142f68ebb98972677c9d4fec1a31efa84", 0x37}], 0x6}}, {{&(0x7f0000000740)={0x2, 0x4e22, @loopback}, 0x10, &(0x7f00000008c0)=[{&(0x7f0000000780)="ea57116eaa1b0f0fd2c885d2654ca2e3a7a677ad0c0594c9121851f4626bb5d5f6d0bc7e8bf95cd17257449e1faa4cad", 0x30}, {&(0x7f00000007c0)="ad5e4ecbc071ed99de56c354712042e8b8fb1f7bc9d129f8abbc8c609cdd6e6fdad4100a8cdc4364b6609374d0d5ff14c413aa4845abd526bb211f1176361e5c2e45336b5c2a6dab88aa5758a43b6bd1b8edc49ddf1178bc452ebad98a4730cef6023bf1c138243e2528aa8744bac0a45897ceb6d4fea2d2ae0e0a7962a28129bae0a28f8f865fc6badad6b5e647b5dcac2cdf46333cbdaef6336132457dc51056147033b6e83ad5d60344df2ee7a1d32cb3fb49bd840a18c17665209aaabb4198f8d22925a9a6c4b27baae614eaa5a304019c651e100abfe7f58299f83388a97f8a5a194343f78045e51d698c51fe9902b7183442620bb5b44c59686cbbfe", 0xff}], 0x2}}, {{&(0x7f0000000900)={0x2, 0x4e20, @dev={0xac, 0x14, 0x14, 0x25}}, 0x10, &(0x7f0000000bc0)=[{&(0x7f0000000940)="9bbe318b08799c2b09893789f703", 0xe}, {&(0x7f0000000980)="adc3540f64c28db8cb5f98ec16b2d3d2783519282da6f1582fd6acdb2042f1ce4770b19079ab95e5a7a6571931f86c78ae7cf409be906214d3e709887b86304426095733c1d19aabee67c000716129ece11a6f7e7c15da004ad7360ceed8d7cd84dac44d32d23a826f4ce2f11a3b6d24258b27deda9b8bd976c0e3ce3d60c70f8fd68586363ae6ccc141f608f3f3fa527daf66099b23332b2285a5e18e2b6db87c88", 0xa2}, {&(0x7f0000000a40)="4d4f3a1c328254bed4b1c5450374bf5fa8b680b1c51b73d37cf632b43a54097963fa199855c8640fc0119f30cd416bfdb13fadc7cf8b3a71b9040dfeb3cbdd9c5efb779620008cecf1e52d05639eb8b0d1b85d9a3aad8f16c37e547756903c3864f43c7834539906076400741210f7f30b680465d7652cb629cb24c6dba1cefe2477aed6bad008ad939a356d661afe3ee60a949206f523daa2e50557053f497adfd34acccf0684524caf29f0e395b8cf9e40", 0xb2}, 
{&(0x7f0000001280)="78cb8d4da48da241aee34e5214efc55e817e5299aefe89a5fad5c369f097b0b4804e4e45a819add0e37f8316065d92648481ae4fd98455bd905e107d6bb4c2c47ce7c92c183978a55aed4e55e3d784b9545cb58dce3f668a9362f555cf78f5fcd411e4453377801fd92aa5a65d922ddea461a71039a429e79b874ef6290b6fadc6fc1af7fbe7763412209ae55212d97bcad4f7f5c31ccf7418ec7746193c0f8ff22ab19698aadf810d85cd9b765c619cd612ae26cd3d3c6cd92a346ced9ebc028e6cdc4affe5455a307983440a02071def9a6e183580d91235093ab9220088ebdeecef19f6ded9a14d9e988e9a0f6792aa1561e83da436e20db9e6a6e85d8155c73b20cdee5ced44f3fa7a5d570b6a1eaf8e7d601f1dc3bb6adb8a3faa44c5713aa2c14468f56e3b367381594f983e07cdb146babe5795a1daa110e52ad9449bec0007de93ff8dacf15130ab29591c83c4ed48f218f954e724d60328562eafbbc642348d5b521a2ad7c63530d73ec5d8eeef4c526a5902a8fdb4c53180d35211ace2c26e20c97aae6ed2c135c3ea4c65b6baef154a7997dc3dae44cd1175977f5c59ff64ad40a5b707cb3ac681356f92e3b1fe635cddad54406d2d831b28217c10aa8fa45981779175528e2f8686f384669e2deeea2fb286c52e2bc3f2d1d19257605372263333a5f0d1673630b9208877caddd546fb42775afd6f50eb35491422c0d8df3db5fbc71e461edae26091d5efcd4385cd636ffd3be606e362bce2cf4a66b84885e6f9658968ff01ee81ff967ae8365b79d55c8939745a8bede19e332aa7532cc0155cd5eb5480e32ff7a94b203e4db70784e32364c1f070b5af1c944e513bed80bb3a02cb722917181b8d313918dce28be4f815009b3026c787f345263c34f56b172ae62880cacc8e1338d7935a319bf375860b0d7f502cea86aec94d7fe6084fc21ef1ca8c0427a5952eebdb12e6cda414ca381afd21093f773244a3c7f632daddfc877fe6148b164906c6deb7bac84982691318c448b73bf634ba80819528d56063c5a760137ed5cc4c9c674e56168ff5a84c91681881733f4813b22961757f267c4ffc2798e6ad4dd21f27d0e7a61d7921d4f252b30b799e622041cb18426a12bd9f77be2b10e56ad9d26ab97eedc85ef92e6ec4212201766ddd826a012c3a5377a927d203f59a547449498c9091d0f7f76a8ed1fa6841b14d7086400b3c13f4c8d61edf1e477c1fcecc23ccb25b0a4476a4de55b2d45cde8dc8d3fb5c44d8fa1f2f8881f47fbe058ec1f66df6f24c00b904873965f5a6d3ae24d79a39c59b8122785d3d2a7986060a72b79572fe01a3aa43c8cdb45a565450f42e0f9f4120be3fa2fe4f2df848044f2034f01a618bdd86f93b4518955f695bb2a8a026da92c331d7e5daf90ca1875bea747097dfa16d3da173eb2e24f36d4ba9f567b3035734acb018cb2f99b6e2d22bc4b18cbd522d7abdbd2aaab40ecec9e2c9b6754027950a9726808239916d23c8be6eb902721d9983cda3821de1b7fc3132bd2ca421fc03eaffd6fb51db22ffe5a8603f48277ccad1a79722ef2ae469960a226ccd46cbbe15b1acacddc8d2863c3ecac6d99f83f204bd24c381045fef1832dc15374104a9000cde56a0555b7408b00a56d61a348359a4e30de2605d39b9694999b607a9d5a559071c95ec4f1ece63245141497ea486394daee029a39b59c4f283f3b93ba675b5f1e5cd76e4858a0d0af22a2619d3403d2e75f5803b4a765c68e58578b6c3ae7a0611a7a89740ee0dd93c195019ce3e563507e3924f51a8695a79e6a2c05f072d7813d3ba215926bbdb345818216cc7ae39d46f11e82775f3bbe3679b4eb4faa4d2b5c1fe1595a899847fbec391a8235b6f37bd2fc527a44a54978f63d6e241e4e12157ea2e53647f88d4bb9aca48955317576e912335b20f09d5bf6ea4ccc7f83f7ecf3cfc039166f1c88947679fb16864b8257c2af3c807560d30eb5d226ac3e834a9ebde3159d2b1c9de963b3217c52e005816c339f672b137902991deab2ba7d8a837d5a68345f9db6aadff13647654705e4739bfa50f6e6065361255c26658952b00f770be26764d8e3730e085ca68c541ab04ac5985694fca2545485cc132ae33fb0ffc40e3535b318632ba4be7e0fbb77b57bb1889479fd89a72912b7c81ae8dea9dc2083edc597a8a4c0327d5e2d26a270b1688365a1d551657c36b16dc321c7361085b660d1c6e452148f03fbd67142a59c73e49bf5f1182f5688dfd557bd8448bb09b15e13339fca077562d120614c535d748cb338c2bef477b606082b10e51634ffa639bc728ece96189f1a3f48d9e06decd47dc32f01e51ffc531ea9e4a92a0f3b136c0597f4e85e0f00d45061660e80f6417fe56ebda9a2532eec07fdb7b16a9e156b1f95f0df46286434e96038ca06bb0a5b1c721356c15c51122313830cd75fd6fd64800e8281612e72ad99a8aef10a39a3bad8f92e031e
d6321081b9d1f0c5c64dc406affa89fec40cf150e9035c8658a7d0a63da5bd713530475e80699261e3cdb88e8563ff9ca792a62d83185f686fac1d012b630c1b919baaf494ead9624932985436eea97019a8bd1b258bdd765c44c93952409cfc3f6506510ab6b48d51885d9af2a5033bfc204390e2896341af68ec7d5f95ca7a5cf40e5c1361f5c9224008ba54d5c0e299935f1c3f779d942b546036277a1e3065ca664f2a234ae7852c79bf681c6745a3568a8ce10b9453d05ce4fbd5631c7c5d22e9df045b34c8fedebb9f0b6232f7949462f11ffb0c8b8ea2c716803d9bb5bc6c11ed3ff9031d1a26e3b0e89d51d7e2aca47ae6323967c547d07345d0b93d9fa1cd3c478bc49e354641a43cac4f8f61b637a1fdfd6157e0e9b6d6afcde54ca9319774ce437b80e5f65e7b863eec04e67c362bc21c63f368d9d40bffaab025f220f5562d0a1441cfd484f074396b68c21dffc5a4dfb071b8de2f3aa89b747593448bfcdc254febbb8207d19c8df54b7a3f208e0686784d21cb75e7321bb7cb041719223abeb9e73f64f085122fd837c88a2aa0dcc4da793ed4155a2fc11c82405cc52652400d44050d2b54883c09123c3c3bb8f4c6a72f737b1d81debea5472b0b041ed5ac0555a1f39074c5dcd88f4d7d94363261dfc17c37917a937318390fbfb5480754eaa2b53b1545d516563adfeba4fbcdade5e79aed55e5a60889b4f69c814da4f57d5cc3daefd55b93efca01f013334ac882060e1aa50f5ddf0f7e6b79b3ff53688b76c3008fa7a5e5f6497192a1c04ed4fe85ec026871d2478b3d27199fb938a97c4ae0cd59d29e810bee98afba9e052e144de58aad7e2e8748419ae2afa997a59576bf7ce4df35328a38729495750028bba8838c3835875a0f3a7bc19da0e798e86c1ada59fb5b31a86f6daa34ac3a9a2729935c662bf82e3b79d3737ebf9bb62713bbc7285c48d941363b0d67eaa23fb65998ce367da525383877328d466fb95408b7d1160036e1faa1776e9bf6a838eda4ce9e60f00a805735734bd9a7a8282af21174e1e6406455a1dc5edbe4bf94e9160725713ccfe356bbf5363adf1f18856b04049cbe021d5e3d0b7119724949495db9a8d6b53868103cfcfbcf178e8f8180d4b9b9a06180d69412de6ad3ea6fe40a299a170fe531aba2a5937fe1b951e218bccfc3510f242f6dd9cb9485fb72014192b1309fb357beab8055dc0229d9bc4994373eea2a7e06ba07d284ada708fd76606653849d2a7f337a1a1963b88ba9919ee207be74735ee5b6b1fd6fcd5b9c5145eee649a61b57aa90530061a65edbe6fe285cea5563870d97c339238f4c27eff1db01d0e7cd4ee47ab70a440fe21b71eb90afc2fb9a70a26797c083cce7f675c42db8482beeacfeb88080d6a552202d19cf23831fe2c82fbf9dfca67c72a159c636df11b75cf695fe91753e34cce8ee13227a0968c90582f830ef2fa5c63808e611866b6cb75479381f9fef1e4e9eeb414a2d9c9210e75daa8131f1795e40fdb876245d1379c9d653c8167228d512a9453eeffc263b7c7b9e5a8f4d8faf397cf602b5695998d915fa6b09397b68ded52c82869c339321c09b938926cabbefba5439368cc939ec7223bbc1e806227fc076e00df96a8363e0c62bda5838a70195a40753d9f5ffb075ff466b260949967d1eafc184caa1f9e04a4af1158fbaacc171aba67b61ada521e0377cd0c924c2d76cd6ba470361f5c179103c0a6ccb9aaf0e13d6272cfad4fc640548b82cb54401402e291379c45e1aa8fd3ccb70dc8bb3797feb1bb15b5c9b933e4d985ca3965d20fe8327eb088321a3668c276deb1681ac59902f2e90f17cc575875ed8f9ceefe9fc21cd4d6758cbd7d40648180be35d79b23a382ce9466aa97438da29a4506272aa6c74c1691b96253fbcd61711eb19dec67bafef7cf771a1c929ae56151a3d8182b0c776b7c5e49703dd5272b4b6888df337cef443d01ebc6abb17a64c3cc4741de9980d9fddff409e8fbc47fa3c995a9dffe33b06261974460d93ba1abfbe656768630e256d9a4183dc66a81a8b293e5f4f74dc795d8a46265d2aff4c0430f8a085d0e97857acf474e513d9c69b233db05b476172a57fdb8273e496165d398105c24aaa7be08bae51651603b90699e02667cf2ed17eee6db95ee31a5cb379ebd66a407f3858e46edc61dce48493883822cf6ae539ed0dd8adaa9c09896d02224c2c9bc9af43691486095436df7f931da2444dabfa56115c68babb9a3828fd1c9d355a1af50905f8a62aa55cdafe269951bc13c1b80f1a37aa6de192f0000e3c58cfe478e50fefa290f8df749e2df04f17fe2d7edb20fdf426e2eb9101a007d2ab9fd9dfeba18aafee6c2465f749f53aa86afd11d76631c76700b8916aa381d84a8a262ea771c46403839d32e397b5b11ac0c1fbd74b58054abd47cf76ffa7abc4eb83180e39f92dcf5d17d6df06587a3f2d4f0ac5ea7b68c66ceb46599712134bb9b5dad5636b9bb
5eabc9b97ac0140e4dfc84a6af1efd219877e5faa362539b1bb327118b1c9396edeee9dc71e35158241cbee145a492bdbbd2af1a75c006ee269d2dcdb0b661e14b81757a4286b26087349969e9bb76e6d3e36098ad0bfb136b5ce120a050f1e99342b8825fba27d07b8f6a57780ac44463e8758f3281f151c79a71c54d9b280c3c1dbdcb971ac57dc77fe857a61c56529efc1c9338e07fc70d54b94d79708d1fa6f0af7846f29ba0cd3ccb316464b0ed8c885c8b659ccafa72d2c663d7d9b61744d17ca7c74724853ea69a31629238d9db1d9a24d11294871e24779b1bbd26bd2ad300c85ff7daae383efc6f82cf653d8d6f36413937a7653d73aeee8c53b09f882b4e582f2f1b0133ee4cc00eee7a1a94994d75b560168a947edfda7f53363ae702f88ead37474793687abce9307ef8689ce2aa1f816d5ab533b4435c15ece00ff86f19cda99b0ba39db45725ae6f891f94e2db4ddb960e8432dc5871a3a18d37341ff1014c0e628f5d49f80e872b86d7c4e784b9fd778d748282efe6bff88a613130bec7b6041e4875ee10952b2502b6fdb5539cb93f87478319dc8d3765b6e8e26a138c69baa30be8135eaf8f3718cb6093310818bc6ff66feb1cc2c4ef1398e1550c6dce31e6bbaeee47547a545df65e017ca7ab0cb0838534ea305cbc4766d2677d04d603136af6112aabfb0095ece3e41ac8dd2293acd53cb7872cce4b00d4cfaae08de22e140e912452a8ca21aa1141f8904db3c3a496d9c4dc552eb35467c886f01172", 0x1000}, {&(0x7f0000000b00)="3b539000a8e20f63fa678928f496ffc4e95e5b7c0c73c9e509d20fa625602ea6e012eb13a152afc45e088befde57a7004a0a8edf69ce34e94d5d9b61eae40b09d88b766ec439041d2ec25354e72915b66fd514a11d1875c1a159670b559caa3364c894787db35c114ca67ecea52bbb76439ec6f70c71842b78f8d94e1ac7bce1ec0298041e389a487a171f887c25ffec08b4afa078ca5b83e3f3ec30e937fe60c66427860852fb0d71888e5f20f8802d743be27d", 0xb4}], 0x5, &(0x7f0000000cc0)=[@ip_retopts={{0x24, 0x0, 0x7, {[@timestamp={0x44, 0x10, 0x8b, 0x0, 0x8, [0x2c78, 0x8, 0x7]}, @ra={0x94, 0x4, 0x7}]}}}, @ip_pktinfo={{0x1c, 0x0, 0x8, {r6, @local, @rand_addr=0x64010102}}}, @ip_ttl={{0x14, 0x0, 0x2, 0xfffffff8}}, @ip_pktinfo={{0x1c, 0x0, 0x8, {r5, @private=0xa010100, @broadcast}}}, @ip_tos_int={{0x14, 0x0, 0x1, 0x3}}, @ip_tos_u8={{0x11, 0x0, 0x1, 0x5}}], 0xb0}}], 0x5, 0x0) [ 2892.738435][T23588] 8021q: adding VLAN 0 to HW filter on device bond810 [ 2892.765893][T23590] netlink: 44 bytes leftover after parsing attributes in process `syz-executor.0'. 17:04:17 executing program 1: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x28a, 0xa, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8}]}, 0x3c}, 0x300}, 0x0) [ 2892.874658][T23599] validate_nla: 15 callbacks suppressed [ 2892.874687][T23599] netlink: 'syz-executor.5': attribute type 1 has an invalid length. 
17:04:17 executing program 0: syz_init_net_socket$nl_generic(0x10, 0x3, 0x10) syz_genetlink_get_family_id$nl802154(0x0, 0xffffffffffffffff) r0 = socket$inet6_tcp(0xa, 0x1, 0x0) bind$inet6(0xffffffffffffffff, &(0x7f00000003c0), 0x1c) listen(r0, 0x0) r1 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000140)='memory.events\x00', 0x7a05, 0x1700) r2 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) write$cgroup_int(r1, &(0x7f0000000200), 0xf000) sendfile(r1, r2, 0x0, 0xf03b0000) r3 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) write(r0, &(0x7f0000000080)="55498e290c9056a6bcf9fed37254c9443e14d7dbc32d751c3a8b680a1448d0353622710a201c340861207162090df23b15d585199f2c613eb8c73e8898d3c154f083a598dbbda569da6b5f337061e8880ce43686b1d82d4abf5f080818d230acf9c97a51c14de27c3aa6d517a0c46a", 0x6f) sendfile(r3, r2, &(0x7f00000002c0)=0x335773c3, 0x8) setsockopt$inet6_tcp_TCP_ULP(r2, 0x6, 0x1f, &(0x7f0000000000), 0x4) unshare(0x48000000) 17:04:18 executing program 0: syz_init_net_socket$nl_generic(0x10, 0x3, 0x10) (async) syz_init_net_socket$nl_generic(0x10, 0x3, 0x10) syz_genetlink_get_family_id$nl802154(0x0, 0xffffffffffffffff) r0 = socket$inet6_tcp(0xa, 0x1, 0x0) bind$inet6(0xffffffffffffffff, &(0x7f00000003c0), 0x1c) (async) bind$inet6(0xffffffffffffffff, &(0x7f00000003c0), 0x1c) listen(r0, 0x0) (async) listen(r0, 0x0) r1 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000140)='memory.events\x00', 0x7a05, 0x1700) r2 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) write$cgroup_int(r1, &(0x7f0000000200), 0xf000) sendfile(r1, r2, 0x0, 0xf03b0000) r3 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) write(r0, &(0x7f0000000080)="55498e290c9056a6bcf9fed37254c9443e14d7dbc32d751c3a8b680a1448d0353622710a201c340861207162090df23b15d585199f2c613eb8c73e8898d3c154f083a598dbbda569da6b5f337061e8880ce43686b1d82d4abf5f080818d230acf9c97a51c14de27c3aa6d517a0c46a", 0x6f) sendfile(r3, r2, &(0x7f00000002c0)=0x335773c3, 0x8) (async) sendfile(r3, r2, &(0x7f00000002c0)=0x335773c3, 0x8) setsockopt$inet6_tcp_TCP_ULP(r2, 0x6, 0x1f, &(0x7f0000000000), 0x4) unshare(0x48000000) [ 2892.997451][T23599] bond1121: entered promiscuous mode [ 2893.004316][T23599] 8021q: adding VLAN 0 to HW filter on device bond1121 17:04:18 executing program 0: syz_init_net_socket$nl_generic(0x10, 0x3, 0x10) (async) syz_genetlink_get_family_id$nl802154(0x0, 0xffffffffffffffff) r0 = socket$inet6_tcp(0xa, 0x1, 0x0) bind$inet6(0xffffffffffffffff, &(0x7f00000003c0), 0x1c) (async) listen(r0, 0x0) r1 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000140)='memory.events\x00', 0x7a05, 0x1700) r2 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) write$cgroup_int(r1, &(0x7f0000000200), 0xf000) (async) sendfile(r1, r2, 0x0, 0xf03b0000) r3 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) write(r0, &(0x7f0000000080)="55498e290c9056a6bcf9fed37254c9443e14d7dbc32d751c3a8b680a1448d0353622710a201c340861207162090df23b15d585199f2c613eb8c73e8898d3c154f083a598dbbda569da6b5f337061e8880ce43686b1d82d4abf5f080818d230acf9c97a51c14de27c3aa6d517a0c46a", 0x6f) (async) sendfile(r3, r2, &(0x7f00000002c0)=0x335773c3, 0x8) (async) setsockopt$inet6_tcp_TCP_ULP(r2, 0x6, 0x1f, &(0x7f0000000000), 0x4) unshare(0x48000000) 17:04:18 executing program 0: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000001c0)='cgroup.controllers\x00', 0x275a, 0x0) 
r1 = socket$inet6_udplite(0xa, 0x2, 0x88) ioctl$sock_SIOCGIFINDEX_80211(r1, 0x8933, &(0x7f0000000240)={'wlan1\x00', 0x0}) syz_genetlink_get_family_id$nl80211(&(0x7f0000000140), 0xffffffffffffffff) sendmsg$NL80211_CMD_FRAME(0xffffffffffffffff, &(0x7f0000001280)={0x0, 0x0, &(0x7f0000000000)={&(0x7f0000000180)=ANY=[@ANYBLOB="48040000", @ANYRES16=r1, @ANYBLOB="01edff000000000004003b1c210008000300", @ANYRES32=r2, @ANYBLOB="2c0433005000de295b3acba52ee4080211000001505050505050"], 0x448}}, 0x0) r3 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000001c0)='memory.current\x00', 0x275a, 0x0) write$binfmt_script(r3, &(0x7f0000000000)=ANY=[], 0x208e24b) mmap(&(0x7f0000000000/0xb36000)=nil, 0xb36000, 0x2, 0x28011, r3, 0x0) write$binfmt_script(r0, &(0x7f0000000040)=ANY=[], 0x208e24b) socket$nl_generic(0x10, 0x3, 0x10) r4 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000080)='cgroup.controllers\x00', 0xb00000000065808, 0x0) r5 = socket$nl_generic(0x10, 0x3, 0x10) r6 = socket$nl_generic(0x10, 0x3, 0x10) ioctl$sock_SIOCGIFINDEX_80211(r6, 0x8933, &(0x7f0000000dc0)={'wlan1\x00', 0x0}) r8 = syz_genetlink_get_family_id$nl80211(&(0x7f0000000140), 0xffffffffffffffff) sendmsg$NL80211_CMD_FRAME(r6, &(0x7f0000001280)={0x0, 0x0, &(0x7f0000001240)={&(0x7f00000001c0)={0x3c, r8, 0x1, 0x0, 0x0, {{}, {@val={0x8, 0x3, r7}, @void}}, [@NL80211_ATTR_FRAME={0x1e, 0x33, @deauth={@wo_ht={{}, {}, @device_a, @device_b}, 0x0, @void}}]}, 0x3c}}, 0x0) r9 = bpf$BPF_PROG_RAW_TRACEPOINT_LOAD(0x5, &(0x7f0000000200)={0x11, 0x4, &(0x7f0000000040)=ANY=[@ANYBLOB="18000000000100060000000077f2ab268500000023cd730000"], &(0x7f0000000080)='GPL\x00', 0x0, 0x0, 0x0, 0x0, 0x0, '\x00', 0x0, 0x2, 0xffffffffffffffff, 0x8, 0x0, 0x0, 0x10, 0x0}, 0x45) bpf$BPF_RAW_TRACEPOINT_OPEN(0x11, &(0x7f0000000200)={&(0x7f00000001c0)='kfree\x00', r9}, 0x10) r10 = socket$nl_xfrm(0x10, 0x3, 0x6) r11 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000140)='memory.events\x00', 0x7a05, 0x1700) r12 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) write$cgroup_int(r11, &(0x7f0000000200), 0xf000) sendfile(r11, r12, 0x0, 0xf03b0000) r13 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) sendfile(r13, r12, &(0x7f00000002c0)=0x335773c3, 0x8) bpf$BPF_RAW_TRACEPOINT_OPEN(0x11, &(0x7f0000000400)={&(0x7f0000000280)='fscache_access_volume\x00', r13}, 0x10) sendmsg$nl_xfrm(r10, &(0x7f0000001380)={0x0, 0x0, &(0x7f0000000100)={&(0x7f00000002c0)=@newsa={0x138, 0x10, 0x1, 0x0, 0x0, {{@in, @in6=@private1}, {@in6=@private1, 0x0, 0x33}, @in=@multicast2, {}, {}, {}, 0x0, 0x0, 0x2}, [@algo_auth={0x48, 0x1, {{'cmac(aes)\x00'}}}]}, 0x138}}, 0x0) poll(&(0x7f00000000c0)=[{r4, 0x712}, {r9, 0x20}, {0xffffffffffffffff, 0x1000}, {r10, 0x80}, {r5}], 0x5, 0x400) sendfile(r5, r4, 0x0, 0x10000a006) [ 2893.275627][T23602] bond1121: (slave bridge1147): making interface the new active one [ 2893.295252][T23602] bridge1147: entered promiscuous mode [ 2893.373592][T23602] bond1121: (slave bridge1147): Enslaving as an active interface with an up link [ 2893.392745][T23604] netlink: 'syz-executor.4': attribute type 1 has an invalid length. 
17:04:18 executing program 5: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x3f8, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) [ 2893.423000][T23604] workqueue: Failed to create a rescuer kthread for wq "bond687": -EINTR [ 2893.513057][T23611] netlink: 56 bytes leftover after parsing attributes in process `syz-executor.3'. 17:04:18 executing program 4: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x28a, 0x0, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0x2e}]}, 0x3c}}, 0x0) 17:04:18 executing program 3: socket$netlink(0x10, 0x3, 0xf) r0 = socket(0x0, 0x0, 0x0) sendmsg$nl_route(0xffffffffffffffff, 0x0, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000000)=ANY=[@ANYBLOB="4800000010001fff0000000000000000000000008e6ff3ca2bfed65d01e06bc138488e6d76c35efb9a9c7c6572768d43000673092f454be6b1ee6a2c4e1fd12f824a84e0400cc74f4a849d7e689d836902441dcb8fa707a282ab51534f23abc907dad2b35137dc93", @ANYRES32=0x0, @ANYBLOB="0000000000000000280012800b0001006272696467650000180002800c002e00000000000100000005002a0000000000"], 0x48}}, 0x0) [ 2893.607242][ T27] audit: type=1804 audit(1688403858.564:424): pid=23645 uid=0 auid=4294967295 ses=4294967295 subj=unconfined op=invalid_pcr cause=open_writers comm="syz-executor.0" name="/root/syzkaller-testdir444109964/syzkaller.UHVosO/2029/cgroup.controllers" dev="sda1" ino=1962 res=1 errno=0 [ 2893.656410][T23619] netlink: 'syz-executor.2': attribute type 1 has an invalid length. 
[ 2893.761451][T23619] bond1048: entered promiscuous mode [ 2893.774975][T23619] 8021q: adding VLAN 0 to HW filter on device bond1048 17:04:18 executing program 3: socket$netlink(0x10, 0x3, 0xf) r0 = socket(0x0, 0x0, 0x0) sendmsg$nl_route(0xffffffffffffffff, 0x0, 0x0) (async, rerun: 32) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000000)=ANY=[@ANYBLOB="4800000010001fff0000000000000000000000008e6ff3ca2bfed65d01e06bc138488e6d76c35efb9a9c7c6572768d43000673092f454be6b1ee6a2c4e1fd12f824a84e0400cc74f4a849d7e689d836902441dcb8fa707a282ab51534f23abc907dad2b35137dc93", @ANYRES32=0x0, @ANYBLOB="0000000000000000280012800b0001006272696467650000180002800c002e00000000000100000005002a0000000000"], 0x48}}, 0x0) (rerun: 32) [ 2893.930517][T23620] bond1048: (slave bridge1114): making interface the new active one [ 2893.943750][T23620] bridge1114: entered promiscuous mode [ 2893.976793][T23620] bond1048: (slave bridge1114): Enslaving as an active interface with an up link 17:04:19 executing program 2: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0xaa, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) [ 2894.033532][T23628] netlink: 'syz-executor.1': attribute type 1 has an invalid length. 
17:04:19 executing program 3: socket$netlink(0x10, 0x3, 0xf) (async) r0 = socket(0x0, 0x0, 0x0) sendmsg$nl_route(0xffffffffffffffff, 0x0, 0x0) (async) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000000)=ANY=[@ANYBLOB="4800000010001fff0000000000000000000000008e6ff3ca2bfed65d01e06bc138488e6d76c35efb9a9c7c6572768d43000673092f454be6b1ee6a2c4e1fd12f824a84e0400cc74f4a849d7e689d836902441dcb8fa707a282ab51534f23abc907dad2b35137dc93", @ANYRES32=0x0, @ANYBLOB="0000000000000000280012800b0001006272696467650000180002800c002e00000000000100000005002a0000000000"], 0x48}}, 0x0) 17:04:19 executing program 1: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x28a, 0xa, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8}]}, 0x3c}, 0xffffff1f}, 0x0) [ 2894.077010][T23628] workqueue: Failed to create a rescuer kthread for wq "bond811": -EINTR [ 2894.235732][T23648] netlink: 'syz-executor.5': attribute type 1 has an invalid length. [ 2894.365537][T23648] bond1122: entered promiscuous mode 17:04:19 executing program 3: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket(0x0, 0x0, 0x0) r2 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) ioctl$F2FS_IOC_MOVE_RANGE(r1, 0xc020f509, &(0x7f0000000100)={r0, 0x80000000, 0x8b39, 0x2}) sendmsg$nl_route(r3, &(0x7f0000000280)={&(0x7f0000000140)={0x10, 0x0, 0x0, 0x200000}, 0xc, &(0x7f0000000200)={&(0x7f00000002c0)=ANY=[@ANYBLOB="240023d2b0d94f6031b9ab5f2616d003843e190010002bbd7000ffdbdf251c201400ff042709cb000008000100c699d7c9074ae1cd2d3968d2493f6f447c6fa4a03294ae2e23573c25eff58f8e7317057f82c6065005d86a4abeac8d21d22c232af2aa9266cc6e634b534f524afbe21e7b1d65878e0e628960d782c15f5bf5b6f88f25c95aec4098175288ee76c1dda9106c993642faafbcb48e6ecc48ddfcd67d4f61c4efd47ecb0ee0efbf60f11405881058e1f542789b232583e4"], 0x24}, 0x1, 0x0, 0x0, 0x2004011}, 0x4000) sendfile(0xffffffffffffffff, r2, 0x0, 0x8000000000004) openat$cgroup_ro(r2, &(0x7f00000001c0)='cpuacct.usage_percpu_user\x00', 0x0, 0x0) ioctl$BTRFS_IOC_SUBVOL_CREATE_V2(r1, 0x50009418, &(0x7f00000006c0)={{r2}, 0x0, 0x0, @inherit={0x58, &(0x7f0000000080)={0x1, 0x2, 0x8, 0x6, {0x0, 0xffffffffffffff10, 0x10000, 0x8000, 0x7}, [0x5, 0x152]}}, @subvolid=0x8f}) sendmsg$nl_route(0xffffffffffffffff, 0x0, 0x0) getpeername$packet(r1, &(0x7f0000000000)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @link_local}, &(0x7f0000000040)=0x14) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000000)=ANY=[], 0x48}, 0x1, 0x0, 0x0, 0x404c014}, 0x0) [ 2894.387826][T23648] 8021q: adding VLAN 0 to HW filter on device bond1122 17:04:19 executing program 3: r0 = socket$netlink(0x10, 0x3, 0x0) (async) r1 = socket(0x0, 0x0, 0x0) r2 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) (async) ioctl$F2FS_IOC_MOVE_RANGE(r1, 0xc020f509, &(0x7f0000000100)={r0, 0x80000000, 0x8b39, 
0x2}) sendmsg$nl_route(r3, &(0x7f0000000280)={&(0x7f0000000140)={0x10, 0x0, 0x0, 0x200000}, 0xc, &(0x7f0000000200)={&(0x7f00000002c0)=ANY=[@ANYBLOB="240023d2b0d94f6031b9ab5f2616d003843e190010002bbd7000ffdbdf251c201400ff042709cb000008000100c699d7c9074ae1cd2d3968d2493f6f447c6fa4a03294ae2e23573c25eff58f8e7317057f82c6065005d86a4abeac8d21d22c232af2aa9266cc6e634b534f524afbe21e7b1d65878e0e628960d782c15f5bf5b6f88f25c95aec4098175288ee76c1dda9106c993642faafbcb48e6ecc48ddfcd67d4f61c4efd47ecb0ee0efbf60f11405881058e1f542789b232583e4"], 0x24}, 0x1, 0x0, 0x0, 0x2004011}, 0x4000) sendfile(0xffffffffffffffff, r2, 0x0, 0x8000000000004) (async) openat$cgroup_ro(r2, &(0x7f00000001c0)='cpuacct.usage_percpu_user\x00', 0x0, 0x0) ioctl$BTRFS_IOC_SUBVOL_CREATE_V2(r1, 0x50009418, &(0x7f00000006c0)={{r2}, 0x0, 0x0, @inherit={0x58, &(0x7f0000000080)={0x1, 0x2, 0x8, 0x6, {0x0, 0xffffffffffffff10, 0x10000, 0x8000, 0x7}, [0x5, 0x152]}}, @subvolid=0x8f}) (async) sendmsg$nl_route(0xffffffffffffffff, 0x0, 0x0) getpeername$packet(r1, &(0x7f0000000000)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @link_local}, &(0x7f0000000040)=0x14) (async) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000000)=ANY=[], 0x48}, 0x1, 0x0, 0x0, 0x404c014}, 0x0) [ 2894.567598][T23651] bond1122: (slave bridge1148): making interface the new active one [ 2894.589430][T23651] bridge1148: entered promiscuous mode [ 2894.614225][T23651] bond1122: (slave bridge1148): Enslaving as an active interface with an up link 17:04:19 executing program 0: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000001c0)='cgroup.controllers\x00', 0x275a, 0x0) (async) r1 = socket$inet6_udplite(0xa, 0x2, 0x88) ioctl$sock_SIOCGIFINDEX_80211(r1, 0x8933, &(0x7f0000000240)={'wlan1\x00', 0x0}) syz_genetlink_get_family_id$nl80211(&(0x7f0000000140), 0xffffffffffffffff) (async) sendmsg$NL80211_CMD_FRAME(0xffffffffffffffff, &(0x7f0000001280)={0x0, 0x0, &(0x7f0000000000)={&(0x7f0000000180)=ANY=[@ANYBLOB="48040000", @ANYRES16=r1, @ANYBLOB="01edff000000000004003b1c210008000300", @ANYRES32=r2, @ANYBLOB="2c0433005000de295b3acba52ee4080211000001505050505050"], 0x448}}, 0x0) (async) r3 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000001c0)='memory.current\x00', 0x275a, 0x0) write$binfmt_script(r3, &(0x7f0000000000)=ANY=[], 0x208e24b) mmap(&(0x7f0000000000/0xb36000)=nil, 0xb36000, 0x2, 0x28011, r3, 0x0) (async) write$binfmt_script(r0, &(0x7f0000000040)=ANY=[], 0x208e24b) (async) socket$nl_generic(0x10, 0x3, 0x10) r4 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000080)='cgroup.controllers\x00', 0xb00000000065808, 0x0) r5 = socket$nl_generic(0x10, 0x3, 0x10) r6 = socket$nl_generic(0x10, 0x3, 0x10) ioctl$sock_SIOCGIFINDEX_80211(r6, 0x8933, &(0x7f0000000dc0)={'wlan1\x00', 0x0}) (async) r8 = syz_genetlink_get_family_id$nl80211(&(0x7f0000000140), 0xffffffffffffffff) sendmsg$NL80211_CMD_FRAME(r6, &(0x7f0000001280)={0x0, 0x0, &(0x7f0000001240)={&(0x7f00000001c0)={0x3c, r8, 0x1, 0x0, 0x0, {{}, {@val={0x8, 0x3, r7}, @void}}, [@NL80211_ATTR_FRAME={0x1e, 0x33, @deauth={@wo_ht={{}, {}, @device_a, @device_b}, 0x0, @void}}]}, 0x3c}}, 0x0) (async) r9 = bpf$BPF_PROG_RAW_TRACEPOINT_LOAD(0x5, &(0x7f0000000200)={0x11, 0x4, &(0x7f0000000040)=ANY=[@ANYBLOB="18000000000100060000000077f2ab268500000023cd730000"], &(0x7f0000000080)='GPL\x00', 0x0, 0x0, 0x0, 0x0, 0x0, '\x00', 0x0, 0x2, 0xffffffffffffffff, 0x8, 0x0, 0x0, 0x10, 0x0}, 0x45) bpf$BPF_RAW_TRACEPOINT_OPEN(0x11, &(0x7f0000000200)={&(0x7f00000001c0)='kfree\x00', r9}, 0x10) (async) r10 = socket$nl_xfrm(0x10, 0x3, 0x6) (async) r11 = 
openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000140)='memory.events\x00', 0x7a05, 0x1700) (async) r12 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) write$cgroup_int(r11, &(0x7f0000000200), 0xf000) (async) sendfile(r11, r12, 0x0, 0xf03b0000) (async) r13 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) sendfile(r13, r12, &(0x7f00000002c0)=0x335773c3, 0x8) (async) bpf$BPF_RAW_TRACEPOINT_OPEN(0x11, &(0x7f0000000400)={&(0x7f0000000280)='fscache_access_volume\x00', r13}, 0x10) (async) sendmsg$nl_xfrm(r10, &(0x7f0000001380)={0x0, 0x0, &(0x7f0000000100)={&(0x7f00000002c0)=@newsa={0x138, 0x10, 0x1, 0x0, 0x0, {{@in, @in6=@private1}, {@in6=@private1, 0x0, 0x33}, @in=@multicast2, {}, {}, {}, 0x0, 0x0, 0x2}, [@algo_auth={0x48, 0x1, {{'cmac(aes)\x00'}}}]}, 0x138}}, 0x0) (async) poll(&(0x7f00000000c0)=[{r4, 0x712}, {r9, 0x20}, {0xffffffffffffffff, 0x1000}, {r10, 0x80}, {r5}], 0x5, 0x400) (async) sendfile(r5, r4, 0x0, 0x10000a006) 17:04:19 executing program 5: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x3fa, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) [ 2894.660819][T23657] netlink: 'syz-executor.4': attribute type 1 has an invalid length. 
17:04:19 executing program 3: r0 = socket$netlink(0x10, 0x3, 0x0) (async, rerun: 64) r1 = socket(0x0, 0x0, 0x0) (async, rerun: 64) r2 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) ioctl$F2FS_IOC_MOVE_RANGE(r1, 0xc020f509, &(0x7f0000000100)={r0, 0x80000000, 0x8b39, 0x2}) sendmsg$nl_route(r3, &(0x7f0000000280)={&(0x7f0000000140)={0x10, 0x0, 0x0, 0x200000}, 0xc, &(0x7f0000000200)={&(0x7f00000002c0)=ANY=[@ANYBLOB="240023d2b0d94f6031b9ab5f2616d003843e190010002bbd7000ffdbdf251c201400ff042709cb000008000100c699d7c9074ae1cd2d3968d2493f6f447c6fa4a03294ae2e23573c25eff58f8e7317057f82c6065005d86a4abeac8d21d22c232af2aa9266cc6e634b534f524afbe21e7b1d65878e0e628960d782c15f5bf5b6f88f25c95aec4098175288ee76c1dda9106c993642faafbcb48e6ecc48ddfcd67d4f61c4efd47ecb0ee0efbf60f11405881058e1f542789b232583e4"], 0x24}, 0x1, 0x0, 0x0, 0x2004011}, 0x4000) sendfile(0xffffffffffffffff, r2, 0x0, 0x8000000000004) (async, rerun: 32) openat$cgroup_ro(r2, &(0x7f00000001c0)='cpuacct.usage_percpu_user\x00', 0x0, 0x0) (rerun: 32) ioctl$BTRFS_IOC_SUBVOL_CREATE_V2(r1, 0x50009418, &(0x7f00000006c0)={{r2}, 0x0, 0x0, @inherit={0x58, &(0x7f0000000080)={0x1, 0x2, 0x8, 0x6, {0x0, 0xffffffffffffff10, 0x10000, 0x8000, 0x7}, [0x5, 0x152]}}, @subvolid=0x8f}) sendmsg$nl_route(0xffffffffffffffff, 0x0, 0x0) getpeername$packet(r1, &(0x7f0000000000)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @link_local}, &(0x7f0000000040)=0x14) (async) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000000)=ANY=[], 0x48}, 0x1, 0x0, 0x0, 0x404c014}, 0x0) [ 2894.853905][T23657] bond687: entered promiscuous mode [ 2894.877022][T23657] 8021q: adding VLAN 0 to HW filter on device bond687 17:04:19 executing program 4: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x28a, 0x0, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0x2f}]}, 0x3c}}, 0x0) [ 2894.918871][T23668] netlink: 'syz-executor.2': attribute type 1 has an invalid length. 
17:04:20 executing program 0: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000001c0)='cgroup.controllers\x00', 0x275a, 0x0) (async) r1 = socket$inet6_udplite(0xa, 0x2, 0x88) ioctl$sock_SIOCGIFINDEX_80211(r1, 0x8933, &(0x7f0000000240)={'wlan1\x00', 0x0}) (async) syz_genetlink_get_family_id$nl80211(&(0x7f0000000140), 0xffffffffffffffff) sendmsg$NL80211_CMD_FRAME(0xffffffffffffffff, &(0x7f0000001280)={0x0, 0x0, &(0x7f0000000000)={&(0x7f0000000180)=ANY=[@ANYBLOB="48040000", @ANYRES16=r1, @ANYBLOB="01edff000000000004003b1c210008000300", @ANYRES32=r2, @ANYBLOB="2c0433005000de295b3acba52ee4080211000001505050505050"], 0x448}}, 0x0) (async) r3 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000001c0)='memory.current\x00', 0x275a, 0x0) write$binfmt_script(r3, &(0x7f0000000000)=ANY=[], 0x208e24b) mmap(&(0x7f0000000000/0xb36000)=nil, 0xb36000, 0x2, 0x28011, r3, 0x0) write$binfmt_script(r0, &(0x7f0000000040)=ANY=[], 0x208e24b) (async) socket$nl_generic(0x10, 0x3, 0x10) (async) r4 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000080)='cgroup.controllers\x00', 0xb00000000065808, 0x0) r5 = socket$nl_generic(0x10, 0x3, 0x10) (async) r6 = socket$nl_generic(0x10, 0x3, 0x10) ioctl$sock_SIOCGIFINDEX_80211(r6, 0x8933, &(0x7f0000000dc0)={'wlan1\x00', 0x0}) (async) r8 = syz_genetlink_get_family_id$nl80211(&(0x7f0000000140), 0xffffffffffffffff) sendmsg$NL80211_CMD_FRAME(r6, &(0x7f0000001280)={0x0, 0x0, &(0x7f0000001240)={&(0x7f00000001c0)={0x3c, r8, 0x1, 0x0, 0x0, {{}, {@val={0x8, 0x3, r7}, @void}}, [@NL80211_ATTR_FRAME={0x1e, 0x33, @deauth={@wo_ht={{}, {}, @device_a, @device_b}, 0x0, @void}}]}, 0x3c}}, 0x0) (async) r9 = bpf$BPF_PROG_RAW_TRACEPOINT_LOAD(0x5, &(0x7f0000000200)={0x11, 0x4, &(0x7f0000000040)=ANY=[@ANYBLOB="18000000000100060000000077f2ab268500000023cd730000"], &(0x7f0000000080)='GPL\x00', 0x0, 0x0, 0x0, 0x0, 0x0, '\x00', 0x0, 0x2, 0xffffffffffffffff, 0x8, 0x0, 0x0, 0x10, 0x0}, 0x45) bpf$BPF_RAW_TRACEPOINT_OPEN(0x11, &(0x7f0000000200)={&(0x7f00000001c0)='kfree\x00', r9}, 0x10) (async) r10 = socket$nl_xfrm(0x10, 0x3, 0x6) r11 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000140)='memory.events\x00', 0x7a05, 0x1700) r12 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) write$cgroup_int(r11, &(0x7f0000000200), 0xf000) (async) sendfile(r11, r12, 0x0, 0xf03b0000) (async) r13 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) sendfile(r13, r12, &(0x7f00000002c0)=0x335773c3, 0x8) (async) bpf$BPF_RAW_TRACEPOINT_OPEN(0x11, &(0x7f0000000400)={&(0x7f0000000280)='fscache_access_volume\x00', r13}, 0x10) (async) sendmsg$nl_xfrm(r10, &(0x7f0000001380)={0x0, 0x0, &(0x7f0000000100)={&(0x7f00000002c0)=@newsa={0x138, 0x10, 0x1, 0x0, 0x0, {{@in, @in6=@private1}, {@in6=@private1, 0x0, 0x33}, @in=@multicast2, {}, {}, {}, 0x0, 0x0, 0x2}, [@algo_auth={0x48, 0x1, {{'cmac(aes)\x00'}}}]}, 0x138}}, 0x0) poll(&(0x7f00000000c0)=[{r4, 0x712}, {r9, 0x20}, {0xffffffffffffffff, 0x1000}, {r10, 0x80}, {r5}], 0x5, 0x400) (async) sendfile(r5, r4, 0x0, 0x10000a006) 17:04:20 executing program 3: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000140)='memory.events\x00', 0x7a05, 0x1700) r1 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) write$cgroup_int(r0, &(0x7f0000000200), 0xf000) sendfile(r0, r1, 0x0, 0xf03b0000) r2 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) sendfile(r2, r1, &(0x7f00000002c0)=0x335773c3, 0x8) r3 = openat$cgroup_ro(0xffffffffffffff9c, 
&(0x7f0000000140)='memory.events\x00', 0x7a05, 0x1700) r4 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) write$cgroup_int(r3, &(0x7f0000000200), 0xf000) sendfile(r3, r4, 0x0, 0xf03b0000) sendfile(r3, r3, &(0x7f0000000180), 0x0) r5 = bpf$BPF_MAP_GET_FD_BY_ID(0xe, &(0x7f00000004c0)={0x0, 0x7f}, 0xc) r6 = bpf$MAP_CREATE(0x0, &(0x7f0000000500)=@base={0xd, 0x6, 0x0, 0xfffffffc, 0xc82, 0xffffffffffffffff, 0x100, '\x00', 0x0, 0xffffffffffffffff, 0x0, 0x1, 0x3}, 0x48) r7 = bpf$ITER_CREATE(0x21, &(0x7f0000000580), 0x8) r8 = bpf$MAP_CREATE(0x0, &(0x7f00000005c0)=@base={0x17, 0x8, 0x100, 0xfffffffa, 0x900, 0xffffffffffffffff, 0x101, '\x00', 0x0, 0xffffffffffffffff, 0x1, 0x0, 0x1}, 0x48) r9 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000840)='blkio.throttle.io_serviced\x00', 0x275a, 0x0) ioctl$FS_IOC_RESVSP(r9, 0x40305828, &(0x7f00000007c0)={0x0, 0x0, 0x0, 0x20040006, 0x17800}) bpf$BPF_PROG_RAW_TRACEPOINT_LOAD(0x5, &(0x7f00000006c0)={0x18, 0x5, &(0x7f00000003c0)=@framed={{0x18, 0x0, 0x0, 0x0, 0x4fd, 0x0, 0x0, 0x0, 0x8}, [@ldst={0x2, 0x0, 0x1, 0xa, 0xb, 0x0, 0x4}, @call={0x85, 0x0, 0x0, 0x58}]}, &(0x7f0000000400)='syzkaller\x00', 0x7, 0x0, 0x0, 0x41100, 0x11, '\x00', 0x0, 0x0, r1, 0x8, &(0x7f0000000440)={0x3, 0x1}, 0x8, 0x10, &(0x7f0000000480)={0x1, 0x2, 0x1, 0x48000000}, 0x10, 0x0, 0x0, 0x0, &(0x7f0000000640)=[r3, r5, r6, 0xffffffffffffffff, 0x1, 0x1, r7, r8, r9, 0x1]}, 0x80) r10 = socket$netlink(0x10, 0x3, 0x0) r11 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000140)='memory.events\x00', 0x7a05, 0x1700) r12 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) write$cgroup_int(r11, &(0x7f0000000200), 0xf000) sendfile(r11, r12, 0x0, 0xf03b0000) sendfile(r11, r11, &(0x7f0000000180), 0x0) sendmsg$nl_route_sched(r11, &(0x7f0000000380)={&(0x7f00000002c0)={0x10, 0x0, 0x0, 0x1000}, 0xc, &(0x7f0000000340)={&(0x7f0000000300)=ANY=[@ANYBLOB="ffffffff660004002dbd7000fedbdf2500010000", @ANYRES32=0x0, @ANYBLOB="01000100f3ff09000a00050008000b000500000008000b00a4fc000008000b00ff030000"], 0x3c}, 0x1, 0x0, 0x0, 0x20000401}, 0x80) r13 = socket(0x0, 0x0, 0x0) sendmsg$nl_route(0xffffffffffffffff, 0x0, 0x0) sendmsg$nl_route(r10, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000100)=@newlink={0x48, 0x10, 0xffffff1f, 0x0, 0x0, {}, [@IFLA_LINKINFO={0x28, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x18, 0x2, 0x0, 0x1, [@IFLA_BR_MULTI_BOOLOPT={0xc, 0x2e, {0x0, 0x1}}, @IFLA_BR_MCAST_STATS_ENABLED={0x5}]}}}]}, 0x48}}, 0x0) sendmsg$IPCTNL_MSG_EXP_GET_STATS_CPU(r13, &(0x7f0000000280)={&(0x7f0000000180)={0x10, 0x0, 0x0, 0x4}, 0xc, &(0x7f0000000200)={&(0x7f00000001c0)={0x14, 0x3, 0x2, 0x301, 0x0, 0x0, {0x3, 0x0, 0x2}, [""]}, 0x14}, 0x1, 0x0, 0x0, 0x41}, 0x4000) sendmsg$nl_route(0xffffffffffffffff, &(0x7f00000000c0)={&(0x7f0000000000)={0x10, 0x0, 0x0, 0x20000000}, 0xc, &(0x7f0000000080)={&(0x7f0000000740)=ANY=[@ANYBLOB="280000006c0000012cbd700001dcdf2500000000", @ANYRES32=0x0, @ANYBLOB="0100010040020000efff2204200000005845552bfac297eacfbd56398c525cf78bb3b61572d5b555be16cdf17056ffa26d08de6ade80a2ae75f10dc465aee3e008d079ac4bc751dc7cc8522d7e7a7b4f28ac23f6"], 0x28}, 0x1, 0x0, 0x0, 0x4000000}, 0x48c4) 17:04:20 executing program 0: ioctl$sock_ipv6_tunnel_SIOCDELTUNNEL(0xffffffffffffffff, 0x89f2, &(0x7f0000000000)={'syztnl2\x00', 0x0}) r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000001c0)='cgroup.controllers\x00', 0x275a, 0x0) r1 = socket$inet6_udplite(0xa, 0x2, 0x88) ioctl$sock_SIOCGIFINDEX_80211(r1, 0x8933, &(0x7f0000000240)={'wlan1\x00', 
0x0}) r3 = syz_genetlink_get_family_id$nl80211(&(0x7f0000000140), 0xffffffffffffffff) r4 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) sendfile(0xffffffffffffffff, r4, 0x0, 0x8000000000004) openat$cgroup_ro(r4, &(0x7f00000001c0)='cpuacct.usage_percpu_user\x00', 0x0, 0x0) getsockname$inet6(r4, &(0x7f0000000300)={0xa, 0x0, 0x0, @mcast1}, &(0x7f0000000340)=0x1c) sendmsg$NL80211_CMD_FRAME(0xffffffffffffffff, &(0x7f0000001280)={0x0, 0x0, &(0x7f0000000100)={&(0x7f0000000040)=ANY=[@ANYBLOB="48040000", @ANYRES16=r3, @ANYBLOB="01e5ff000000000004003b1c210008000300", @ANYRES32=r2, @ANYBLOB="2c0433005000de295b3acba52ee4080211000001505050505050"], 0x448}}, 0x0) r5 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000180)='net_prio.prioidx\x00', 0x275a, 0x0) r6 = bpf$ITER_CREATE(0x21, &(0x7f0000000200)={r0}, 0x8) sendfile(r6, 0xffffffffffffffff, &(0x7f0000000280)=0x8, 0x10001) write$binfmt_script(r5, &(0x7f0000000000)=ANY=[], 0x208e24b) bpf$ITER_CREATE(0x21, &(0x7f00000002c0)={r5}, 0x8) r7 = socket$nl_route(0x10, 0x3, 0x0) r8 = socket(0x10, 0x2, 0x0) sendmsg$nl_route_sched(r8, &(0x7f0000000180)={0x0, 0x0, &(0x7f0000000140)={0x0, 0x140}}, 0x0) getsockname$packet(r8, &(0x7f0000000080)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000100)=0xab) sendmsg$nl_route(r7, &(0x7f0000000000)={0x0, 0x0, &(0x7f0000000140)={&(0x7f0000000040)=ANY=[@ANYBLOB="3c0000001000010400eeffffffffffff00000000", @ANYRES32=r9, @ANYBLOB="01000000010000001c0012000c000100627269646765"], 0x3c}}, 0x0) sendmsg$nl_route_sched(0xffffffffffffffff, &(0x7f0000005840)={0x0, 0x0, &(0x7f0000000780)={&(0x7f0000000240)=ANY=[@ANYBLOB="4800000024000b0e00"/20, @ANYRES32=r9, @ANYBLOB="00000000ffffffff0000000008000100687462001c0002001800020003"], 0x48}}, 0x0) sendmsg$nl_route_sched(0xffffffffffffffff, &(0x7f00000000c0)={0x0, 0x0, &(0x7f0000000180)={&(0x7f00000005c0)=@newtfilter={0x44, 0x2c, 0xd27, 0x0, 0x0, {0x0, 0x0, 0x0, r9, {}, {}, {0xfff3}}, [@filter_kind_options=@f_u32={{0x8}, {0x18, 0x2, [@TCA_U32_SEL={0x14, 0x5, {0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80080}}]}}]}, 0x44}}, 0x0) ioctl$sock_ipv6_tunnel_SIOCADDTUNNEL(0xffffffffffffffff, 0x89f1, &(0x7f0000000400)={'ip6tnl0\x00', &(0x7f0000000380)={'ip6_vti0\x00', r9, 0x2f, 0x9, 0x0, 0x80000001, 0x40, @dev={0xfe, 0x80, '\x00', 0x1f}, @private1={0xfc, 0x1, '\x00', 0x1}, 0x1, 0x7, 0x53e, 0x9}}) mmap(&(0x7f0000000000/0xb36000)=nil, 0xb36000, 0x2, 0x28011, r5, 0x0) preadv(r5, &(0x7f00000015c0)=[{&(0x7f0000000080)=""/124, 0xffffffff000}], 0x5, 0x0, 0x0) r10 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000080)='cgroup.controllers\x00', 0xb00000000065808, 0x0) r11 = socket$nl_generic(0x10, 0x3, 0x10) r12 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000000c0)='cgroup.controllers\x00', 0x275a, 0x0) write$binfmt_script(r12, &(0x7f0000000240)=ANY=[], 0x3af4701e) sendfile(r11, r10, 0x0, 0x10000a006) [ 2894.972684][T23668] workqueue: Failed to create a rescuer kthread for wq "bond1049": -EINTR [ 2895.261251][T23676] netlink: 'syz-executor.1': attribute type 1 has an invalid length. 
17:04:20 executing program 2: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0xba, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) 17:04:20 executing program 1: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x28a, 0xa, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8}]}, 0x3c}, 0x1, 0x2}, 0x0) [ 2895.303349][T23676] workqueue: Failed to create a rescuer kthread for wq "bond811": -EINTR [ 2895.355673][T23699] netlink: 'syz-executor.5': attribute type 1 has an invalid length. [ 2895.446516][T23699] bond1123: entered promiscuous mode [ 2895.454360][T23699] 8021q: adding VLAN 0 to HW filter on device bond1123 [ 2895.595210][T23700] bond1123: (slave bridge1149): making interface the new active one [ 2895.603627][T23700] bridge1149: entered promiscuous mode [ 2895.618542][T23700] bond1123: (slave bridge1149): Enslaving as an active interface with an up link 17:04:20 executing program 5: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x3fc, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) [ 2895.647929][T23711] netlink: 'syz-executor.4': attribute type 1 has an invalid length. 
[ 2895.668201][ T27] audit: type=1804 audit(1688403860.624:425): pid=23723 uid=0 auid=4294967295 ses=4294967295 subj=unconfined op=invalid_pcr cause=open_writers comm="syz-executor.0" name="/root/syzkaller-testdir444109964/syzkaller.UHVosO/2032/cgroup.controllers" dev="sda1" ino=1947 res=1 errno=0 [ 2895.724849][T23711] bond688: entered promiscuous mode [ 2895.732785][T23711] 8021q: adding VLAN 0 to HW filter on device bond688 17:04:20 executing program 4: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x28a, 0x0, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0x30}]}, 0x3c}}, 0x0) [ 2895.771888][ T27] audit: type=1804 audit(1688403860.684:426): pid=23723 uid=0 auid=4294967295 ses=4294967295 subj=unconfined op=invalid_pcr cause=ToMToU comm="syz-executor.0" name="/root/syzkaller-testdir444109964/syzkaller.UHVosO/2032/cgroup.controllers" dev="sda1" ino=1947 res=1 errno=0 [ 2895.877512][T23729] bond1049: entered promiscuous mode [ 2895.884962][T23729] 8021q: adding VLAN 0 to HW filter on device bond1049 [ 2895.919611][T23732] netlink: 12 bytes leftover after parsing attributes in process `syz-executor.0'. 
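The recurring "attribute type 1 has an invalid length" and "N bytes leftover after parsing attributes" warnings come from the kernel's netlink attribute parsing: each attribute is a 4-byte struct nlattr header (nla_len, nla_type) followed by payload padded to 4 bytes, and the fuzzed blobs advertise lengths that disagree with the rtnetlink policy or leave an unparsable tail. The user-space sketch below walks a buffer the same way nla_for_each_attr()/nla_ok() do; it only illustrates the framing, the per-type policy checks done by the real validate_nla() are omitted, and the byte layout in main() assumes a little-endian host.

#include <linux/netlink.h>
#include <stdio.h>

static void walk_attrs(const void *attrs, int len)
{
        const struct nlattr *nla = attrs;

        while (len >= (int)sizeof(*nla) &&
               nla->nla_len >= sizeof(*nla) && nla->nla_len <= len) {
                printf("type %u, payload %u bytes\n",
                       (unsigned)(nla->nla_type & NLA_TYPE_MASK),
                       (unsigned)(nla->nla_len - NLA_HDRLEN));
                len -= NLA_ALIGN(nla->nla_len);
                nla = (const struct nlattr *)((const char *)nla + NLA_ALIGN(nla->nla_len));
        }
        if (len > 0)
                printf("%d bytes leftover after parsing attributes\n", len);
}

int main(void)
{
        /* One well-formed attribute (type 3, 4-byte payload) followed by 12
         * stray bytes, similar in spirit to the truncated fuzzed payloads. */
        unsigned char buf[8 + 12] = { 8, 0, 3, 0, 0xde, 0xad, 0xbe, 0xef };

        walk_attrs(buf, sizeof(buf));
        return 0;
}

With this input the walk reports the one valid attribute and then "12 bytes leftover after parsing attributes", the same shape of complaint logged above for syz-executor.0.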
[ 2896.118459][T23735] bond1049: (slave bridge1115): making interface the new active one [ 2896.141244][T23735] bridge1115: entered promiscuous mode [ 2896.163344][T23735] bond1049: (slave bridge1115): Enslaving as an active interface with an up link 17:04:21 executing program 2: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0xda, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) [ 2896.265565][T23734] bond811: entered promiscuous mode [ 2896.276654][T23734] 8021q: adding VLAN 0 to HW filter on device bond811 17:04:21 executing program 1: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x28a, 0xa, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8}]}, 0x3c}, 0x1, 0x4}, 0x0) 17:04:21 executing program 3: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000140)='memory.events\x00', 0x7a05, 0x1700) r1 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) write$cgroup_int(r0, &(0x7f0000000200), 0xf000) (async) write$cgroup_int(r0, &(0x7f0000000200), 0xf000) sendfile(r0, r1, 0x0, 0xf03b0000) (async) sendfile(r0, r1, 0x0, 0xf03b0000) openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) (async) r2 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) sendfile(r2, r1, &(0x7f00000002c0)=0x335773c3, 0x8) r3 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000140)='memory.events\x00', 0x7a05, 0x1700) openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) (async) r4 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) write$cgroup_int(r3, &(0x7f0000000200), 0xf000) sendfile(r3, r4, 0x0, 0xf03b0000) (async) sendfile(r3, r4, 0x0, 0xf03b0000) sendfile(r3, r3, &(0x7f0000000180), 0x0) bpf$BPF_MAP_GET_FD_BY_ID(0xe, &(0x7f00000004c0)={0x0, 0x7f}, 0xc) (async) r5 = bpf$BPF_MAP_GET_FD_BY_ID(0xe, &(0x7f00000004c0)={0x0, 0x7f}, 0xc) bpf$MAP_CREATE(0x0, &(0x7f0000000500)=@base={0xd, 0x6, 0x0, 0xfffffffc, 0xc82, 0xffffffffffffffff, 0x100, '\x00', 0x0, 0xffffffffffffffff, 0x0, 0x1, 0x3}, 0x48) (async) r6 = bpf$MAP_CREATE(0x0, &(0x7f0000000500)=@base={0xd, 0x6, 
0x0, 0xfffffffc, 0xc82, 0xffffffffffffffff, 0x100, '\x00', 0x0, 0xffffffffffffffff, 0x0, 0x1, 0x3}, 0x48) r7 = bpf$ITER_CREATE(0x21, &(0x7f0000000580), 0x8) r8 = bpf$MAP_CREATE(0x0, &(0x7f00000005c0)=@base={0x17, 0x8, 0x100, 0xfffffffa, 0x900, 0xffffffffffffffff, 0x101, '\x00', 0x0, 0xffffffffffffffff, 0x1, 0x0, 0x1}, 0x48) r9 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000840)='blkio.throttle.io_serviced\x00', 0x275a, 0x0) ioctl$FS_IOC_RESVSP(r9, 0x40305828, &(0x7f00000007c0)={0x0, 0x0, 0x0, 0x20040006, 0x17800}) bpf$BPF_PROG_RAW_TRACEPOINT_LOAD(0x5, &(0x7f00000006c0)={0x18, 0x5, &(0x7f00000003c0)=@framed={{0x18, 0x0, 0x0, 0x0, 0x4fd, 0x0, 0x0, 0x0, 0x8}, [@ldst={0x2, 0x0, 0x1, 0xa, 0xb, 0x0, 0x4}, @call={0x85, 0x0, 0x0, 0x58}]}, &(0x7f0000000400)='syzkaller\x00', 0x7, 0x0, 0x0, 0x41100, 0x11, '\x00', 0x0, 0x0, r1, 0x8, &(0x7f0000000440)={0x3, 0x1}, 0x8, 0x10, &(0x7f0000000480)={0x1, 0x2, 0x1, 0x48000000}, 0x10, 0x0, 0x0, 0x0, &(0x7f0000000640)=[r3, r5, r6, 0xffffffffffffffff, 0x1, 0x1, r7, r8, r9, 0x1]}, 0x80) r10 = socket$netlink(0x10, 0x3, 0x0) r11 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000140)='memory.events\x00', 0x7a05, 0x1700) r12 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) write$cgroup_int(r11, &(0x7f0000000200), 0xf000) sendfile(r11, r12, 0x0, 0xf03b0000) sendfile(r11, r11, &(0x7f0000000180), 0x0) sendmsg$nl_route_sched(r11, &(0x7f0000000380)={&(0x7f00000002c0)={0x10, 0x0, 0x0, 0x1000}, 0xc, &(0x7f0000000340)={&(0x7f0000000300)=ANY=[@ANYBLOB="ffffffff660004002dbd7000fedbdf2500010000", @ANYRES32=0x0, @ANYBLOB="01000100f3ff09000a00050008000b000500000008000b00a4fc000008000b00ff030000"], 0x3c}, 0x1, 0x0, 0x0, 0x20000401}, 0x80) r13 = socket(0x0, 0x0, 0x0) sendmsg$nl_route(0xffffffffffffffff, 0x0, 0x0) (async) sendmsg$nl_route(0xffffffffffffffff, 0x0, 0x0) sendmsg$nl_route(r10, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000100)=@newlink={0x48, 0x10, 0xffffff1f, 0x0, 0x0, {}, [@IFLA_LINKINFO={0x28, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x18, 0x2, 0x0, 0x1, [@IFLA_BR_MULTI_BOOLOPT={0xc, 0x2e, {0x0, 0x1}}, @IFLA_BR_MCAST_STATS_ENABLED={0x5}]}}}]}, 0x48}}, 0x0) sendmsg$IPCTNL_MSG_EXP_GET_STATS_CPU(r13, &(0x7f0000000280)={&(0x7f0000000180)={0x10, 0x0, 0x0, 0x4}, 0xc, &(0x7f0000000200)={&(0x7f00000001c0)={0x14, 0x3, 0x2, 0x301, 0x0, 0x0, {0x3, 0x0, 0x2}, [""]}, 0x14}, 0x1, 0x0, 0x0, 0x41}, 0x4000) (async) sendmsg$IPCTNL_MSG_EXP_GET_STATS_CPU(r13, &(0x7f0000000280)={&(0x7f0000000180)={0x10, 0x0, 0x0, 0x4}, 0xc, &(0x7f0000000200)={&(0x7f00000001c0)={0x14, 0x3, 0x2, 0x301, 0x0, 0x0, {0x3, 0x0, 0x2}, [""]}, 0x14}, 0x1, 0x0, 0x0, 0x41}, 0x4000) sendmsg$nl_route(0xffffffffffffffff, &(0x7f00000000c0)={&(0x7f0000000000)={0x10, 0x0, 0x0, 0x20000000}, 0xc, &(0x7f0000000080)={&(0x7f0000000740)=ANY=[@ANYBLOB="280000006c0000012cbd700001dcdf2500000000", @ANYRES32=0x0, @ANYBLOB="0100010040020000efff2204200000005845552bfac297eacfbd56398c525cf78bb3b61572d5b555be16cdf17056ffa26d08de6ade80a2ae75f10dc465aee3e008d079ac4bc751dc7cc8522d7e7a7b4f28ac23f6"], 0x28}, 0x1, 0x0, 0x0, 0x4000000}, 0x48c4) (async) sendmsg$nl_route(0xffffffffffffffff, &(0x7f00000000c0)={&(0x7f0000000000)={0x10, 0x0, 0x0, 0x20000000}, 0xc, &(0x7f0000000080)={&(0x7f0000000740)=ANY=[@ANYBLOB="280000006c0000012cbd700001dcdf2500000000", @ANYRES32=0x0, @ANYBLOB="0100010040020000efff2204200000005845552bfac297eacfbd56398c525cf78bb3b61572d5b555be16cdf17056ffa26d08de6ade80a2ae75f10dc465aee3e008d079ac4bc751dc7cc8522d7e7a7b4f28ac23f6"], 0x28}, 0x1, 0x0, 0x0, 0x4000000}, 0x48c4) [ 
2896.500533][T23746] bond1124: entered promiscuous mode [ 2896.517966][T23746] 8021q: adding VLAN 0 to HW filter on device bond1124 17:04:21 executing program 3: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000140)='memory.events\x00', 0x7a05, 0x1700) (async) r1 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) write$cgroup_int(r0, &(0x7f0000000200), 0xf000) (async) sendfile(r0, r1, 0x0, 0xf03b0000) r2 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) sendfile(r2, r1, &(0x7f00000002c0)=0x335773c3, 0x8) (async) r3 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000140)='memory.events\x00', 0x7a05, 0x1700) r4 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) write$cgroup_int(r3, &(0x7f0000000200), 0xf000) (async) sendfile(r3, r4, 0x0, 0xf03b0000) sendfile(r3, r3, &(0x7f0000000180), 0x0) (async) r5 = bpf$BPF_MAP_GET_FD_BY_ID(0xe, &(0x7f00000004c0)={0x0, 0x7f}, 0xc) (async) r6 = bpf$MAP_CREATE(0x0, &(0x7f0000000500)=@base={0xd, 0x6, 0x0, 0xfffffffc, 0xc82, 0xffffffffffffffff, 0x100, '\x00', 0x0, 0xffffffffffffffff, 0x0, 0x1, 0x3}, 0x48) (async) r7 = bpf$ITER_CREATE(0x21, &(0x7f0000000580), 0x8) (async) r8 = bpf$MAP_CREATE(0x0, &(0x7f00000005c0)=@base={0x17, 0x8, 0x100, 0xfffffffa, 0x900, 0xffffffffffffffff, 0x101, '\x00', 0x0, 0xffffffffffffffff, 0x1, 0x0, 0x1}, 0x48) (async) r9 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000840)='blkio.throttle.io_serviced\x00', 0x275a, 0x0) ioctl$FS_IOC_RESVSP(r9, 0x40305828, &(0x7f00000007c0)={0x0, 0x0, 0x0, 0x20040006, 0x17800}) (async) bpf$BPF_PROG_RAW_TRACEPOINT_LOAD(0x5, &(0x7f00000006c0)={0x18, 0x5, &(0x7f00000003c0)=@framed={{0x18, 0x0, 0x0, 0x0, 0x4fd, 0x0, 0x0, 0x0, 0x8}, [@ldst={0x2, 0x0, 0x1, 0xa, 0xb, 0x0, 0x4}, @call={0x85, 0x0, 0x0, 0x58}]}, &(0x7f0000000400)='syzkaller\x00', 0x7, 0x0, 0x0, 0x41100, 0x11, '\x00', 0x0, 0x0, r1, 0x8, &(0x7f0000000440)={0x3, 0x1}, 0x8, 0x10, &(0x7f0000000480)={0x1, 0x2, 0x1, 0x48000000}, 0x10, 0x0, 0x0, 0x0, &(0x7f0000000640)=[r3, r5, r6, 0xffffffffffffffff, 0x1, 0x1, r7, r8, r9, 0x1]}, 0x80) (async) r10 = socket$netlink(0x10, 0x3, 0x0) (async) r11 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000140)='memory.events\x00', 0x7a05, 0x1700) (async) r12 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) write$cgroup_int(r11, &(0x7f0000000200), 0xf000) (async) sendfile(r11, r12, 0x0, 0xf03b0000) sendfile(r11, r11, &(0x7f0000000180), 0x0) (async) sendmsg$nl_route_sched(r11, &(0x7f0000000380)={&(0x7f00000002c0)={0x10, 0x0, 0x0, 0x1000}, 0xc, &(0x7f0000000340)={&(0x7f0000000300)=ANY=[@ANYBLOB="ffffffff660004002dbd7000fedbdf2500010000", @ANYRES32=0x0, @ANYBLOB="01000100f3ff09000a00050008000b000500000008000b00a4fc000008000b00ff030000"], 0x3c}, 0x1, 0x0, 0x0, 0x20000401}, 0x80) (async) r13 = socket(0x0, 0x0, 0x0) (async) sendmsg$nl_route(0xffffffffffffffff, 0x0, 0x0) sendmsg$nl_route(r10, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000100)=@newlink={0x48, 0x10, 0xffffff1f, 0x0, 0x0, {}, [@IFLA_LINKINFO={0x28, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x18, 0x2, 0x0, 0x1, [@IFLA_BR_MULTI_BOOLOPT={0xc, 0x2e, {0x0, 0x1}}, @IFLA_BR_MCAST_STATS_ENABLED={0x5}]}}}]}, 0x48}}, 0x0) sendmsg$IPCTNL_MSG_EXP_GET_STATS_CPU(r13, &(0x7f0000000280)={&(0x7f0000000180)={0x10, 0x0, 0x0, 0x4}, 0xc, &(0x7f0000000200)={&(0x7f00000001c0)={0x14, 0x3, 0x2, 0x301, 0x0, 0x0, {0x3, 0x0, 0x2}, [""]}, 0x14}, 0x1, 0x0, 0x0, 0x41}, 0x4000) sendmsg$nl_route(0xffffffffffffffff, 
&(0x7f00000000c0)={&(0x7f0000000000)={0x10, 0x0, 0x0, 0x20000000}, 0xc, &(0x7f0000000080)={&(0x7f0000000740)=ANY=[@ANYBLOB="280000006c0000012cbd700001dcdf2500000000", @ANYRES32=0x0, @ANYBLOB="0100010040020000efff2204200000005845552bfac297eacfbd56398c525cf78bb3b61572d5b555be16cdf17056ffa26d08de6ade80a2ae75f10dc465aee3e008d079ac4bc751dc7cc8522d7e7a7b4f28ac23f6"], 0x28}, 0x1, 0x0, 0x0, 0x4000000}, 0x48c4) [ 2896.626223][T23748] bond689: entered promiscuous mode [ 2896.680268][T23748] 8021q: adding VLAN 0 to HW filter on device bond689 17:04:21 executing program 3: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket(0x0, 0x0, 0x0) sendmsg$nl_route(0xffffffffffffffff, 0x0, 0x0) recvfrom$inet6(r1, &(0x7f0000000000)=""/73, 0x49, 0x40000040, &(0x7f0000000080)={0xa, 0x4e22, 0x400, @mcast1, 0x4}, 0x1c) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000100)=@newlink={0x48, 0x10, 0xffffff1f, 0x0, 0x0, {}, [@IFLA_LINKINFO={0x28, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x18, 0x2, 0x0, 0x1, [@IFLA_BR_MULTI_BOOLOPT={0xc, 0x2e, {0x0, 0x1}}, @IFLA_BR_MCAST_STATS_ENABLED={0x5}]}}}]}, 0x48}}, 0x0) [ 2896.853698][T23749] bond1124: (slave bridge1150): making interface the new active one [ 2896.865899][T23749] bridge1150: entered promiscuous mode 17:04:21 executing program 5: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x500, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) [ 2896.906534][T23749] bond1124: (slave bridge1150): Enslaving as an active interface with an up link 17:04:22 executing program 0: ioctl$sock_ipv6_tunnel_SIOCDELTUNNEL(0xffffffffffffffff, 0x89f2, &(0x7f0000000000)={'syztnl2\x00', 0x0}) (async) r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000001c0)='cgroup.controllers\x00', 0x275a, 0x0) (async) r1 = socket$inet6_udplite(0xa, 0x2, 0x88) ioctl$sock_SIOCGIFINDEX_80211(r1, 0x8933, &(0x7f0000000240)={'wlan1\x00', 0x0}) r3 = syz_genetlink_get_family_id$nl80211(&(0x7f0000000140), 0xffffffffffffffff) (async) r4 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) sendfile(0xffffffffffffffff, r4, 0x0, 0x8000000000004) openat$cgroup_ro(r4, &(0x7f00000001c0)='cpuacct.usage_percpu_user\x00', 0x0, 0x0) (async) getsockname$inet6(r4, &(0x7f0000000300)={0xa, 0x0, 0x0, @mcast1}, &(0x7f0000000340)=0x1c) (async) sendmsg$NL80211_CMD_FRAME(0xffffffffffffffff, &(0x7f0000001280)={0x0, 0x0, &(0x7f0000000100)={&(0x7f0000000040)=ANY=[@ANYBLOB="48040000", @ANYRES16=r3, @ANYBLOB="01e5ff000000000004003b1c210008000300", @ANYRES32=r2, @ANYBLOB="2c0433005000de295b3acba52ee4080211000001505050505050"], 0x448}}, 0x0) (async) r5 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000180)='net_prio.prioidx\x00', 0x275a, 0x0) (async) r6 = bpf$ITER_CREATE(0x21, &(0x7f0000000200)={r0}, 0x8) sendfile(r6, 0xffffffffffffffff, &(0x7f0000000280)=0x8, 0x10001) (async) 
write$binfmt_script(r5, &(0x7f0000000000)=ANY=[], 0x208e24b) (async) bpf$ITER_CREATE(0x21, &(0x7f00000002c0)={r5}, 0x8) (async) r7 = socket$nl_route(0x10, 0x3, 0x0) (async) r8 = socket(0x10, 0x2, 0x0) sendmsg$nl_route_sched(r8, &(0x7f0000000180)={0x0, 0x0, &(0x7f0000000140)={0x0, 0x140}}, 0x0) getsockname$packet(r8, &(0x7f0000000080)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000100)=0xab) sendmsg$nl_route(r7, &(0x7f0000000000)={0x0, 0x0, &(0x7f0000000140)={&(0x7f0000000040)=ANY=[@ANYBLOB="3c0000001000010400eeffffffffffff00000000", @ANYRES32=r9, @ANYBLOB="01000000010000001c0012000c000100627269646765"], 0x3c}}, 0x0) sendmsg$nl_route_sched(0xffffffffffffffff, &(0x7f0000005840)={0x0, 0x0, &(0x7f0000000780)={&(0x7f0000000240)=ANY=[@ANYBLOB="4800000024000b0e00"/20, @ANYRES32=r9, @ANYBLOB="00000000ffffffff0000000008000100687462001c0002001800020003"], 0x48}}, 0x0) (async) sendmsg$nl_route_sched(0xffffffffffffffff, &(0x7f00000000c0)={0x0, 0x0, &(0x7f0000000180)={&(0x7f00000005c0)=@newtfilter={0x44, 0x2c, 0xd27, 0x0, 0x0, {0x0, 0x0, 0x0, r9, {}, {}, {0xfff3}}, [@filter_kind_options=@f_u32={{0x8}, {0x18, 0x2, [@TCA_U32_SEL={0x14, 0x5, {0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80080}}]}}]}, 0x44}}, 0x0) ioctl$sock_ipv6_tunnel_SIOCADDTUNNEL(0xffffffffffffffff, 0x89f1, &(0x7f0000000400)={'ip6tnl0\x00', &(0x7f0000000380)={'ip6_vti0\x00', r9, 0x2f, 0x9, 0x0, 0x80000001, 0x40, @dev={0xfe, 0x80, '\x00', 0x1f}, @private1={0xfc, 0x1, '\x00', 0x1}, 0x1, 0x7, 0x53e, 0x9}}) (async) mmap(&(0x7f0000000000/0xb36000)=nil, 0xb36000, 0x2, 0x28011, r5, 0x0) preadv(r5, &(0x7f00000015c0)=[{&(0x7f0000000080)=""/124, 0xffffffff000}], 0x5, 0x0, 0x0) (async) r10 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000080)='cgroup.controllers\x00', 0xb00000000065808, 0x0) (async) r11 = socket$nl_generic(0x10, 0x3, 0x10) (async) r12 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000000c0)='cgroup.controllers\x00', 0x275a, 0x0) write$binfmt_script(r12, &(0x7f0000000240)=ANY=[], 0x3af4701e) (async) sendfile(r11, r10, 0x0, 0x10000a006) 17:04:22 executing program 4: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x28a, 0x0, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0x31}]}, 0x3c}}, 0x0) [ 2897.087217][T23753] bond1050: entered promiscuous mode [ 2897.096974][T23753] 8021q: adding VLAN 0 to HW filter on device bond1050 [ 2897.230150][T23754] bond1050: (slave bridge1116): making interface the new active one [ 2897.252181][T23754] bridge1116: entered promiscuous mode [ 2897.275339][T23754] bond1050: (slave bridge1116): Enslaving as an active interface with an up link 17:04:22 executing program 2: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, 
&(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0xf0, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) 17:04:22 executing program 1: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x28a, 0xa, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8}]}, 0x3c}, 0x1, 0x6}, 0x0) 17:04:22 executing program 3: r0 = socket$netlink(0x10, 0x3, 0x0) (async) r1 = socket(0x0, 0x0, 0x0) sendmsg$nl_route(0xffffffffffffffff, 0x0, 0x0) (async) recvfrom$inet6(r1, &(0x7f0000000000)=""/73, 0x49, 0x40000040, &(0x7f0000000080)={0xa, 0x4e22, 0x400, @mcast1, 0x4}, 0x1c) (async) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000100)=@newlink={0x48, 0x10, 0xffffff1f, 0x0, 0x0, {}, [@IFLA_LINKINFO={0x28, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x18, 0x2, 0x0, 0x1, [@IFLA_BR_MULTI_BOOLOPT={0xc, 0x2e, {0x0, 0x1}}, @IFLA_BR_MCAST_STATS_ENABLED={0x5}]}}}]}, 0x48}}, 0x0) [ 2897.371318][T23758] workqueue: Failed to create a rescuer kthread for wq "bond812": -EINTR [ 2897.601148][T23785] bond1125: entered promiscuous mode [ 2897.617683][T23785] 8021q: adding VLAN 0 to HW filter on device bond1125 [ 2897.756135][T23788] bond1125: (slave bridge1151): making interface the new active one [ 2897.815249][T23788] bridge1151: entered promiscuous mode [ 2897.875745][T23788] bond1125: (slave bridge1151): Enslaving as an active interface with an up link [ 2897.904963][T23787] validate_nla: 7 callbacks suppressed [ 2897.904987][T23787] netlink: 'syz-executor.4': attribute type 1 has an invalid length. 
17:04:22 executing program 5: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x600, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) [ 2897.968424][T23787] bond690: entered promiscuous mode [ 2897.981130][T23787] 8021q: adding VLAN 0 to HW filter on device bond690 17:04:23 executing program 4: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x28a, 0x0, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0x32}]}, 0x3c}}, 0x0) [ 2898.015548][T23791] netlink: 12 bytes leftover after parsing attributes in process `syz-executor.0'. 
17:04:23 executing program 0: ioctl$sock_ipv6_tunnel_SIOCDELTUNNEL(0xffffffffffffffff, 0x89f2, &(0x7f0000000000)={'syztnl2\x00', 0x0}) (async) r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000001c0)='cgroup.controllers\x00', 0x275a, 0x0) (async) r1 = socket$inet6_udplite(0xa, 0x2, 0x88) ioctl$sock_SIOCGIFINDEX_80211(r1, 0x8933, &(0x7f0000000240)={'wlan1\x00', 0x0}) r3 = syz_genetlink_get_family_id$nl80211(&(0x7f0000000140), 0xffffffffffffffff) (async) r4 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) sendfile(0xffffffffffffffff, r4, 0x0, 0x8000000000004) (async) openat$cgroup_ro(r4, &(0x7f00000001c0)='cpuacct.usage_percpu_user\x00', 0x0, 0x0) getsockname$inet6(r4, &(0x7f0000000300)={0xa, 0x0, 0x0, @mcast1}, &(0x7f0000000340)=0x1c) sendmsg$NL80211_CMD_FRAME(0xffffffffffffffff, &(0x7f0000001280)={0x0, 0x0, &(0x7f0000000100)={&(0x7f0000000040)=ANY=[@ANYBLOB="48040000", @ANYRES16=r3, @ANYBLOB="01e5ff000000000004003b1c210008000300", @ANYRES32=r2, @ANYBLOB="2c0433005000de295b3acba52ee4080211000001505050505050"], 0x448}}, 0x0) r5 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000180)='net_prio.prioidx\x00', 0x275a, 0x0) (async) r6 = bpf$ITER_CREATE(0x21, &(0x7f0000000200)={r0}, 0x8) sendfile(r6, 0xffffffffffffffff, &(0x7f0000000280)=0x8, 0x10001) (async) write$binfmt_script(r5, &(0x7f0000000000)=ANY=[], 0x208e24b) bpf$ITER_CREATE(0x21, &(0x7f00000002c0)={r5}, 0x8) r7 = socket$nl_route(0x10, 0x3, 0x0) (async) r8 = socket(0x10, 0x2, 0x0) sendmsg$nl_route_sched(r8, &(0x7f0000000180)={0x0, 0x0, &(0x7f0000000140)={0x0, 0x140}}, 0x0) getsockname$packet(r8, &(0x7f0000000080)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000100)=0xab) sendmsg$nl_route(r7, &(0x7f0000000000)={0x0, 0x0, &(0x7f0000000140)={&(0x7f0000000040)=ANY=[@ANYBLOB="3c0000001000010400eeffffffffffff00000000", @ANYRES32=r9, @ANYBLOB="01000000010000001c0012000c000100627269646765"], 0x3c}}, 0x0) sendmsg$nl_route_sched(0xffffffffffffffff, &(0x7f0000005840)={0x0, 0x0, &(0x7f0000000780)={&(0x7f0000000240)=ANY=[@ANYBLOB="4800000024000b0e00"/20, @ANYRES32=r9, @ANYBLOB="00000000ffffffff0000000008000100687462001c0002001800020003"], 0x48}}, 0x0) (async) sendmsg$nl_route_sched(0xffffffffffffffff, &(0x7f00000000c0)={0x0, 0x0, &(0x7f0000000180)={&(0x7f00000005c0)=@newtfilter={0x44, 0x2c, 0xd27, 0x0, 0x0, {0x0, 0x0, 0x0, r9, {}, {}, {0xfff3}}, [@filter_kind_options=@f_u32={{0x8}, {0x18, 0x2, [@TCA_U32_SEL={0x14, 0x5, {0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80080}}]}}]}, 0x44}}, 0x0) (async) ioctl$sock_ipv6_tunnel_SIOCADDTUNNEL(0xffffffffffffffff, 0x89f1, &(0x7f0000000400)={'ip6tnl0\x00', &(0x7f0000000380)={'ip6_vti0\x00', r9, 0x2f, 0x9, 0x0, 0x80000001, 0x40, @dev={0xfe, 0x80, '\x00', 0x1f}, @private1={0xfc, 0x1, '\x00', 0x1}, 0x1, 0x7, 0x53e, 0x9}}) (async) mmap(&(0x7f0000000000/0xb36000)=nil, 0xb36000, 0x2, 0x28011, r5, 0x0) (async) preadv(r5, &(0x7f00000015c0)=[{&(0x7f0000000080)=""/124, 0xffffffff000}], 0x5, 0x0, 0x0) (async) r10 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000080)='cgroup.controllers\x00', 0xb00000000065808, 0x0) (async) r11 = socket$nl_generic(0x10, 0x3, 0x10) (async) r12 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000000c0)='cgroup.controllers\x00', 0x275a, 0x0) write$binfmt_script(r12, &(0x7f0000000240)=ANY=[], 0x3af4701e) (async) sendfile(r11, r10, 0x0, 0x10000a006) [ 2898.062251][T23799] netlink: 'syz-executor.2': attribute type 1 has an invalid length. 
[ 2898.112400][T23799] bond1051: entered promiscuous mode [ 2898.118365][T23799] 8021q: adding VLAN 0 to HW filter on device bond1051 17:04:23 executing program 2: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x12a, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) [ 2898.253128][T23800] bond1051: (slave bridge1117): making interface the new active one [ 2898.263871][T23800] bridge1117: entered promiscuous mode [ 2898.288241][T23800] bond1051: (slave bridge1117): Enslaving as an active interface with an up link [ 2898.339917][T23802] netlink: 'syz-executor.1': attribute type 1 has an invalid length. 17:04:23 executing program 1: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x28a, 0xa, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8}]}, 0x3c}, 0x1, 0x8}, 0x0) 17:04:23 executing program 3: r0 = socket$netlink(0x10, 0x3, 0x0) (async) r1 = socket(0x0, 0x0, 0x0) sendmsg$nl_route(0xffffffffffffffff, 0x0, 0x0) (async) recvfrom$inet6(r1, &(0x7f0000000000)=""/73, 0x49, 0x40000040, &(0x7f0000000080)={0xa, 0x4e22, 0x400, @mcast1, 0x4}, 0x1c) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000100)=@newlink={0x48, 0x10, 0xffffff1f, 0x0, 0x0, {}, [@IFLA_LINKINFO={0x28, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x18, 0x2, 0x0, 0x1, [@IFLA_BR_MULTI_BOOLOPT={0xc, 0x2e, {0x0, 0x1}}, @IFLA_BR_MCAST_STATS_ENABLED={0x5}]}}}]}, 0x48}}, 0x0) [ 2898.380606][T23802] workqueue: Failed to create a rescuer kthread for wq "bond812": -EINTR [ 2898.440693][T23813] netlink: 'syz-executor.5': attribute type 1 has an invalid length. [ 2898.524753][T23813] bond1126: entered promiscuous mode [ 2898.533016][T23813] 8021q: adding VLAN 0 to HW filter on device bond1126 [ 2898.766133][T23814] bond1126: (slave bridge1152): making interface the new active one [ 2898.775982][T23814] bridge1152: entered promiscuous mode [ 2898.788271][T23814] bond1126: (slave bridge1152): Enslaving as an active interface with an up link [ 2898.810470][T23818] netlink: 'syz-executor.4': attribute type 1 has an invalid length. 
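Before sending the raw blobs, each reproducer also records a value with getsockname$packet(): the socket's local address is read back into a struct sockaddr_ll and syzkaller captures the word at the sll_ifindex offset as resource r3, which @ANYRES32=r3 later splices into the netlink payload. The sketch below shows the plain getsockname() call this syscall variant models, issued on an AF_PACKET socket; note that in the log it is actually applied to the AF_NETLINK socket r2 (socket(0x10, 0x803, 0x0)), so the captured word need not be a real interface index. The SOCK_RAW/ETH_P_ALL choice here is an assumption for illustration.

#include <linux/if_packet.h>
#include <linux/if_ether.h>
#include <arpa/inet.h>
#include <sys/socket.h>
#include <string.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
        struct sockaddr_ll ll;
        socklen_t len = sizeof(ll);
        /* AF_PACKET + SOCK_RAW needs CAP_NET_RAW. */
        int fd = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL));

        memset(&ll, 0, sizeof(ll));
        if (fd < 0 || getsockname(fd, (struct sockaddr *)&ll, &len) < 0) {
                perror("getsockname");
                return 1;
        }
        /* sll_ifindex (0 while the socket is unbound) is the field the fuzzer
         * captures as r3 and reuses in later RTM_NEWLINK payloads. */
        printf("family=%u ifindex=%d\n", (unsigned)ll.sll_family, ll.sll_ifindex);
        close(fd);
        return 0;
}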
17:04:23 executing program 5: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x700, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) [ 2898.866036][T23818] bond691: entered promiscuous mode [ 2898.874416][T23818] 8021q: adding VLAN 0 to HW filter on device bond691 [ 2898.887414][T23821] netlink: 12 bytes leftover after parsing attributes in process `syz-executor.0'. 17:04:23 executing program 0: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x28a, 0x0, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0x31}]}, 0x3c}}, 0x0) 17:04:23 executing program 4: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x28a, 0x0, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0x33}]}, 0x3c}}, 0x0) [ 2898.981703][T23827] netlink: 'syz-executor.2': attribute type 1 has an invalid length. 
[ 2899.091753][T23827] bond1052: entered promiscuous mode [ 2899.100791][T23827] 8021q: adding VLAN 0 to HW filter on device bond1052 [ 2899.211748][T23829] bond1052: (slave bridge1118): making interface the new active one [ 2899.220781][T23829] bridge1118: entered promiscuous mode [ 2899.244386][T23829] bond1052: (slave bridge1118): Enslaving as an active interface with an up link 17:04:24 executing program 2: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x132, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) [ 2899.297112][T23832] netlink: 'syz-executor.1': attribute type 1 has an invalid length. [ 2899.385411][T23832] bond812: entered promiscuous mode [ 2899.395273][T23832] 8021q: adding VLAN 0 to HW filter on device bond812 17:04:24 executing program 1: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x28a, 0xa, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8}]}, 0x3c}, 0x1, 0xa}, 0x0) 17:04:24 executing program 3: r0 = socket$netlink(0x10, 0x3, 0x0) socket(0x0, 0x0, 0x0) sendmsg$nl_route(0xffffffffffffffff, 0x0, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000100)=ANY=[@ANYBLOB="4800000010001fff00"/20, @ANYRES32=0x0, @ANYBLOB="00000000000000002f0012800b00010062090000006500001800060000002e00000000000100000005002a00000000"], 0x48}}, 0x0) socketpair(0x21, 0x2, 0x0, &(0x7f0000000000)={0xffffffffffffffff, 0xffffffffffffffff}) sendmsg$SEG6_CMD_DUMPHMAC(r1, &(0x7f00000001c0)={&(0x7f0000000040)={0x10, 0x0, 0x0, 0x1000}, 0xc, &(0x7f0000000200)={&(0x7f0000000080)={0x58, 0x0, 0x200, 0x70bd2d, 0x25dfdbff, {}, [@SEG6_ATTR_DST={0x14, 0x1, @mcast2}, @SEG6_ATTR_DSTLEN={0x8, 0x2, 0x100}, @SEG6_ATTR_SECRETLEN={0x5, 0x5, 0x5}, @SEG6_ATTR_ALGID={0x5, 0x6, 0x3}, @SEG6_ATTR_SECRET={0x10, 0x4, [0x3, 0xffffffff, 0x400]}, @SEG6_ATTR_ALGID={0x5, 0x6, 0x1}]}, 0x58}, 0x1, 0x0, 0x0, 0x40}, 0x2000c080) [ 2899.540459][T23843] netlink: 'syz-executor.5': attribute type 1 has an invalid length. 
[ 2899.672877][T23843] bond1127: entered promiscuous mode [ 2899.680494][T23843] 8021q: adding VLAN 0 to HW filter on device bond1127 [ 2899.797874][T23844] bond1127: (slave bridge1153): making interface the new active one [ 2899.809604][T23844] bridge1153: entered promiscuous mode [ 2899.830057][T23844] bond1127: (slave bridge1153): Enslaving as an active interface with an up link 17:04:24 executing program 5: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x900, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) [ 2899.850963][T23848] netlink: 'syz-executor.0': attribute type 1 has an invalid length. [ 2899.975810][T23848] bond511: entered promiscuous mode [ 2899.982679][T23848] 8021q: adding VLAN 0 to HW filter on device bond511 [ 2899.997076][T23851] netlink: 'syz-executor.4': attribute type 1 has an invalid length. 17:04:25 executing program 0: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x28a, 0xa, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8}]}, 0x3c}, 0x1, 0x8}, 0x0) [ 2900.058364][T23851] bond692: entered promiscuous mode [ 2900.064452][T23851] 8021q: adding VLAN 0 to HW filter on device bond692 17:04:25 executing program 4: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x28a, 0x0, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0x34}]}, 0x3c}}, 0x0) 17:04:25 executing program 2: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, 
&(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x142, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) [ 2900.195165][T23856] workqueue: Failed to create a rescuer kthread for wq "bond1053": -EINTR [ 2900.371553][T23860] bond813: entered promiscuous mode [ 2900.389676][T23860] 8021q: adding VLAN 0 to HW filter on device bond813 17:04:25 executing program 1: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x28a, 0xa, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8}]}, 0x3c}, 0x1, 0xc}, 0x0) [ 2900.520462][T23866] netlink: 40 bytes leftover after parsing attributes in process `syz-executor.3'. 
17:04:25 executing program 3: r0 = socket$netlink(0x10, 0x3, 0x0) socket(0x0, 0x0, 0x0) sendmsg$nl_route(0xffffffffffffffff, 0x0, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000100)=ANY=[@ANYBLOB="4800000010001fff00"/20, @ANYRES32=0x0, @ANYBLOB="00000000000000002f0012800b00010062090000006500001800060000002e00000000000100000005002a00000000"], 0x48}}, 0x0) (async) socketpair(0x21, 0x2, 0x0, &(0x7f0000000000)={0xffffffffffffffff, 0xffffffffffffffff}) sendmsg$SEG6_CMD_DUMPHMAC(r1, &(0x7f00000001c0)={&(0x7f0000000040)={0x10, 0x0, 0x0, 0x1000}, 0xc, &(0x7f0000000200)={&(0x7f0000000080)={0x58, 0x0, 0x200, 0x70bd2d, 0x25dfdbff, {}, [@SEG6_ATTR_DST={0x14, 0x1, @mcast2}, @SEG6_ATTR_DSTLEN={0x8, 0x2, 0x100}, @SEG6_ATTR_SECRETLEN={0x5, 0x5, 0x5}, @SEG6_ATTR_ALGID={0x5, 0x6, 0x3}, @SEG6_ATTR_SECRET={0x10, 0x4, [0x3, 0xffffffff, 0x400]}, @SEG6_ATTR_ALGID={0x5, 0x6, 0x1}]}, 0x58}, 0x1, 0x0, 0x0, 0x40}, 0x2000c080) [ 2900.668387][T23871] bond1128: entered promiscuous mode [ 2900.692820][T23871] 8021q: adding VLAN 0 to HW filter on device bond1128 [ 2900.857505][T23872] bond1128: (slave bridge1154): making interface the new active one [ 2900.874824][T23872] bridge1154: entered promiscuous mode [ 2900.892018][T23872] bond1128: (slave bridge1154): Enslaving as an active interface with an up link 17:04:25 executing program 5: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0xa00, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) [ 2900.996620][T23875] bond512: entered promiscuous mode [ 2901.039273][T23875] 8021q: adding VLAN 0 to HW filter on device bond512 17:04:26 executing program 0: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x28a, 0x0, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0x32}]}, 0x3c}}, 0x0) 17:04:26 executing program 4: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, 
&(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x28a, 0x0, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0x35}]}, 0x3c}}, 0x0) [ 2901.090439][T23878] workqueue: Failed to create a rescuer kthread for wq "bond693": -EINTR [ 2901.283176][T23885] bond1053: entered promiscuous mode [ 2901.311616][T23885] 8021q: adding VLAN 0 to HW filter on device bond1053 [ 2901.453667][T23886] bond1053: (slave bridge1119): making interface the new active one [ 2901.463037][T23886] bridge1119: entered promiscuous mode [ 2901.479936][T23886] bond1053: (slave bridge1119): Enslaving as an active interface with an up link 17:04:26 executing program 2: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x14c, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) 17:04:26 executing program 1: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x28a, 0xa, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8}]}, 0x3c}, 0x1, 0xe}, 0x0) [ 2901.515724][T23888] workqueue: Failed to create a rescuer kthread for wq "bond814": -EINTR [ 2901.617876][T23894] netlink: 40 bytes leftover after parsing attributes in process `syz-executor.3'. 
17:04:26 executing program 3: r0 = socket$netlink(0x10, 0x3, 0x0) socket(0x0, 0x0, 0x0) sendmsg$nl_route(0xffffffffffffffff, 0x0, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000100)=ANY=[@ANYBLOB="4800000010001fff00"/20, @ANYRES32=0x0, @ANYBLOB="00000000000000002f0012800b00010062090000006500001800060000002e00000000000100000005002a00000000"], 0x48}}, 0x0) (async) socketpair(0x21, 0x2, 0x0, &(0x7f0000000000)={0xffffffffffffffff, 0xffffffffffffffff}) sendmsg$SEG6_CMD_DUMPHMAC(r1, &(0x7f00000001c0)={&(0x7f0000000040)={0x10, 0x0, 0x0, 0x1000}, 0xc, &(0x7f0000000200)={&(0x7f0000000080)={0x58, 0x0, 0x200, 0x70bd2d, 0x25dfdbff, {}, [@SEG6_ATTR_DST={0x14, 0x1, @mcast2}, @SEG6_ATTR_DSTLEN={0x8, 0x2, 0x100}, @SEG6_ATTR_SECRETLEN={0x5, 0x5, 0x5}, @SEG6_ATTR_ALGID={0x5, 0x6, 0x3}, @SEG6_ATTR_SECRET={0x10, 0x4, [0x3, 0xffffffff, 0x400]}, @SEG6_ATTR_ALGID={0x5, 0x6, 0x1}]}, 0x58}, 0x1, 0x0, 0x0, 0x40}, 0x2000c080) [ 2901.829351][T23899] bond1129: entered promiscuous mode [ 2901.835031][T23899] 8021q: adding VLAN 0 to HW filter on device bond1129 [ 2901.974733][T23900] bond1129: (slave bridge1155): making interface the new active one [ 2901.995007][T23900] bridge1155: entered promiscuous mode [ 2902.014619][T23900] bond1129: (slave bridge1155): Enslaving as an active interface with an up link 17:04:27 executing program 5: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0xa02, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) [ 2902.135267][T23904] bond513: entered promiscuous mode [ 2902.148409][T23904] 8021q: adding VLAN 0 to HW filter on device bond513 17:04:27 executing program 0: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x28a, 0x0, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0x32}]}, 0x3c}}, 0x0) 17:04:27 executing program 4: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, 
&(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x28a, 0x0, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0x36}]}, 0x3c}}, 0x0) [ 2902.182635][T23906] workqueue: Failed to create a rescuer kthread for wq "bond693": -EINTR [ 2902.467745][T23911] bond1054: entered promiscuous mode [ 2902.495557][T23911] 8021q: adding VLAN 0 to HW filter on device bond1054 [ 2902.620867][T23913] bond1054: (slave bridge1120): making interface the new active one [ 2902.643283][T23913] bridge1120: entered promiscuous mode 17:04:27 executing program 2: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x15a, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) [ 2902.669463][T23913] bond1054: (slave bridge1120): Enslaving as an active interface with an up link [ 2902.817345][T23916] bond814: entered promiscuous mode [ 2902.831174][T23916] 8021q: adding VLAN 0 to HW filter on device bond814 [ 2902.895372][T23921] netlink: 40 bytes leftover after parsing attributes in process `syz-executor.3'. 
17:04:27 executing program 3: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) ioctl$FS_IOC_RESVSP(r0, 0x40305828, &(0x7f00000007c0)={0x0, 0x0, 0x0, 0x20040006, 0x17800}) ioctl$sock_SIOCGIFINDEX(0xffffffffffffffff, 0x8933, &(0x7f0000000080)={'team0\x00', 0x0}) sendmsg$nl_route_sched(r0, &(0x7f0000000200)={&(0x7f0000000040)={0x10, 0x0, 0x0, 0x80}, 0xc, &(0x7f0000000180)={&(0x7f00000000c0)=ANY=[@ANYBLOB="2c080000650000042abd7000fedbdf2500000000", @ANYRES32=r1, @ANYBLOB="0d0004000f001000e0ff0b000600050008ff0000"], 0x2c}, 0x1, 0x0, 0x0, 0x24044010}, 0x40044) r2 = socket$netlink(0x10, 0x3, 0x0) r3 = socket(0x0, 0x0, 0x0) sendmsg$nl_route(0xffffffffffffffff, 0x0, 0x0) sendmsg$nl_route(r2, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000100)=@newlink={0x48, 0x10, 0xffffff1f, 0x0, 0x0, {}, [@IFLA_LINKINFO={0x28, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x18, 0x2, 0x0, 0x1, [@IFLA_BR_MULTI_BOOLOPT={0xc, 0x2e, {0x0, 0x1}}, @IFLA_BR_MCAST_STATS_ENABLED={0x5}]}}}]}, 0x48}}, 0x0) ioctl$FIOCLEX(r2, 0x5451) ioctl$EXT4_IOC_SETFSUUID(r3, 0x4008662c, &(0x7f0000000000)={0x10, 0x0, "1c2facf8ff91e64a21016a2695e1eb63"}) 17:04:27 executing program 1: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x28a, 0xa, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8}]}, 0x3c}, 0x1, 0x10}, 0x0) [ 2902.949980][ T1217] ieee802154 phy0 wpan0: encryption failed: -22 [ 2902.956453][ T1217] ieee802154 phy1 wpan1: encryption failed: -22 [ 2902.999774][T23925] bond1130: entered promiscuous mode [ 2903.005496][T23925] 8021q: adding VLAN 0 to HW filter on device bond1130 [ 2903.194446][T23927] bond1130: (slave bridge1156): making interface the new active one [ 2903.217496][T23927] bridge1156: entered promiscuous mode [ 2903.252987][T23927] bond1130: (slave bridge1156): Enslaving as an active interface with an up link [ 2903.263353][T23930] validate_nla: 13 callbacks suppressed [ 2903.263377][T23930] netlink: 'syz-executor.0': attribute type 1 has an invalid length. 
17:04:28 executing program 5: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0xc00, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) 17:04:28 executing program 0: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x28a, 0x0, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0x32}]}, 0x3c}}, 0x0) [ 2903.291851][T23930] workqueue: Failed to create a rescuer kthread for wq "bond514": -EINTR [ 2903.366642][T23934] netlink: 'syz-executor.4': attribute type 1 has an invalid length. 17:04:28 executing program 4: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x28a, 0x0, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0x37}]}, 0x3c}}, 0x0) [ 2903.503651][T23934] bond693: entered promiscuous mode [ 2903.511310][T23934] 8021q: adding VLAN 0 to HW filter on device bond693 [ 2903.584478][T23940] netlink: 'syz-executor.2': attribute type 1 has an invalid length. [ 2903.621396][T23940] workqueue: Failed to create a rescuer kthread for wq "bond1055": -EINTR [ 2903.764527][T23945] netlink: 'syz-executor.1': attribute type 1 has an invalid length. 
17:04:28 executing program 2: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x162, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) [ 2903.866217][T23945] bond815: entered promiscuous mode [ 2903.875312][T23945] 8021q: adding VLAN 0 to HW filter on device bond815 17:04:28 executing program 1: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x28a, 0xa, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8}]}, 0x3c}, 0x1, 0x16}, 0x0) 17:04:28 executing program 3: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) ioctl$FS_IOC_RESVSP(r0, 0x40305828, &(0x7f00000007c0)={0x0, 0x0, 0x0, 0x20040006, 0x17800}) (async, rerun: 32) ioctl$sock_SIOCGIFINDEX(0xffffffffffffffff, 0x8933, &(0x7f0000000080)={'team0\x00', 0x0}) (rerun: 32) sendmsg$nl_route_sched(r0, &(0x7f0000000200)={&(0x7f0000000040)={0x10, 0x0, 0x0, 0x80}, 0xc, &(0x7f0000000180)={&(0x7f00000000c0)=ANY=[@ANYBLOB="2c080000650000042abd7000fedbdf2500000000", @ANYRES32=r1, @ANYBLOB="0d0004000f001000e0ff0b000600050008ff0000"], 0x2c}, 0x1, 0x0, 0x0, 0x24044010}, 0x40044) (async) r2 = socket$netlink(0x10, 0x3, 0x0) r3 = socket(0x0, 0x0, 0x0) (async) sendmsg$nl_route(0xffffffffffffffff, 0x0, 0x0) (async) sendmsg$nl_route(r2, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000100)=@newlink={0x48, 0x10, 0xffffff1f, 0x0, 0x0, {}, [@IFLA_LINKINFO={0x28, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x18, 0x2, 0x0, 0x1, [@IFLA_BR_MULTI_BOOLOPT={0xc, 0x2e, {0x0, 0x1}}, @IFLA_BR_MCAST_STATS_ENABLED={0x5}]}}}]}, 0x48}}, 0x0) ioctl$FIOCLEX(r2, 0x5451) (async) ioctl$EXT4_IOC_SETFSUUID(r3, 0x4008662c, &(0x7f0000000000)={0x10, 0x0, "1c2facf8ff91e64a21016a2695e1eb63"}) 17:04:29 executing program 3: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) ioctl$FS_IOC_RESVSP(r0, 0x40305828, &(0x7f00000007c0)={0x0, 0x0, 0x0, 0x20040006, 0x17800}) (async) ioctl$sock_SIOCGIFINDEX(0xffffffffffffffff, 0x8933, &(0x7f0000000080)={'team0\x00', 0x0}) sendmsg$nl_route_sched(r0, &(0x7f0000000200)={&(0x7f0000000040)={0x10, 0x0, 0x0, 0x80}, 0xc, 
&(0x7f0000000180)={&(0x7f00000000c0)=ANY=[@ANYBLOB="2c080000650000042abd7000fedbdf2500000000", @ANYRES32=r1, @ANYBLOB="0d0004000f001000e0ff0b000600050008ff0000"], 0x2c}, 0x1, 0x0, 0x0, 0x24044010}, 0x40044) (async) r2 = socket$netlink(0x10, 0x3, 0x0) (async) r3 = socket(0x0, 0x0, 0x0) sendmsg$nl_route(0xffffffffffffffff, 0x0, 0x0) (async) sendmsg$nl_route(r2, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000100)=@newlink={0x48, 0x10, 0xffffff1f, 0x0, 0x0, {}, [@IFLA_LINKINFO={0x28, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x18, 0x2, 0x0, 0x1, [@IFLA_BR_MULTI_BOOLOPT={0xc, 0x2e, {0x0, 0x1}}, @IFLA_BR_MCAST_STATS_ENABLED={0x5}]}}}]}, 0x48}}, 0x0) ioctl$FIOCLEX(r2, 0x5451) (async) ioctl$EXT4_IOC_SETFSUUID(r3, 0x4008662c, &(0x7f0000000000)={0x10, 0x0, "1c2facf8ff91e64a21016a2695e1eb63"}) [ 2904.136344][T23954] netlink: 'syz-executor.5': attribute type 1 has an invalid length. [ 2904.263585][T23954] bond1131: entered promiscuous mode [ 2904.277797][T23954] 8021q: adding VLAN 0 to HW filter on device bond1131 17:04:29 executing program 3: r0 = socket$netlink(0x10, 0x3, 0x0) socket(0x0, 0x0, 0x0) sendmsg$nl_route(0xffffffffffffffff, 0x0, 0x0) sendmsg$TCPDIAG_GETSOCK(r0, &(0x7f0000000080)={&(0x7f0000000000)={0x10, 0x0, 0x0, 0x4000}, 0xc, &(0x7f0000000040)={&(0x7f0000000280)={0x17c, 0x12, 0x100, 0x70bd28, 0x25dfdbfb, {0x21, 0x8, 0x5d, 0x3f, {0x4e24, 0x4e24, [0x80000000, 0x5b9, 0x8, 0x400], [0x3, 0x25, 0x5, 0x3], 0x0, [0x48, 0xffff58c4]}, 0x4, 0x2}, [@INET_DIAG_REQ_BYTECODE={0x97, 0x1, "5523cf7c16cea21ea6aa50d5dedd373327228f0e4b034e37d6fff78693d41885ab6a160d0ebf81ddbb238716f7cda3b4f6e0f665f20d31460236273cd9c851104cac004691c5235f858c0228818399a681b81b0b30b1188ee906a33a51e9373de2e0d3ccd80d07f12a82e93205374a8d225a337a733384fc13591fb5068a4cdb2f69020be9c7bc94f1f42006d40c2456f2cd5f"}, @INET_DIAG_REQ_BYTECODE={0x97, 0x1, "11330b142633e3b9b4392d4650dfe1b49dbfd1a3f21845f5f9452e500b661238d54760d2f58520ca56a92b1568fb5f1293544118a02fa099c2038d44061fb901d61729e8c912fdf55cd09b318a7afe8b2c90bb666c95e77fad620aec55596880b308615574df65573f0405bcf4fd4f6a0ec841efe277f89c62bcd145a241c76c87a1bca724d1e4309810f9fe9013d4e210f3fb"}]}, 0x17c}, 0x1, 0x0, 0x0, 0x90}, 0x10) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000100)=@newlink={0x48, 0x10, 0xffffff1f, 0x0, 0x0, {}, [@IFLA_LINKINFO={0x28, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x18, 0x2, 0x0, 0x1, [@IFLA_BR_MULTI_BOOLOPT={0xc, 0x2e, {0x0, 0x1}}, @IFLA_BR_MCAST_STATS_ENABLED={0x5}]}}}]}, 0x48}}, 0x0) [ 2904.405933][T23956] bond1131: (slave bridge1157): making interface the new active one [ 2904.422707][T23956] bridge1157: entered promiscuous mode [ 2904.442494][T23956] bond1131: (slave bridge1157): Enslaving as an active interface with an up link 17:04:29 executing program 5: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0xd00, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, 
@IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) [ 2904.455309][T23958] netlink: 'syz-executor.0': attribute type 1 has an invalid length. 17:04:29 executing program 0: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000140)='memory.events\x00', 0x7a05, 0x1700) r1 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) write$cgroup_int(r0, &(0x7f0000000200), 0xf000) sendfile(r0, r1, 0x0, 0xf03b0000) r2 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) sendfile(r2, r1, &(0x7f00000002c0)=0x335773c3, 0x8) r3 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000140)='memory.events\x00', 0x7a05, 0x1700) r4 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) write$cgroup_int(r3, &(0x7f0000000200), 0xf000) sendfile(r3, r4, 0x0, 0xf03b0000) sendfile(r3, r3, &(0x7f0000000180), 0x0) r5 = bpf$BPF_MAP_GET_FD_BY_ID(0xe, &(0x7f00000004c0)={0x0, 0x7f}, 0xc) r6 = bpf$MAP_CREATE(0x0, &(0x7f0000000500)=@base={0xd, 0x6, 0x0, 0xfffffffc, 0xc82, 0xffffffffffffffff, 0x100, '\x00', 0x0, 0xffffffffffffffff, 0x0, 0x1, 0x3}, 0x48) r7 = bpf$ITER_CREATE(0x21, &(0x7f0000000580), 0x8) r8 = bpf$MAP_CREATE(0x0, &(0x7f00000005c0)=@base={0x17, 0x8, 0x100, 0xfffffffa, 0x900, 0xffffffffffffffff, 0x101, '\x00', 0x0, 0xffffffffffffffff, 0x1, 0x0, 0x1}, 0x48) r9 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000840)='blkio.throttle.io_serviced\x00', 0x275a, 0x0) ioctl$FS_IOC_RESVSP(r9, 0x40305828, &(0x7f00000007c0)={0x0, 0x0, 0x0, 0x20040006, 0x17800}) bpf$BPF_PROG_RAW_TRACEPOINT_LOAD(0x5, &(0x7f00000006c0)={0x18, 0x5, &(0x7f00000003c0)=@framed={{0x18, 0x0, 0x0, 0x0, 0x4fd, 0x0, 0x0, 0x0, 0x8}, [@ldst={0x2, 0x0, 0x1, 0xa, 0xb, 0x0, 0x4}, @call={0x85, 0x0, 0x0, 0x58}]}, &(0x7f0000000400)='syzkaller\x00', 0x7, 0x0, 0x0, 0x41100, 0x11, '\x00', 0x0, 0x0, r1, 0x8, &(0x7f0000000440)={0x3, 0x1}, 0x8, 0x10, &(0x7f0000000480)={0x1, 0x2, 0x1, 0x48000000}, 0x10, 0x0, 0x0, 0x0, &(0x7f0000000640)=[r3, r5, r6, 0xffffffffffffffff, 0x1, 0x1, r7, r8, r9, 0x1]}, 0x80) r10 = socket$netlink(0x10, 0x3, 0x0) r11 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000140)='memory.events\x00', 0x7a05, 0x1700) r12 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) write$cgroup_int(r11, &(0x7f0000000200), 0xf000) sendfile(r11, r12, 0x0, 0xf03b0000) sendfile(r11, r11, &(0x7f0000000180), 0x0) sendmsg$nl_route_sched(r11, &(0x7f0000000380)={&(0x7f00000002c0)={0x10, 0x0, 0x0, 0x1000}, 0xc, &(0x7f0000000340)={&(0x7f0000000300)=ANY=[@ANYBLOB="ffffffff660004002dbd7000fedbdf2500010000", @ANYRES32=0x0, @ANYBLOB="01000100f3ff09000a00050008000b000500000008000b00a4fc000008000b00ff030000"], 0x3c}, 0x1, 0x0, 0x0, 0x20000401}, 0x80) r13 = socket(0x0, 0x0, 0x0) sendmsg$nl_route(0xffffffffffffffff, 0x0, 0x0) sendmsg$nl_route(r10, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000100)=@newlink={0x48, 0x10, 0xffffff1f, 0x0, 0x0, {}, [@IFLA_LINKINFO={0x28, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x18, 0x2, 0x0, 0x1, [@IFLA_BR_MULTI_BOOLOPT={0xc, 0x2e, {0x0, 0x1}}, @IFLA_BR_MCAST_STATS_ENABLED={0x5}]}}}]}, 0x48}}, 0x0) sendmsg$IPCTNL_MSG_EXP_GET_STATS_CPU(r13, &(0x7f0000000280)={&(0x7f0000000180)={0x10, 0x0, 0x0, 0x4}, 0xc, &(0x7f0000000200)={&(0x7f00000001c0)={0x14, 0x3, 0x2, 0x301, 0x0, 0x0, {0x3, 0x0, 0x2}, [""]}, 0x14}, 0x1, 0x0, 0x0, 0x41}, 0x4000) sendmsg$nl_route(0xffffffffffffffff, &(0x7f00000000c0)={&(0x7f0000000000)={0x10, 0x0, 0x0, 0x20000000}, 0xc, 
&(0x7f0000000080)={&(0x7f0000000740)=ANY=[@ANYBLOB="280000006c0000012cbd700001dcdf2500000000", @ANYRES32=0x0, @ANYBLOB="0100010040020000efff2204200000005845552bfac297eacfbd56398c525cf78bb3b61572d5b555be16cdf17056ffa26d08de6ade80a2ae75f10dc465aee3e008d079ac4bc751dc7cc8522d7e7a7b4f28ac23f6"], 0x28}, 0x1, 0x0, 0x0, 0x4000000}, 0x48c4) [ 2904.485285][T23958] workqueue: Failed to create a rescuer kthread for wq "bond514": -EINTR [ 2904.564379][T23962] netlink: 'syz-executor.4': attribute type 1 has an invalid length. 17:04:29 executing program 4: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x28a, 0x0, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0x38}]}, 0x3c}}, 0x0) [ 2904.603255][T23962] workqueue: Failed to create a rescuer kthread for wq "bond694": -EINTR [ 2904.684219][T23969] netlink: 'syz-executor.2': attribute type 1 has an invalid length. 17:04:29 executing program 2: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x16a, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) [ 2904.757116][T23969] workqueue: Failed to create a rescuer kthread for wq "bond1055": -EINTR [ 2904.816655][T23975] netlink: 'syz-executor.1': attribute type 1 has an invalid length. 
[ 2905.049900][T23975] bond816: entered promiscuous mode [ 2905.100303][T23975] 8021q: adding VLAN 0 to HW filter on device bond816 17:04:30 executing program 1: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x28a, 0xa, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8}]}, 0x3c}, 0x1, 0x60}, 0x0) [ 2905.329345][T23999] netlink: 'syz-executor.5': attribute type 1 has an invalid length. 17:04:30 executing program 3: socket$netlink(0x10, 0x3, 0x0) (async) r0 = socket$netlink(0x10, 0x3, 0x0) socket(0x0, 0x0, 0x0) sendmsg$nl_route(0xffffffffffffffff, 0x0, 0x0) sendmsg$TCPDIAG_GETSOCK(r0, &(0x7f0000000080)={&(0x7f0000000000)={0x10, 0x0, 0x0, 0x4000}, 0xc, &(0x7f0000000040)={&(0x7f0000000280)={0x17c, 0x12, 0x100, 0x70bd28, 0x25dfdbfb, {0x21, 0x8, 0x5d, 0x3f, {0x4e24, 0x4e24, [0x80000000, 0x5b9, 0x8, 0x400], [0x3, 0x25, 0x5, 0x3], 0x0, [0x48, 0xffff58c4]}, 0x4, 0x2}, [@INET_DIAG_REQ_BYTECODE={0x97, 0x1, "5523cf7c16cea21ea6aa50d5dedd373327228f0e4b034e37d6fff78693d41885ab6a160d0ebf81ddbb238716f7cda3b4f6e0f665f20d31460236273cd9c851104cac004691c5235f858c0228818399a681b81b0b30b1188ee906a33a51e9373de2e0d3ccd80d07f12a82e93205374a8d225a337a733384fc13591fb5068a4cdb2f69020be9c7bc94f1f42006d40c2456f2cd5f"}, @INET_DIAG_REQ_BYTECODE={0x97, 0x1, "11330b142633e3b9b4392d4650dfe1b49dbfd1a3f21845f5f9452e500b661238d54760d2f58520ca56a92b1568fb5f1293544118a02fa099c2038d44061fb901d61729e8c912fdf55cd09b318a7afe8b2c90bb666c95e77fad620aec55596880b308615574df65573f0405bcf4fd4f6a0ec841efe277f89c62bcd145a241c76c87a1bca724d1e4309810f9fe9013d4e210f3fb"}]}, 0x17c}, 0x1, 0x0, 0x0, 0x90}, 0x10) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000100)=@newlink={0x48, 0x10, 0xffffff1f, 0x0, 0x0, {}, [@IFLA_LINKINFO={0x28, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x18, 0x2, 0x0, 0x1, [@IFLA_BR_MULTI_BOOLOPT={0xc, 0x2e, {0x0, 0x1}}, @IFLA_BR_MCAST_STATS_ENABLED={0x5}]}}}]}, 0x48}}, 0x0) [ 2905.577343][T23999] bond1132: entered promiscuous mode [ 2905.621628][T23999] 8021q: adding VLAN 0 to HW filter on device bond1132 [ 2905.781477][T24001] bond1132: (slave bridge1158): making interface the new active one [ 2905.792925][T24001] bridge1158: entered promiscuous mode [ 2905.812848][T24001] bond1132: (slave bridge1158): Enslaving as an active interface with an up link 17:04:30 executing program 5: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, 
@ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x116a, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) 17:04:30 executing program 4: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x28a, 0x0, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0x39}]}, 0x3c}}, 0x0) [ 2905.834816][T24008] workqueue: Failed to create a rescuer kthread for wq "bond694": -EINTR 17:04:31 executing program 2: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x16c, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) 17:04:31 executing program 0: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket(0x10, 0x803, 0x0) sendmsg$NBD_CMD_DISCONNECT(r1, &(0x7f00000000c0)={0x0, 0x0, &(0x7f0000000180)={0x0, 0x33}}, 0x0) getsockname$packet(r1, &(0x7f0000000100)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000200)=0x59) sendmsg$nl_route(r0, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f0000000380)=ANY=[@ANYBLOB="3c00000010000d0700155abff63400000000ff0f", @ANYRES32=r2, @ANYBLOB="00000000e60000001c0012000c00010062165b4e"], 0x3c}}, 0x0) r3 = socket$netlink(0x10, 0x3, 0x0) r4 = socket$packet(0x11, 0x3, 0x300) r5 = socket$nl_route(0x10, 0x3, 0x0) getsockname$packet(r1, &(0x7f00000002c0)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000240)=0x14) sendmsg$nl_route(r5, &(0x7f0000000000)={0x0, 0x0, &(0x7f0000000140)={&(0x7f00000003c0)=ANY=[@ANYBLOB="50000000100001040000ff0f0000000000000000", @ANYRES32=0x0, @ANYBLOB="00000000000000002800128009000100766c616e00000000180002800c0002001c0000001b000000060001000100000008000500", @ANYRES32=r6], 0x50}}, 0x0) getsockname$packet(r4, &(0x7f0000000100)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000200)) sendmsg$nl_route(r3, &(0x7f0000000080)={0x0, 0x0, &(0x7f0000000040)={&(0x7f00000000c0)=ANY=[@ANYBLOB="28000000100025080000000000f15cd9de000000", @ANYRES32=r7, @ANYBLOB="000000000000000008000a0010"], 0x28}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000140)={0x0, 0x0, 
&(0x7f00000001c0)={&(0x7f0000000580)=@dellink={0x20, 0x11, 0x21, 0x0, 0x0, {0x2, 0x0, 0x0, r2}}, 0x20}}, 0x0) [ 2905.957714][T24012] workqueue: Failed to create a rescuer kthread for wq "bond1055": -EINTR [ 2906.295802][T24018] bond817: entered promiscuous mode [ 2906.343980][T24018] 8021q: adding VLAN 0 to HW filter on device bond817 17:04:31 executing program 1: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x28a, 0xa, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8}]}, 0x3c}, 0x1, 0xf0}, 0x0) 17:04:31 executing program 3: r0 = socket$netlink(0x10, 0x3, 0x0) socket(0x0, 0x0, 0x0) sendmsg$nl_route(0xffffffffffffffff, 0x0, 0x0) sendmsg$TCPDIAG_GETSOCK(r0, &(0x7f0000000080)={&(0x7f0000000000)={0x10, 0x0, 0x0, 0x4000}, 0xc, &(0x7f0000000040)={&(0x7f0000000280)={0x17c, 0x12, 0x100, 0x70bd28, 0x25dfdbfb, {0x21, 0x8, 0x5d, 0x3f, {0x4e24, 0x4e24, [0x80000000, 0x5b9, 0x8, 0x400], [0x3, 0x25, 0x5, 0x3], 0x0, [0x48, 0xffff58c4]}, 0x4, 0x2}, [@INET_DIAG_REQ_BYTECODE={0x97, 0x1, "5523cf7c16cea21ea6aa50d5dedd373327228f0e4b034e37d6fff78693d41885ab6a160d0ebf81ddbb238716f7cda3b4f6e0f665f20d31460236273cd9c851104cac004691c5235f858c0228818399a681b81b0b30b1188ee906a33a51e9373de2e0d3ccd80d07f12a82e93205374a8d225a337a733384fc13591fb5068a4cdb2f69020be9c7bc94f1f42006d40c2456f2cd5f"}, @INET_DIAG_REQ_BYTECODE={0x97, 0x1, "11330b142633e3b9b4392d4650dfe1b49dbfd1a3f21845f5f9452e500b661238d54760d2f58520ca56a92b1568fb5f1293544118a02fa099c2038d44061fb901d61729e8c912fdf55cd09b318a7afe8b2c90bb666c95e77fad620aec55596880b308615574df65573f0405bcf4fd4f6a0ec841efe277f89c62bcd145a241c76c87a1bca724d1e4309810f9fe9013d4e210f3fb"}]}, 0x17c}, 0x1, 0x0, 0x0, 0x90}, 0x10) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000100)=@newlink={0x48, 0x10, 0xffffff1f, 0x0, 0x0, {}, [@IFLA_LINKINFO={0x28, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x18, 0x2, 0x0, 0x1, [@IFLA_BR_MULTI_BOOLOPT={0xc, 0x2e, {0x0, 0x1}}, @IFLA_BR_MCAST_STATS_ENABLED={0x5}]}}}]}, 0x48}}, 0x0) socket$netlink(0x10, 0x3, 0x0) (async) socket(0x0, 0x0, 0x0) (async) sendmsg$nl_route(0xffffffffffffffff, 0x0, 0x0) (async) sendmsg$TCPDIAG_GETSOCK(r0, &(0x7f0000000080)={&(0x7f0000000000)={0x10, 0x0, 0x0, 0x4000}, 0xc, &(0x7f0000000040)={&(0x7f0000000280)={0x17c, 0x12, 0x100, 0x70bd28, 0x25dfdbfb, {0x21, 0x8, 0x5d, 0x3f, {0x4e24, 0x4e24, [0x80000000, 0x5b9, 0x8, 0x400], [0x3, 0x25, 0x5, 0x3], 0x0, [0x48, 0xffff58c4]}, 0x4, 0x2}, [@INET_DIAG_REQ_BYTECODE={0x97, 0x1, "5523cf7c16cea21ea6aa50d5dedd373327228f0e4b034e37d6fff78693d41885ab6a160d0ebf81ddbb238716f7cda3b4f6e0f665f20d31460236273cd9c851104cac004691c5235f858c0228818399a681b81b0b30b1188ee906a33a51e9373de2e0d3ccd80d07f12a82e93205374a8d225a337a733384fc13591fb5068a4cdb2f69020be9c7bc94f1f42006d40c2456f2cd5f"}, @INET_DIAG_REQ_BYTECODE={0x97, 0x1, 
"11330b142633e3b9b4392d4650dfe1b49dbfd1a3f21845f5f9452e500b661238d54760d2f58520ca56a92b1568fb5f1293544118a02fa099c2038d44061fb901d61729e8c912fdf55cd09b318a7afe8b2c90bb666c95e77fad620aec55596880b308615574df65573f0405bcf4fd4f6a0ec841efe277f89c62bcd145a241c76c87a1bca724d1e4309810f9fe9013d4e210f3fb"}]}, 0x17c}, 0x1, 0x0, 0x0, 0x90}, 0x10) (async) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000100)=@newlink={0x48, 0x10, 0xffffff1f, 0x0, 0x0, {}, [@IFLA_LINKINFO={0x28, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x18, 0x2, 0x0, 0x1, [@IFLA_BR_MULTI_BOOLOPT={0xc, 0x2e, {0x0, 0x1}}, @IFLA_BR_MCAST_STATS_ENABLED={0x5}]}}}]}, 0x48}}, 0x0) (async) [ 2906.586471][T24031] bond1133: entered promiscuous mode [ 2906.598106][T24031] 8021q: adding VLAN 0 to HW filter on device bond1133 [ 2906.654966][T24035] bond694: entered promiscuous mode [ 2906.661676][T24035] 8021q: adding VLAN 0 to HW filter on device bond694 17:04:31 executing program 5: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x1203, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) [ 2906.726649][T24036] bond1133: (slave bridge1159): making interface the new active one [ 2906.735693][T24036] bridge1159: entered promiscuous mode [ 2906.750474][T24036] bond1133: (slave bridge1159): Enslaving as an active interface with an up link 17:04:31 executing program 4: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x28a, 0x0, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0x3a}]}, 0x3c}}, 0x0) [ 2906.878416][T24041] bond1055: entered promiscuous mode [ 2906.902648][T24041] 8021q: adding VLAN 0 to HW filter on device bond1055 [ 2906.967252][T24069] bond695: entered promiscuous mode [ 2906.981362][T24069] 8021q: adding VLAN 0 to HW filter on device bond695 17:04:32 executing program 2: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, 
&(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x1da, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) [ 2907.075865][T24043] bond1055: (slave bridge1121): making interface the new active one [ 2907.085311][T24043] bridge1121: entered promiscuous mode [ 2907.100231][T24043] bond1055: (slave bridge1121): Enslaving as an active interface with an up link [ 2907.110786][T24045] netlink: 12 bytes leftover after parsing attributes in process `syz-executor.0'. [ 2907.282451][T24053] bond818: entered promiscuous mode [ 2907.318783][T24053] 8021q: adding VLAN 0 to HW filter on device bond818 17:04:32 executing program 1: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x28a, 0xa, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8}]}, 0x3c}, 0x1, 0x600}, 0x0) 17:04:32 executing program 3: r0 = socket$netlink(0x10, 0x3, 0x0) socket(0x0, 0x0, 0x0) sendmsg$nl_route(0xffffffffffffffff, 0x0, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000000)=@newlink={0x58, 0x10, 0xffffff1f, 0x0, 0x0, {}, [@IFLA_VF_PORTS={0x38, 0x18, 0x0, 0x1, [{0x20, 0x1, 0x0, 0x1, [@IFLA_PORT_VF={0x8, 0x1, 0x8000}, @IFLA_PORT_REQUEST={0x5, 0x6, 0x80}, @IFLA_PORT_PROFILE={0xb, 0x2, 'bridge\x00'}]}, {0x14, 0x1, 0x0, 0x1, [@IFLA_PORT_REQUEST={0x5, 0x6, 0x4e}, @IFLA_PORT_REQUEST={0x5, 0x6, 0x1}]}]}]}, 0x58}}, 0x0) [ 2907.581798][T24066] bond1134: entered promiscuous mode [ 2907.605113][T24066] 8021q: adding VLAN 0 to HW filter on device bond1134 [ 2907.787666][T24071] bond1134: (slave bridge1160): making interface the new active one [ 2907.813295][T24071] bridge1160: entered promiscuous mode 17:04:32 executing program 4: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x28a, 0x0, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0x3b}]}, 0x3c}}, 0x0) [ 2907.839102][T24071] bond1134: (slave bridge1160): Enslaving as an 
active interface with an up link [ 2907.851963][T19369] BUG: MAX_LOCKDEP_CHAIN_HLOCKS too low! [ 2907.857653][T19369] turning off the locking correctness validator. [ 2907.864271][T19369] CPU: 0 PID: 19369 Comm: kworker/u4:2 Not tainted 6.4.0-rc7-syzkaller-01948-gae230642190a #0 [ 2907.874649][T19369] Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 05/27/2023 [ 2907.884939][T19369] Workqueue: bond1134 bond_resend_igmp_join_requests_delayed [ 2907.892405][T19369] Call Trace: [ 2907.895721][T19369] [ 2907.898691][T19369] dump_stack_lvl+0xd9/0x150 [ 2907.903341][T19369] __lock_acquire+0x434b/0x5f30 [ 2907.909112][T19369] ? lockdep_unlock+0x11b/0x290 [ 2907.914798][T19369] ? lockdep_hardirqs_on_prepare+0x410/0x410 [ 2907.920842][T19369] ? lockdep_hardirqs_on_prepare+0x410/0x410 [ 2907.926870][T19369] ? __lock_acquire+0x1987/0x5f30 [ 2907.932008][T19369] lock_acquire+0x1b1/0x520 [ 2907.936530][T19369] ? debug_object_activate+0x186/0x4f0 [ 2907.942095][T19369] ? lock_sync+0x190/0x190 [ 2907.946529][T19369] ? debug_object_activate+0xfb/0x4f0 [ 2907.951921][T19369] ? lock_downgrade+0x690/0x690 [ 2907.956884][T19369] _raw_spin_lock_irqsave+0x3d/0x60 [ 2907.962103][T19369] ? debug_object_activate+0x186/0x4f0 [ 2907.967580][T19369] debug_object_activate+0x186/0x4f0 [ 2907.972883][T19369] ? debug_object_free+0x360/0x360 [ 2907.978015][T19369] ? spin_bug+0x1c0/0x1c0 [ 2907.982535][T19369] ? get_nohz_timer_target+0x17/0x680 [ 2907.987928][T19369] __mod_timer+0x80d/0xe80 [ 2907.993510][T19369] ? timer_shutdown_sync+0x20/0x20 [ 2907.998728][T19369] add_timer+0x62/0x90 [ 2908.002924][T19369] __queue_delayed_work+0x1a7/0x270 [ 2908.008176][T19369] queue_delayed_work_on+0x109/0x120 [ 2908.013499][T19369] bond_resend_igmp_join_requests_delayed+0x145/0x180 [ 2908.020387][T19369] process_one_work+0x99a/0x15e0 [ 2908.025426][T19369] ? pwq_dec_nr_in_flight+0x2a0/0x2a0 [ 2908.030932][T19369] ? spin_bug+0x1c0/0x1c0 [ 2908.035299][T19369] ? 
_raw_spin_lock_irq+0x45/0x50 17:04:33 executing program 5: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x1240, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) 17:04:33 executing program 0: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket(0x10, 0x803, 0x0) sendmsg$NBD_CMD_DISCONNECT(r1, &(0x7f00000000c0)={0x0, 0x0, &(0x7f0000000180)={0x0, 0x33}}, 0x0) getsockname$packet(r1, &(0x7f0000000100)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000200)=0x59) sendmsg$nl_route(r0, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f0000000380)=ANY=[@ANYBLOB="3c00000010000d0700155abff63400000000ff0f", @ANYRES32=r2, @ANYBLOB="00000000e60000001c0012000c00010062165b4e"], 0x3c}}, 0x0) r3 = socket$netlink(0x10, 0x3, 0x0) r4 = socket$packet(0x11, 0x3, 0x300) r5 = socket$nl_route(0x10, 0x3, 0x0) getsockname$packet(r1, &(0x7f00000002c0)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000240)=0x14) sendmsg$nl_route(r5, &(0x7f0000000000)={0x0, 0x0, &(0x7f0000000140)={&(0x7f00000003c0)=ANY=[@ANYBLOB="50000000100001040000ff0f0000000000000000", @ANYRES32=0x0, @ANYBLOB="00000000000000002800128009000100766c616e00000000180002800c0002001c0000001b000000060001000100000008000500", @ANYRES32=r6], 0x50}}, 0x0) getsockname$packet(r4, &(0x7f0000000100)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000200)) sendmsg$nl_route(r3, &(0x7f0000000080)={0x0, 0x0, &(0x7f0000000040)={&(0x7f00000000c0)=ANY=[@ANYBLOB="28000000100025080000000000f15cd9de000000", @ANYRES32=r7, @ANYBLOB="000000000000000008000a0010"], 0x28}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000140)={0x0, 0x0, &(0x7f00000001c0)={&(0x7f0000000580)=@dellink={0x20, 0x11, 0x21, 0x0, 0x0, {0x2, 0x0, 0x0, r2}}, 0x20}}, 0x0) socket$netlink(0x10, 0x3, 0x0) (async) socket(0x10, 0x803, 0x0) (async) sendmsg$NBD_CMD_DISCONNECT(r1, &(0x7f00000000c0)={0x0, 0x0, &(0x7f0000000180)={0x0, 0x33}}, 0x0) (async) getsockname$packet(r1, &(0x7f0000000100)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000200)=0x59) (async) sendmsg$nl_route(r0, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f0000000380)=ANY=[@ANYBLOB="3c00000010000d0700155abff63400000000ff0f", @ANYRES32=r2, @ANYBLOB="00000000e60000001c0012000c00010062165b4e"], 0x3c}}, 0x0) (async) socket$netlink(0x10, 0x3, 0x0) (async) socket$packet(0x11, 0x3, 0x300) (async) socket$nl_route(0x10, 0x3, 0x0) (async) getsockname$packet(r1, &(0x7f00000002c0)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000240)=0x14) (async) sendmsg$nl_route(r5, &(0x7f0000000000)={0x0, 0x0, &(0x7f0000000140)={&(0x7f00000003c0)=ANY=[@ANYBLOB="50000000100001040000ff0f0000000000000000", @ANYRES32=0x0, @ANYBLOB="00000000000000002800128009000100766c616e00000000180002800c0002001c0000001b000000060001000100000008000500", @ANYRES32=r6], 0x50}}, 0x0) (async) getsockname$packet(r4, &(0x7f0000000100)={0x11, 0x0, 0x0, 0x1, 
0x0, 0x6, @broadcast}, &(0x7f0000000200)) (async) sendmsg$nl_route(r3, &(0x7f0000000080)={0x0, 0x0, &(0x7f0000000040)={&(0x7f00000000c0)=ANY=[@ANYBLOB="28000000100025080000000000f15cd9de000000", @ANYRES32=r7, @ANYBLOB="000000000000000008000a0010"], 0x28}}, 0x0) (async) sendmsg$nl_route(r0, &(0x7f0000000140)={0x0, 0x0, &(0x7f00000001c0)={&(0x7f0000000580)=@dellink={0x20, 0x11, 0x21, 0x0, 0x0, {0x2, 0x0, 0x0, r2}}, 0x20}}, 0x0) (async) [ 2908.040439][T19369] worker_thread+0x67d/0x10c0 [ 2908.045146][T19369] ? process_one_work+0x15e0/0x15e0 [ 2908.050366][T19369] kthread+0x344/0x440 [ 2908.054453][T19369] ? kthread_complete_and_exit+0x40/0x40 [ 2908.060106][T19369] ret_from_fork+0x1f/0x30 [ 2908.064586][T19369] [ 2908.267376][T24077] workqueue: Failed to create a rescuer kthread for wq "bond1056": -EINTR [ 2908.364332][T24084] validate_nla: 10 callbacks suppressed [ 2908.364358][T24084] netlink: 'syz-executor.1': attribute type 1 has an invalid length. 17:04:33 executing program 3: r0 = socket$netlink(0x10, 0x3, 0x0) socket(0x0, 0x0, 0x0) sendmsg$nl_route(0xffffffffffffffff, 0x0, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000000)=@newlink={0x58, 0x10, 0xffffff1f, 0x0, 0x0, {}, [@IFLA_VF_PORTS={0x38, 0x18, 0x0, 0x1, [{0x20, 0x1, 0x0, 0x1, [@IFLA_PORT_VF={0x8, 0x1, 0x8000}, @IFLA_PORT_REQUEST={0x5, 0x6, 0x80}, @IFLA_PORT_PROFILE={0xb, 0x2, 'bridge\x00'}]}, {0x14, 0x1, 0x0, 0x1, [@IFLA_PORT_REQUEST={0x5, 0x6, 0x4e}, @IFLA_PORT_REQUEST={0x5, 0x6, 0x1}]}]}]}, 0x58}}, 0x0) (async) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000000)=@newlink={0x58, 0x10, 0xffffff1f, 0x0, 0x0, {}, [@IFLA_VF_PORTS={0x38, 0x18, 0x0, 0x1, [{0x20, 0x1, 0x0, 0x1, [@IFLA_PORT_VF={0x8, 0x1, 0x8000}, @IFLA_PORT_REQUEST={0x5, 0x6, 0x80}, @IFLA_PORT_PROFILE={0xb, 0x2, 'bridge\x00'}]}, {0x14, 0x1, 0x0, 0x1, [@IFLA_PORT_REQUEST={0x5, 0x6, 0x4e}, @IFLA_PORT_REQUEST={0x5, 0x6, 0x1}]}]}]}, 0x58}}, 0x0) 17:04:33 executing program 2: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x1e2, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) 17:04:33 executing program 1: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x28a, 0xa, 
{}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8}]}, 0x3c}, 0x1, 0xa00}, 0x0) [ 2908.401395][T24084] workqueue: Failed to create a rescuer kthread for wq "bond819": -EINTR [ 2908.471647][T24089] netlink: 'syz-executor.4': attribute type 1 has an invalid length. [ 2908.544448][T24089] bond696: entered promiscuous mode [ 2908.551330][T24089] 8021q: adding VLAN 0 to HW filter on device bond696 [ 2908.567040][T24091] netlink: 'syz-executor.5': attribute type 1 has an invalid length. 17:04:33 executing program 4: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x28a, 0x0, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0x3c}]}, 0x3c}}, 0x0) [ 2908.605489][T24091] bond1135: entered promiscuous mode [ 2908.611667][T24091] 8021q: adding VLAN 0 to HW filter on device bond1135 [ 2908.624255][T24095] netlink: 12 bytes leftover after parsing attributes in process `syz-executor.0'. 17:04:33 executing program 5: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x1241, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) [ 2908.704939][T24096] bond1135: (slave bridge1161): making interface the new active one [ 2908.714292][T24096] bridge1161: entered promiscuous mode [ 2908.725661][T24096] bond1135: (slave bridge1161): Enslaving as an active interface with an up link [ 2908.746665][T24106] netlink: 'syz-executor.2': attribute type 1 has an invalid length. 
[ 2908.815807][T24106] bond1056: entered promiscuous mode [ 2908.822246][T24106] 8021q: adding VLAN 0 to HW filter on device bond1056 17:04:33 executing program 3: r0 = socket$netlink(0x10, 0x3, 0x0) socket(0x0, 0x0, 0x0) (async, rerun: 64) sendmsg$nl_route(0xffffffffffffffff, 0x0, 0x0) (async, rerun: 64) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000000)=@newlink={0x58, 0x10, 0xffffff1f, 0x0, 0x0, {}, [@IFLA_VF_PORTS={0x38, 0x18, 0x0, 0x1, [{0x20, 0x1, 0x0, 0x1, [@IFLA_PORT_VF={0x8, 0x1, 0x8000}, @IFLA_PORT_REQUEST={0x5, 0x6, 0x80}, @IFLA_PORT_PROFILE={0xb, 0x2, 'bridge\x00'}]}, {0x14, 0x1, 0x0, 0x1, [@IFLA_PORT_REQUEST={0x5, 0x6, 0x4e}, @IFLA_PORT_REQUEST={0x5, 0x6, 0x1}]}]}]}, 0x58}}, 0x0) [ 2908.969872][T24112] bond1056: (slave bridge1122): making interface the new active one [ 2908.991600][T24112] bridge1122: entered promiscuous mode 17:04:34 executing program 2: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x20a, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) [ 2909.011667][T24112] bond1056: (slave bridge1122): Enslaving as an active interface with an up link [ 2909.029972][T24111] netlink: 'syz-executor.1': attribute type 1 has an invalid length. 
[ 2909.124789][T24111] bond819: entered promiscuous mode [ 2909.145779][T24111] 8021q: adding VLAN 0 to HW filter on device bond819 17:04:34 executing program 0: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket(0x10, 0x803, 0x0) sendmsg$NBD_CMD_DISCONNECT(r1, &(0x7f00000000c0)={0x0, 0x0, &(0x7f0000000180)={0x0, 0x33}}, 0x0) (async) sendmsg$NBD_CMD_DISCONNECT(r1, &(0x7f00000000c0)={0x0, 0x0, &(0x7f0000000180)={0x0, 0x33}}, 0x0) getsockname$packet(r1, &(0x7f0000000100)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000200)=0x59) sendmsg$nl_route(r0, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f0000000380)=ANY=[@ANYBLOB="3c00000010000d0700155abff63400000000ff0f", @ANYRES32=r2, @ANYBLOB="00000000e60000001c0012000c00010062165b4e"], 0x3c}}, 0x0) socket$netlink(0x10, 0x3, 0x0) (async) r3 = socket$netlink(0x10, 0x3, 0x0) socket$packet(0x11, 0x3, 0x300) (async) r4 = socket$packet(0x11, 0x3, 0x300) socket$nl_route(0x10, 0x3, 0x0) (async) r5 = socket$nl_route(0x10, 0x3, 0x0) getsockname$packet(r1, &(0x7f00000002c0)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000240)=0x14) sendmsg$nl_route(r5, &(0x7f0000000000)={0x0, 0x0, &(0x7f0000000140)={&(0x7f00000003c0)=ANY=[@ANYBLOB="50000000100001040000ff0f0000000000000000", @ANYRES32=0x0, @ANYBLOB="00000000000000002800128009000100766c616e00000000180002800c0002001c0000001b000000060001000100000008000500", @ANYRES32=r6], 0x50}}, 0x0) getsockname$packet(r4, &(0x7f0000000100)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000200)) sendmsg$nl_route(r3, &(0x7f0000000080)={0x0, 0x0, &(0x7f0000000040)={&(0x7f00000000c0)=ANY=[@ANYBLOB="28000000100025080000000000f15cd9de000000", @ANYRES32=r7, @ANYBLOB="000000000000000008000a0010"], 0x28}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000140)={0x0, 0x0, &(0x7f00000001c0)={&(0x7f0000000580)=@dellink={0x20, 0x11, 0x21, 0x0, 0x0, {0x2, 0x0, 0x0, r2}}, 0x20}}, 0x0) [ 2909.239948][T24118] netlink: 'syz-executor.4': attribute type 1 has an invalid length. 17:04:34 executing program 1: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x28a, 0xa, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8}]}, 0x3c}, 0x1, 0xc00}, 0x0) [ 2909.315654][T24118] bond697: entered promiscuous mode [ 2909.339240][T24118] 8021q: adding VLAN 0 to HW filter on device bond697 [ 2909.367925][T24123] netlink: 'syz-executor.5': attribute type 1 has an invalid length. 
17:04:34 executing program 4: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x28a, 0x0, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0x3d}]}, 0x3c}}, 0x0) [ 2909.419540][T24123] bond1136: entered promiscuous mode [ 2909.425257][T24123] 8021q: adding VLAN 0 to HW filter on device bond1136 [ 2909.504624][T24125] bond1136: (slave bridge1162): making interface the new active one [ 2909.520843][T24125] bridge1162: entered promiscuous mode [ 2909.542347][T24125] bond1136: (slave bridge1162): Enslaving as an active interface with an up link 17:04:34 executing program 5: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x1318, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) [ 2909.557092][T24137] netlink: 'syz-executor.2': attribute type 1 has an invalid length. 
[ 2909.650321][T24137] bond1057: entered promiscuous mode [ 2909.668227][T24137] 8021q: adding VLAN 0 to HW filter on device bond1057 17:04:34 executing program 3: r0 = socket$netlink(0x10, 0x3, 0x10) socket(0x0, 0x0, 0x0) sendmsg$nl_route(0xffffffffffffffff, 0x0, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000000)=@newlink={0x20, 0x10, 0x1, 0x0, 0x25dfdbfc}, 0x20}}, 0x0) 17:04:34 executing program 3: r0 = socket$netlink(0x10, 0x3, 0x10) socket(0x0, 0x0, 0x0) sendmsg$nl_route(0xffffffffffffffff, 0x0, 0x0) (async, rerun: 64) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000000)=@newlink={0x20, 0x10, 0x1, 0x0, 0x25dfdbfc}, 0x20}}, 0x0) (rerun: 64) [ 2909.893317][T24140] bond1057: (slave bridge1123): making interface the new active one [ 2909.923079][T24140] bridge1123: entered promiscuous mode 17:04:34 executing program 3: r0 = socket$netlink(0x10, 0x3, 0x10) socket(0x0, 0x0, 0x0) sendmsg$nl_route(0xffffffffffffffff, 0x0, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000000)=@newlink={0x20, 0x10, 0x1, 0x0, 0x25dfdbfc}, 0x20}}, 0x0) socket$netlink(0x10, 0x3, 0x10) (async) socket(0x0, 0x0, 0x0) (async) sendmsg$nl_route(0xffffffffffffffff, 0x0, 0x0) (async) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000000)=@newlink={0x20, 0x10, 0x1, 0x0, 0x25dfdbfc}, 0x20}}, 0x0) (async) 17:04:35 executing program 2: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x22a, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) [ 2909.967865][T24140] bond1057: (slave bridge1123): Enslaving as an active interface with an up link [ 2909.993520][T24144] netlink: 'syz-executor.1': attribute type 1 has an invalid length. [ 2910.055297][T24144] bond820: entered promiscuous mode [ 2910.062148][T24144] 8021q: adding VLAN 0 to HW filter on device bond820 [ 2910.088384][T24146] netlink: 12 bytes leftover after parsing attributes in process `syz-executor.0'. 
17:04:35 executing program 3: r0 = socket$netlink(0x10, 0x3, 0x0) socket(0x0, 0x0, 0x0) sendmsg$nl_route(0xffffffffffffffff, 0x0, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000100)=@newlink={0x48, 0x10, 0xffffff1f, 0x0, 0x0, {}, [@IFLA_LINKINFO={0x28, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x18, 0x2, 0x0, 0x1, [@IFLA_BR_MULTI_BOOLOPT={0xc, 0x2e, {0x0, 0x1}}, @IFLA_BR_MCAST_STATS_ENABLED={0x5}]}}}]}, 0x48}}, 0x24044090) 17:04:35 executing program 1: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x28a, 0xa, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8}]}, 0x3c}, 0x1, 0xe00}, 0x0) 17:04:35 executing program 0: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x28a, 0xa, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8}]}, 0x3c}, 0x1, 0xf0}, 0x0) [ 2910.221838][T24154] netlink: 'syz-executor.4': attribute type 1 has an invalid length. 
17:04:35 executing program 4: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x28a, 0x0, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0x3e}]}, 0x3c}}, 0x0) [ 2910.339619][T24154] bond698: entered promiscuous mode [ 2910.345662][T24154] 8021q: adding VLAN 0 to HW filter on device bond698 [ 2910.433525][T24161] bond1137: entered promiscuous mode [ 2910.439277][T24161] 8021q: adding VLAN 0 to HW filter on device bond1137 17:04:35 executing program 5: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x1403, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) [ 2910.506606][T24164] bond1137: (slave bridge1163): making interface the new active one [ 2910.515524][T24164] bridge1163: entered promiscuous mode [ 2910.531580][T24164] bond1137: (slave bridge1163): Enslaving as an active interface with an up link [ 2910.585033][T24186] bond1058: entered promiscuous mode [ 2910.592489][T24186] 8021q: adding VLAN 0 to HW filter on device bond1058 17:04:35 executing program 3: r0 = socket$netlink(0x10, 0x3, 0x0) socket(0x0, 0x0, 0x0) (async) sendmsg$nl_route(0xffffffffffffffff, 0x0, 0x0) (async) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000100)=@newlink={0x48, 0x10, 0xffffff1f, 0x0, 0x0, {}, [@IFLA_LINKINFO={0x28, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x18, 0x2, 0x0, 0x1, [@IFLA_BR_MULTI_BOOLOPT={0xc, 0x2e, {0x0, 0x1}}, @IFLA_BR_MCAST_STATS_ENABLED={0x5}]}}}]}, 0x48}}, 0x24044090) [ 2910.682122][T24192] bond821: entered promiscuous mode [ 2910.687833][T24192] 8021q: adding VLAN 0 to HW filter on device bond821 [ 2910.826092][T24195] bond1058: (slave bridge1124): making interface the new active one [ 2910.842939][T24195] bridge1124: entered promiscuous mode [ 2910.860906][T24195] bond1058: (slave bridge1124): Enslaving as an active interface with an up link 17:04:35 executing program 2: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, 
&(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x242, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) [ 2910.951997][T24199] bond514: entered promiscuous mode [ 2910.971816][T24199] 8021q: adding VLAN 0 to HW filter on device bond514 17:04:36 executing program 1: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x28a, 0xa, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8}]}, 0x3c}, 0x1, 0x1600}, 0x0) 17:04:36 executing program 0: r0 = socket$inet6_sctp(0xa, 0x1, 0x84) bind$inet6(r0, &(0x7f00004b8fe4)={0xa, 0x4e23, 0x0, @empty}, 0x1c) sendto$inet6(r0, &(0x7f00000002c0)='X', 0x1a000, 0x0, &(0x7f0000000200)={0xa, 0x4e23, 0x0, @loopback}, 0x1c) r1 = socket$inet6_sctp(0xa, 0x5, 0x84) r2 = socket$inet6_sctp(0xa, 0x5, 0x84) getsockopt$inet_sctp6_SCTP_SOCKOPT_CONNECTX3(r0, 0x84, 0x6f, &(0x7f0000000000)={0x0, 0x0, &(0x7f0000000040)}, &(0x7f0000000240)=0x10) getsockopt$inet_sctp6_SCTP_GET_ASSOC_ID_LIST(r2, 0x84, 0x1d, &(0x7f0000000080)=ANY=[@ANYBLOB="f77f8000", @ANYRES32=0x0], &(0x7f00000000c0)=0x8) setsockopt$inet_sctp6_SCTP_DELAYED_SACK(r1, 0x84, 0x10, &(0x7f00000003c0)=@sack_info={r4, 0x4000, 0x1}, 0xc) r5 = socket$inet_udp(0x2, 0x2, 0x0) bind$inet(r5, &(0x7f0000000480)={0x2, 0x1, @initdev={0xac, 0x1e, 0x1, 0x0}}, 0x10) setsockopt$sock_int(r5, 0x1, 0x6, &(0x7f0000000140)=0x32, 0x4) connect$inet(r5, &(0x7f0000000280)={0x2, 0x0, @broadcast}, 0x10) sendfile(r0, 0xffffffffffffffff, 0x0, 0x10000000000) sendmmsg$inet(r5, &(0x7f0000002080)=[{{0x0, 0x0, 0x0, 0x0, &(0x7f0000000680)=ANY=[@ANYBLOB="100000000000000000000060070000001c00000000008f2b6395c6be000000001c2065c3c932c1267f82fd1bf1b2885cc1ce88102f564db395b8285f7d9b5ffc9133c5cd03c4cf0317205b36a624cc328e4f3d8c093868e64eb64afa752e19", @ANYRES32=r3, @ANYBLOB="ac1485cb7d1a03c52014aa0066b66c5a6016db9dbb0ae87569b61cac9d80aee4881c9d0000000000000091f50800d2bda4dadee6192e8159224f47eb0162d0a926359ed415921a01aa33cf84cc05110d0a330b3aba806ed1c8bb41e91f30740146a8d032eccf98e899fbd8f0a87c7da60070ba261a0ee0b5e4be7623b3"], 0x30}}], 0x1, 0x0) r6 = socket$netlink(0x10, 0x3, 0x0) sendmsg$netlink(r6, &(0x7f0000006440)={0x0, 0x0, &(0x7f00000063c0)=[{&(0x7f0000000600)=ANY=[@ANYBLOB="340000001000010000000000000000005e000000faff5af608000000", @ANYRES32=0x0, @ANYBLOB="14001b0000000000000000210000000000000001"], 0x34}], 0x1}, 0x0) r7 = socket$inet6(0xa, 0x800000000000002, 0x0) setsockopt$inet6_int(r7, 0x29, 0x46, &(0x7f0000000040)=0x3, 0x4) connect$inet6(r7, &(0x7f0000000000)={0xa, 0x0, 0x0, @local, 
0x5}, 0x1c) sendmmsg$inet(r7, &(0x7f0000002240)=[{{0x0, 0x0, 0x0}}], 0x40000e2, 0x0) getsockopt$inet_sctp6_SCTP_RTOINFO(r0, 0x84, 0x0, &(0x7f0000000100)={r4, 0x8000, 0x3ff, 0x7}, &(0x7f0000000140)=0x10) r8 = bpf$BPF_PROG_RAW_TRACEPOINT_LOAD(0x5, &(0x7f0000000240)={0x11, 0x3, &(0x7f0000000640)=ANY=[@ANYRES32=r1], &(0x7f00000000c0)='GPL\x00', 0x4, 0x91, &(0x7f0000000000)=""/145, 0x0, 0x0, '\x00', 0x0, 0x0, 0xffffffffffffffff, 0x8, 0x0, 0x0, 0x10, 0x0}, 0x80) bpf$BPF_RAW_TRACEPOINT_OPEN(0x11, &(0x7f0000000200)={&(0x7f00000004c0)='contention_begin\x00', r8}, 0x10) ioctl$FS_IOC_GETVERSION(r8, 0x80087601, &(0x7f0000000180)) unshare(0x6c060000) r9 = accept4(r0, &(0x7f0000000400)=@nfc, &(0x7f00000001c0)=0x80, 0x81c00) sendto$inet6(r9, &(0x7f0000000500)="259330942194c017edb00fc5e63de78f0af422ce261a6642858a3527591b7554225841729f1cc478555713800f8c976bc372b7dd22d0616b818da75466de12bb80a24b81562446c462f83a94362a1302", 0x50, 0x88c0, &(0x7f0000000380)={0xa, 0x4e21, 0xce9b, @private0={0xfc, 0x0, '\x00', 0x1}, 0x22b8}, 0x1c) mmap(&(0x7f0000000000/0xb36000)=nil, 0xb36000, 0x0, 0x10, 0xffffffffffffffff, 0x0) setsockopt$EBT_SO_SET_ENTRIES(0xffffffffffffffff, 0x0, 0x80, 0x0, 0x250) socket$inet_smc(0x2b, 0x1, 0x0) [ 2911.164144][T24206] bond699: entered promiscuous mode [ 2911.170173][T24206] 8021q: adding VLAN 0 to HW filter on device bond699 17:04:36 executing program 4: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x28a, 0x0, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0x41}]}, 0x3c}}, 0x0) [ 2911.387367][T24210] bond1138: entered promiscuous mode [ 2911.393828][T24210] 8021q: adding VLAN 0 to HW filter on device bond1138 17:04:36 executing program 5: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x14b0, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) [ 2911.499420][T24212] bond1138: (slave bridge1164): making interface the new active one [ 2911.507710][T24212] bridge1164: entered promiscuous mode [ 2911.529367][T24212] bond1138: (slave bridge1164): Enslaving as an active interface with an up link 17:04:36 executing program 3: r0 = socket$netlink(0x10, 0x3, 0x0) socket(0x0, 0x0, 0x0) sendmsg$nl_route(0xffffffffffffffff, 0x0, 
0x0) (async, rerun: 64) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000100)=@newlink={0x48, 0x10, 0xffffff1f, 0x0, 0x0, {}, [@IFLA_LINKINFO={0x28, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x18, 0x2, 0x0, 0x1, [@IFLA_BR_MULTI_BOOLOPT={0xc, 0x2e, {0x0, 0x1}}, @IFLA_BR_MCAST_STATS_ENABLED={0x5}]}}}]}, 0x48}}, 0x24044090) (rerun: 64) [ 2911.652845][T24220] bond1059: entered promiscuous mode [ 2911.658726][T24220] 8021q: adding VLAN 0 to HW filter on device bond1059 17:04:36 executing program 2: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x244, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) [ 2911.774364][T24221] bond1059: (slave bridge1125): making interface the new active one [ 2911.783579][T24221] bridge1125: entered promiscuous mode [ 2911.798995][T24221] bond1059: (slave bridge1125): Enslaving as an active interface with an up link [ 2911.869618][T24225] bond822: entered promiscuous mode [ 2911.875327][T24225] 8021q: adding VLAN 0 to HW filter on device bond822 17:04:36 executing program 1: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x28a, 0xa, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8}]}, 0x3c}, 0x1, 0x5865}, 0x0) [ 2911.976885][T24233] bond700: entered promiscuous mode [ 2911.990317][T24233] 8021q: adding VLAN 0 to HW filter on device bond700 17:04:37 executing program 4: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x28a, 0x0, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0x42}]}, 0x3c}}, 
0x0) [ 2912.174057][T24236] ‘|‚ªu: left allmulticast mode [ 2912.256270][T24236] bond1: left promiscuous mode [ 2912.262839][T24236] bridge17: left promiscuous mode [ 2912.326188][T24236] bond2: left promiscuous mode [ 2912.342154][T24236] bridge18: left promiscuous mode [ 2912.424909][T24236] bond3: left promiscuous mode [ 2912.434524][T24236] bridge19: left promiscuous mode [ 2912.517189][T24236] bond4: left promiscuous mode [ 2912.522887][T24236] bridge20: left promiscuous mode [ 2912.588448][T24236] bond5: left promiscuous mode [ 2912.598769][T24236] bridge21: left promiscuous mode [ 2912.684852][T24236] bond6: left promiscuous mode [ 2912.702340][T24236] bond7: left promiscuous mode [ 2912.717785][T24236] bond8: left promiscuous mode [ 2912.753895][T24236] bond9: left promiscuous mode [ 2912.787788][T24236] bond10: left promiscuous mode [ 2912.824121][T24236] bond11: left promiscuous mode [ 2912.842865][T24236] bond12: left promiscuous mode [ 2912.877680][T24236] bond13: left promiscuous mode [ 2912.886824][T24236] bridge31: left promiscuous mode [ 2912.948256][T24236] bond14: left promiscuous mode [ 2912.954759][T24236] bridge32: left promiscuous mode [ 2913.029185][T24236] bond15: left promiscuous mode [ 2913.034128][T24236] bridge33: left promiscuous mode [ 2913.079453][T24236] bond16: left promiscuous mode [ 2913.084378][T24236] bridge34: left promiscuous mode [ 2913.153906][T24236] bond17: left promiscuous mode [ 2913.172981][T24236] bridge35: left promiscuous mode [ 2913.311450][T24236] bond18: left promiscuous mode [ 2913.316460][T24236] bridge36: left promiscuous mode [ 2913.446131][T24236] bond19: left promiscuous mode [ 2913.456594][T24236] bridge37: left promiscuous mode [ 2913.563828][T24236] bond20: left promiscuous mode [ 2913.575665][T24236] bridge38: left promiscuous mode [ 2913.635189][T24236] bond21: left promiscuous mode [ 2913.640847][T24236] bridge39: left promiscuous mode [ 2913.687325][T24236] bond22: left promiscuous mode [ 2913.693169][T24236] bridge40: left promiscuous mode [ 2913.741403][T24236] bond23: left promiscuous mode [ 2913.746475][T24236] bridge41: left promiscuous mode [ 2913.794827][T24236] bond24: left promiscuous mode [ 2913.801035][T24236] bridge42: left promiscuous mode [ 2913.850656][T24236] bond25: left promiscuous mode [ 2913.871080][T24236] bond26: left promiscuous mode [ 2913.891415][T24236] bond27: left promiscuous mode [ 2913.912393][T24236] bond28: left promiscuous mode [ 2913.933335][T24236] bond29: left promiscuous mode [ 2913.954568][T24236] bond30: left promiscuous mode [ 2913.975815][T24236] bond31: left promiscuous mode [ 2914.001715][T24236] bond32: left promiscuous mode [ 2914.022963][T24236] bond33: left promiscuous mode [ 2914.045910][T24236] bond34: left promiscuous mode [ 2914.051295][T24236] bridge67: left promiscuous mode [ 2914.100651][T24236] bond35: left promiscuous mode [ 2914.105773][T24236] bridge68: left promiscuous mode [ 2914.156329][T24236] bond36: left promiscuous mode [ 2914.162121][T24236] bridge69: left promiscuous mode [ 2914.212857][T24236] bond37: left promiscuous mode [ 2914.217987][T24236] bridge70: left promiscuous mode [ 2914.270641][T24236] bond38: left promiscuous mode [ 2914.296410][T24236] bond39: left promiscuous mode [ 2914.302126][T24236] bridge71: left promiscuous mode [ 2914.366263][T24236] bond40: left promiscuous mode [ 2914.376251][T24236] bridge72: left promiscuous mode [ 2914.424647][T24236] bond41: left promiscuous mode [ 2914.430219][T24236] bridge73: left promiscuous mode [ 
2914.477404][T24236] bond42: left promiscuous mode [ 2914.483990][T24236] bridge74: left promiscuous mode [ 2914.508133][T24236] bond43: left promiscuous mode [ 2914.513985][T24236] bridge75: left promiscuous mode [ 2914.531387][T24236] bond44: left promiscuous mode [ 2914.560116][T24236] bridge76: left promiscuous mode [ 2914.673086][T24236] bond45: left promiscuous mode [ 2914.684632][T24236] bond46: left promiscuous mode [ 2914.711072][T24236] bond47: left promiscuous mode [ 2914.732344][T24236] bond48: left promiscuous mode [ 2914.755557][T24236] bond49: left promiscuous mode [ 2914.779176][T24236] bond50: left promiscuous mode [ 2914.814299][T24236] bond51: left promiscuous mode [ 2914.843962][T24236] bond52: left promiscuous mode [ 2914.872604][T24236] bond53: left promiscuous mode [ 2914.896966][T24236] bond54: left promiscuous mode [ 2914.931933][T24236] bond55: left promiscuous mode [ 2914.956919][T24236] bond56: left promiscuous mode [ 2914.995528][T24236] bond57: left promiscuous mode [ 2915.023488][T24236] bond58: left promiscuous mode [ 2915.052326][T24236] bond59: left promiscuous mode [ 2915.083896][T24236] bond60: left promiscuous mode [ 2915.113659][T24236] bond61: left promiscuous mode [ 2915.158283][T24236] bond62: left promiscuous mode [ 2915.171494][T24236] bond63: left promiscuous mode [ 2915.194231][T24236] bond64: left promiscuous mode [ 2915.231744][T24236] bond65: left promiscuous mode [ 2915.270929][T24236] bond66: left promiscuous mode [ 2915.294573][T24236] bond67: left promiscuous mode [ 2915.324567][T24236] bond68: left promiscuous mode [ 2915.347767][T24236] bond69: left promiscuous mode [ 2915.372523][T24236] bond70: left promiscuous mode [ 2915.397792][T24236] bond71: left promiscuous mode [ 2915.425477][T24236] bond72: left promiscuous mode [ 2915.445873][T24236] bond73: left promiscuous mode [ 2915.465115][T24236] bond74: left promiscuous mode [ 2915.484372][T24236] bond75: left promiscuous mode [ 2915.503061][T24236] bond76: left promiscuous mode [ 2915.549128][T24236] bond77: left promiscuous mode [ 2915.575100][T24236] bond78: left promiscuous mode [ 2915.599247][T24236] bond79: left promiscuous mode [ 2915.615867][T24236] bond80: left promiscuous mode [ 2915.621406][T24236] bridge89: left promiscuous mode [ 2915.665532][T24236] bond81: left promiscuous mode [ 2915.684647][T24236] bond82: left promiscuous mode [ 2915.704388][T24236] bond83: left promiscuous mode [ 2915.709899][T24236] bridge92: left promiscuous mode [ 2915.755536][T24236] bond84: left promiscuous mode [ 2915.761049][T24236] bridge93: left promiscuous mode [ 2915.806609][T24236] bond85: left promiscuous mode [ 2915.812004][T24236] bridge94: left promiscuous mode [ 2915.866657][T24236] bond86: left promiscuous mode [ 2915.872841][T24236] bridge95: left promiscuous mode [ 2915.920421][T24236] bond87: left promiscuous mode [ 2915.925500][T24236] bridge96: left promiscuous mode [ 2915.976786][T24236] bond88: left promiscuous mode [ 2915.986359][T24236] bridge97: left promiscuous mode [ 2916.062906][T24236] bond89: left promiscuous mode [ 2916.085704][T24236] bond90: left promiscuous mode [ 2916.111922][T24236] bond91: left promiscuous mode [ 2916.116856][T24236] bridge103: left promiscuous mode [ 2916.187221][T24236] bond92: left promiscuous mode [ 2916.192357][T24236] bridge104: left promiscuous mode [ 2916.261741][T24236] bond93: left promiscuous mode [ 2916.266674][T24236] bridge105: left promiscuous mode [ 2916.343639][T24236] bond94: left promiscuous mode [ 2916.364510][T24236] 
bond95: left promiscuous mode [ 2916.385297][T24236] bond96: left promiscuous mode [ 2916.391115][T24236] bridge106: left promiscuous mode [ 2916.451708][T24236] bond97: left promiscuous mode [ 2916.456648][T24236] bridge107: left promiscuous mode [ 2916.528364][T24236] bond98: left promiscuous mode [ 2916.533697][T24236] bridge108: left promiscuous mode [ 2916.598498][T24236] bond99: left promiscuous mode [ 2916.603560][T24236] bridge109: left promiscuous mode [ 2916.661078][T24236] bond100: left promiscuous mode [ 2916.666099][T24236] bridge110: left promiscuous mode [ 2916.742179][T24236] bond101: left promiscuous mode [ 2916.747204][T24236] bridge111: left promiscuous mode [ 2916.809913][T24236] bond102: left promiscuous mode [ 2916.815458][T24236] bridge112: left promiscuous mode [ 2916.879222][T24236] bond103: left promiscuous mode [ 2916.884507][T24236] bridge113: left promiscuous mode [ 2916.940495][T24236] bond104: left promiscuous mode [ 2916.946394][T24236] bridge114: left promiscuous mode [ 2917.010583][T24236] bond105: left promiscuous mode [ 2917.015609][T24236] bridge115: left promiscuous mode [ 2917.085640][T24236] bond106: left promiscuous mode [ 2917.091103][T24236] bridge116: left promiscuous mode [ 2917.147482][T24236] bond107: left promiscuous mode [ 2917.153327][T24236] bridge117: left promiscuous mode [ 2917.229954][T24236] bond108: left promiscuous mode [ 2917.235827][T24236] bridge118: left promiscuous mode [ 2917.286450][T24236] bond109: left promiscuous mode [ 2917.292759][T24236] bridge119: left promiscuous mode [ 2917.345032][T24236] bond110: left promiscuous mode [ 2917.351443][T24236] bridge120: left promiscuous mode [ 2917.420954][T24236] bond111: left promiscuous mode [ 2917.425986][T24236] bridge121: left promiscuous mode [ 2917.483840][T24236] bond112: left promiscuous mode [ 2917.489659][T24236] bridge122: left promiscuous mode [ 2917.545424][T24236] bond113: left promiscuous mode [ 2917.551211][T24236] bridge123: left promiscuous mode [ 2917.628380][T24236] bond114: left promiscuous mode [ 2917.635492][T24236] bridge124: left promiscuous mode [ 2917.696438][T24236] bond115: left promiscuous mode [ 2917.702731][T24236] bridge125: left promiscuous mode [ 2917.786958][T24236] bond116: left promiscuous mode [ 2917.809823][T24236] bond117: left promiscuous mode [ 2917.831360][T24236] bond118: left promiscuous mode [ 2917.859126][T24236] bond119: left promiscuous mode [ 2917.874504][T24236] bond120: left promiscuous mode [ 2917.894693][T24236] bond121: left promiscuous mode [ 2917.915254][T24236] bond122: left promiscuous mode [ 2917.954986][T24236] bond123: left promiscuous mode [ 2917.986185][T24236] bond124: left promiscuous mode [ 2918.010933][T24236] bond125: left promiscuous mode [ 2918.016092][T24236] bridge126: left promiscuous mode [ 2918.072147][T24236] bond126: left promiscuous mode [ 2918.077249][T24236] bridge127: left promiscuous mode [ 2918.155154][T24236] bond127: left promiscuous mode [ 2918.160265][T24236] bridge128: left promiscuous mode [ 2918.235520][T24236] bond128: left promiscuous mode [ 2918.241104][T24236] bridge129: left promiscuous mode [ 2918.305349][T24236] bond129: left promiscuous mode [ 2918.315503][T24236] bridge130: left promiscuous mode [ 2918.376037][T24236] bond130: left promiscuous mode [ 2918.381666][T24236] bridge131: left promiscuous mode [ 2918.426557][T24236] bond131: left promiscuous mode [ 2918.431779][T24236] bridge132: left promiscuous mode [ 2918.477576][T24236] bond132: left promiscuous mode [ 
2918.482816][T24236] bridge133: left promiscuous mode [ 2918.545481][T24236] bond133: left promiscuous mode [ 2918.552556][T24236] bridge134: left promiscuous mode [ 2918.617078][T24236] bond134: left promiscuous mode [ 2918.625779][T24236] bridge135: left promiscuous mode [ 2918.677757][T24236] bond135: left promiscuous mode [ 2918.684797][T24236] bridge136: left promiscuous mode [ 2918.730646][T24236] bond136: left promiscuous mode [ 2918.735676][T24236] bridge137: left promiscuous mode [ 2918.782294][T24236] bond137: left promiscuous mode [ 2918.787320][T24236] bridge138: left promiscuous mode [ 2918.837095][T24236] bond138: left promiscuous mode [ 2918.847119][T24236] bridge139: left promiscuous mode [ 2918.905062][T24236] bond139: left promiscuous mode [ 2918.910535][T24236] bridge140: left promiscuous mode [ 2918.958506][T24236] bond140: left promiscuous mode [ 2918.964175][T24236] bridge141: left promiscuous mode [ 2919.014096][T24236] bond141: left promiscuous mode [ 2919.019315][T24236] bridge142: left promiscuous mode [ 2919.074069][T24236] bond142: left promiscuous mode [ 2919.079670][T24236] bridge143: left promiscuous mode [ 2919.132747][T24236] bond143: left promiscuous mode [ 2919.137813][T24236] bridge144: left promiscuous mode [ 2919.187336][T24236] bond144: left promiscuous mode [ 2919.193717][T24236] bridge145: left promiscuous mode [ 2919.240200][T24236] bond145: left promiscuous mode [ 2919.245222][T24236] bridge146: left promiscuous mode [ 2919.296624][T24236] bond146: left promiscuous mode [ 2919.306424][T24236] bridge147: left promiscuous mode [ 2919.370585][T24236] bond147: left promiscuous mode [ 2919.375816][T24236] bridge148: left promiscuous mode [ 2919.429461][T24236] bond148: left promiscuous mode [ 2919.434479][T24236] bridge149: left promiscuous mode [ 2919.484297][T24236] bond149: left promiscuous mode [ 2919.490937][T24236] bridge150: left promiscuous mode [ 2919.536147][T24236] bond150: left promiscuous mode [ 2919.541485][T24236] bridge151: left promiscuous mode [ 2919.607422][T24236] bond151: left promiscuous mode [ 2919.613860][T24236] bridge152: left promiscuous mode [ 2919.656994][T24236] bond152: left promiscuous mode [ 2919.662158][T24236] bridge153: left promiscuous mode [ 2919.707023][T24236] bond153: left promiscuous mode [ 2919.712336][T24236] bridge154: left promiscuous mode [ 2919.756411][T24236] bond154: left promiscuous mode [ 2919.761573][T24236] bridge155: left promiscuous mode [ 2919.806146][T24236] bond155: left promiscuous mode [ 2919.830290][T24236] bond156: left promiscuous mode [ 2919.835317][T24236] bridge156: left promiscuous mode [ 2919.894664][T24236] bond157: left promiscuous mode [ 2919.900442][T24236] bridge157: left promiscuous mode [ 2919.945045][T24236] bond158: left promiscuous mode [ 2919.950116][T24236] bridge158: left promiscuous mode [ 2920.003126][T24236] bond159: left promiscuous mode [ 2920.008155][T24236] bridge159: left promiscuous mode [ 2920.053929][T24236] bond160: left promiscuous mode [ 2920.063542][T24236] bridge160: left promiscuous mode [ 2920.126358][T24236] bond161: left promiscuous mode [ 2920.133738][T24236] bridge161: left promiscuous mode [ 2920.198481][T24236] bond162: left promiscuous mode [ 2920.212381][T24236] bridge162: left promiscuous mode [ 2920.286725][T24236] bond163: left promiscuous mode [ 2920.300025][T24236] bridge163: left promiscuous mode [ 2920.358198][T24236] bond164: left promiscuous mode [ 2920.364007][T24236] bridge164: left promiscuous mode [ 2920.409202][T24236] bond165: left 
promiscuous mode [ 2920.414220][T24236] bridge165: left promiscuous mode [ 2920.459458][T24236] bond166: left promiscuous mode [ 2920.464489][T24236] bridge166: left promiscuous mode [ 2920.512543][T24236] bond167: left promiscuous mode [ 2920.517578][T24236] bridge167: left promiscuous mode [ 2920.563666][T24236] bond168: left promiscuous mode [ 2920.568888][T24236] bridge168: left promiscuous mode [ 2920.613656][T24236] bond169: left promiscuous mode [ 2920.618904][T24236] bridge169: left promiscuous mode [ 2920.667193][T24236] bond170: left promiscuous mode [ 2920.672403][T24236] bridge170: left promiscuous mode [ 2920.718467][T24236] bond171: left promiscuous mode [ 2920.725208][T24236] bridge171: left promiscuous mode [ 2920.789733][T24236] bond172: left promiscuous mode [ 2920.810215][T24236] bond173: left promiscuous mode [ 2920.815326][T24236] bridge172: left promiscuous mode [ 2920.863480][T24236] bond174: left promiscuous mode [ 2920.868497][T24236] bridge173: left promiscuous mode [ 2920.913579][T24236] bond175: left promiscuous mode [ 2920.919743][T24236] bridge174: left promiscuous mode [ 2920.973427][T24236] bond176: left promiscuous mode [ 2920.978460][T24236] bridge175: left promiscuous mode [ 2921.033124][T24236] bond177: left promiscuous mode [ 2921.038222][T24236] bridge176: left promiscuous mode [ 2921.083413][T24236] bond178: left promiscuous mode [ 2921.088442][T24236] bridge177: left promiscuous mode [ 2921.137544][T24236] bond179: left promiscuous mode [ 2921.161787][T24236] bond180: left promiscuous mode [ 2921.166817][T24236] bridge178: left promiscuous mode [ 2921.222196][T24236] bond181: left promiscuous mode [ 2921.227324][T24236] bridge179: left promiscuous mode [ 2921.275271][T24236] bond182: left promiscuous mode [ 2921.280485][T24236] bridge180: left promiscuous mode [ 2921.325006][T24236] bond183: left promiscuous mode [ 2921.344939][T24236] bond184: left promiscuous mode [ 2921.365173][T24236] bond185: left promiscuous mode [ 2921.384184][T24236] bond186: left promiscuous mode [ 2921.403352][T24236] bond187: left promiscuous mode [ 2921.408369][T24236] bridge181: left promiscuous mode [ 2921.454374][T24236] bond188: left promiscuous mode [ 2921.461679][T24236] bridge182: left promiscuous mode [ 2921.520418][T24236] bond189: left promiscuous mode [ 2921.537973][T24236] bond190: left promiscuous mode [ 2921.593319][T24236] bridge183: left promiscuous mode [ 2921.598515][T24236] bridge183: left allmulticast mode [ 2921.626035][T24236] bond191: left promiscuous mode [ 2921.670939][T24236] bond192: left promiscuous mode [ 2921.676265][T24236] bridge184: left promiscuous mode [ 2921.727629][T24236] bond193: left promiscuous mode [ 2921.733724][T24236] bridge185: left promiscuous mode [ 2921.795201][T24236] bond194: left promiscuous mode [ 2921.803720][T24236] bridge186: left promiscuous mode [ 2921.877163][T24236] bond195: left promiscuous mode [ 2921.883254][T24236] bridge187: left promiscuous mode [ 2921.942740][T24236] bond196: left promiscuous mode [ 2921.948021][T24236] bridge188: left promiscuous mode [ 2922.014953][T24236] bond197: left promiscuous mode [ 2922.021499][T24236] bridge189: left promiscuous mode [ 2922.076598][T24236] bond198: left promiscuous mode [ 2922.081834][T24236] bridge190: left promiscuous mode [ 2922.146139][T24236] bond199: left promiscuous mode [ 2922.165340][T24236] bond200: left promiscuous mode [ 2922.170642][T24236] bridge191: left promiscuous mode [ 2922.214994][T24236] bond201: left promiscuous mode [ 2922.220736][T24236] 
bridge192: left promiscuous mode [ 2922.265190][T24236] bond202: left promiscuous mode [ 2922.270310][T24236] bridge193: left promiscuous mode [ 2922.325501][T24236] bond203: left promiscuous mode [ 2922.331265][T24236] bridge194: left promiscuous mode [ 2922.383500][T24236] bond204: left promiscuous mode [ 2922.388521][T24236] bridge195: left promiscuous mode [ 2922.437593][T24236] bond205: left promiscuous mode [ 2922.443972][T24236] bridge196: left promiscuous mode [ 2922.532717][T24236] bond206: left promiscuous mode [ 2922.537802][T24236] bridge197: left promiscuous mode [ 2922.608030][T24236] bond207: left promiscuous mode [ 2922.615062][T24236] bridge198: left promiscuous mode [ 2922.681910][T24236] bond208: left promiscuous mode [ 2922.686931][T24236] bridge199: left promiscuous mode [ 2922.731957][T24236] bond209: left promiscuous mode [ 2922.736997][T24236] bridge200: left promiscuous mode [ 2922.785328][T24236] bond210: left promiscuous mode [ 2922.812312][T24236] bond211: left promiscuous mode [ 2922.838303][T24236] bond212: left promiscuous mode [ 2922.872019][T24236] bond213: left promiscuous mode [ 2922.877036][T24236] bridge201: left promiscuous mode [ 2922.942158][T24236] bond214: left promiscuous mode [ 2922.947186][T24236] bridge202: left promiscuous mode [ 2923.013793][T24236] bond215: left promiscuous mode [ 2923.046820][T24236] bond216: left promiscuous mode [ 2923.052005][T24236] bridge203: left promiscuous mode [ 2923.104179][T24236] bond217: left promiscuous mode [ 2923.109313][T24236] bridge204: left promiscuous mode [ 2923.159873][T24236] bond218: left promiscuous mode [ 2923.164892][T24236] bridge205: left promiscuous mode [ 2923.215244][T24236] bond219: left promiscuous mode [ 2923.220926][T24236] bridge206: left promiscuous mode [ 2923.277803][T24236] bond220: left promiscuous mode [ 2923.283551][T24236] bridge207: left promiscuous mode [ 2923.352650][T24236] bond221: left promiscuous mode [ 2923.357692][T24236] bridge208: left promiscuous mode [ 2923.432187][T24236] bond222: left promiscuous mode [ 2923.437300][T24236] bridge209: left promiscuous mode [ 2923.483700][T24236] bond223: left promiscuous mode [ 2923.489029][T24236] bridge210: left promiscuous mode [ 2923.539286][T24236] bond224: left promiscuous mode [ 2923.544306][T24236] bridge211: left promiscuous mode [ 2923.601857][T24236] bond225: left promiscuous mode [ 2923.606886][T24236] bridge212: left promiscuous mode [ 2923.663693][T24236] bond226: left promiscuous mode [ 2923.669217][T24236] bridge213: left promiscuous mode [ 2923.713149][T24236] bond227: left promiscuous mode [ 2923.718177][T24236] bridge214: left promiscuous mode [ 2923.766222][T24236] bond228: left promiscuous mode [ 2923.773071][T24236] bridge215: left promiscuous mode [ 2923.838351][T24236] bond229: left promiscuous mode [ 2923.845818][T24236] bridge216: left promiscuous mode [ 2923.896986][T24236] bond230: left promiscuous mode [ 2923.903727][T24236] bridge217: left promiscuous mode [ 2923.974336][T24236] bond231: left promiscuous mode [ 2923.979936][T24236] bridge218: left promiscuous mode [ 2924.027814][T24236] bond232: left promiscuous mode [ 2924.033914][T24236] bridge219: left promiscuous mode [ 2924.101997][T24236] bond233: left promiscuous mode [ 2924.107113][T24236] bridge220: left promiscuous mode [ 2924.175783][T24236] bond234: left promiscuous mode [ 2924.181022][T24236] bridge221: left promiscuous mode [ 2924.235972][T24236] bond235: left promiscuous mode [ 2924.249479][T24236] bridge222: left promiscuous mode [ 
2924.324065][T24236] bond236: left promiscuous mode [ 2924.330619][T24236] bridge223: left promiscuous mode [ 2924.396317][T24236] bond237: left promiscuous mode [ 2924.402739][T24236] bridge224: left promiscuous mode [ 2924.466090][T24236] bond238: left promiscuous mode [ 2924.471244][T24236] bridge225: left promiscuous mode [ 2924.534083][T24236] bond239: left promiscuous mode [ 2924.565703][T24236] bond240: left promiscuous mode [ 2924.589984][T24236] bond241: left promiscuous mode [ 2924.610641][T24236] bond242: left promiscuous mode [ 2924.631299][T24236] bond243: left promiscuous mode [ 2924.651397][T24236] bond244: left promiscuous mode [ 2924.670083][T24236] bond245: left promiscuous mode [ 2924.688411][T24236] bond246: left promiscuous mode [ 2924.707811][T24236] bond247: left promiscuous mode [ 2924.726805][T24236] bond248: left promiscuous mode [ 2924.747688][T24236] bond249: left promiscuous mode [ 2924.780280][T24236] bond250: left promiscuous mode [ 2924.800358][T24236] bond251: left promiscuous mode [ 2924.823717][T24236] bond252: left promiscuous mode [ 2924.847956][T24236] bond253: left promiscuous mode [ 2924.877427][T24236] bond254: left promiscuous mode [ 2924.897707][T24236] bond255: left promiscuous mode [ 2924.917397][T24236] bond256: left promiscuous mode [ 2924.922887][T24236] bridge226: left promiscuous mode [ 2924.972155][T24236] bond257: left promiscuous mode [ 2924.977182][T24236] bridge227: left promiscuous mode [ 2925.023101][T24236] bond258: left promiscuous mode [ 2925.029823][T24236] bridge228: left promiscuous mode [ 2925.090363][T24236] bond259: left promiscuous mode [ 2925.095455][T24236] bridge229: left promiscuous mode [ 2925.140184][T24236] bond260: left promiscuous mode [ 2925.145206][T24236] bridge230: left promiscuous mode [ 2925.200660][T24236] bond261: left promiscuous mode [ 2925.205681][T24236] bridge231: left promiscuous mode [ 2925.257797][T24236] bond262: left promiscuous mode [ 2925.262988][T24236] bridge232: left promiscuous mode [ 2925.319924][T24236] bond263: left promiscuous mode [ 2925.324969][T24236] bridge233: left promiscuous mode [ 2925.390820][T24236] bond264: left promiscuous mode [ 2925.395844][T24236] bridge234: left promiscuous mode [ 2925.444855][T24236] bond265: left promiscuous mode [ 2925.450687][T24236] bridge235: left promiscuous mode [ 2925.508356][T24236] bond266: left promiscuous mode [ 2925.520904][T24236] bridge236: left promiscuous mode [ 2925.571897][T24236] bond267: left promiscuous mode [ 2925.576912][T24236] bridge237: left promiscuous mode [ 2925.624160][T24236] bond268: left promiscuous mode [ 2925.629661][T24236] bridge238: left promiscuous mode [ 2925.673586][T24236] bond269: left promiscuous mode [ 2925.679318][T24236] bridge239: left promiscuous mode [ 2925.730360][T24236] bond270: left promiscuous mode [ 2925.735380][T24236] bridge240: left promiscuous mode [ 2925.783216][T24236] bond271: left promiscuous mode [ 2925.788232][T24236] bridge241: left promiscuous mode [ 2925.834174][T24236] bond272: left promiscuous mode [ 2925.844199][T24236] bridge242: left promiscuous mode [ 2925.910462][T24236] bond273: left promiscuous mode [ 2925.915575][T24236] bridge243: left promiscuous mode [ 2925.991278][T24236] bond274: left promiscuous mode [ 2926.027258][T24236] bond275: left promiscuous mode [ 2926.033360][T24236] bridge244: left promiscuous mode [ 2926.114077][T24236] bond276: left promiscuous mode [ 2926.151767][T24236] bond277: left promiscuous mode [ 2926.182037][T24236] bond278: left promiscuous mode [ 
2926.208221][T24236] bond279: left promiscuous mode [ 2926.234835][T24236] bond280: left promiscuous mode [ 2926.267052][T24236] bond281: left promiscuous mode [ 2926.272192][T24236] bridge245: left promiscuous mode [ 2926.336378][T24236] bond282: left promiscuous mode [ 2926.341614][T24236] bridge246: left promiscuous mode [ 2926.354791][T24236] bond283: left promiscuous mode [ 2926.363841][T24236] bridge247: left promiscuous mode [ 2926.396532][T24236] bond284: left promiscuous mode [ 2926.402249][T24236] bridge248: left promiscuous mode [ 2926.416860][T24236] bond285: left promiscuous mode [ 2926.422967][T24236] bridge249: left promiscuous mode [ 2926.522261][T24236] bond286: left promiscuous mode [ 2926.527290][T24236] bridge250: left promiscuous mode [ 2926.575054][T24236] bond287: left promiscuous mode [ 2926.598266][T24236] bond288: left promiscuous mode [ 2926.604602][T24236] bridge251: left promiscuous mode [ 2926.675632][T24236] bond289: left promiscuous mode [ 2926.681354][T24236] bridge252: left promiscuous mode [ 2926.744652][T24236] bond290: left promiscuous mode [ 2926.822156][T24236] bond291: left promiscuous mode [ 2926.827344][T24236] bridge255: left promiscuous mode [ 2926.923039][T24236] bond292: left promiscuous mode [ 2926.975336][T24236] bond293: left promiscuous mode [ 2927.001269][T24236] bond294: left promiscuous mode [ 2927.007570][T24236] bridge256: left promiscuous mode [ 2927.063543][T24236] bond295: left promiscuous mode [ 2927.070098][T24236] bridge257: left promiscuous mode [ 2927.120995][T24236] bond296: left promiscuous mode [ 2927.198967][T24236] bond297: left promiscuous mode [ 2927.204223][T24236] bridge261: left promiscuous mode [ 2927.267631][T24236] bond298: left promiscuous mode [ 2927.290404][T24236] bond299: left promiscuous mode [ 2927.296109][T24236] bridge266: left promiscuous mode [ 2927.354838][T24236] bond300: left promiscuous mode [ 2927.362172][T24236] bridge267: left promiscuous mode [ 2927.405584][T24236] bridge269: left allmulticast mode [ 2927.432981][T24236] bridge270: left allmulticast mode [ 2927.438389][T24236] bridge271: left allmulticast mode [ 2927.454125][T24236] bond301: left promiscuous mode [ 2927.475161][T24236] bond302: left promiscuous mode [ 2927.511888][T24236] bond303: left promiscuous mode [ 2927.626659][T24236] bond304: left promiscuous mode [ 2927.637402][T24236] bridge275: left promiscuous mode [ 2927.692029][T24236] bond305: left promiscuous mode [ 2927.697077][T24236] bridge276: left promiscuous mode [ 2928.016302][T24236] bond306: left promiscuous mode [ 2928.028331][T24236] bridge284: left promiscuous mode [ 2928.089471][T24236] bond307: left promiscuous mode [ 2928.094495][T24236] bridge285: left promiscuous mode [ 2928.140553][T24236] bond308: left promiscuous mode [ 2928.161774][T24236] bond309: left promiscuous mode [ 2928.166791][T24236] bridge286: left promiscuous mode [ 2928.232719][T24236] bond310: left promiscuous mode [ 2928.237755][T24236] bridge287: left promiscuous mode [ 2928.305808][T24236] bond311: left promiscuous mode [ 2928.311372][T24236] bridge288: left promiscuous mode [ 2928.325240][T24236] bond312: left promiscuous mode [ 2928.335048][T24236] bond313: left promiscuous mode [ 2928.347026][T24236] bridge289: left promiscuous mode [ 2928.390365][T24236] bond314: left promiscuous mode [ 2928.395343][T24236] bridge290: left promiscuous mode [ 2928.431094][T24236] bond315: left promiscuous mode [ 2928.470643][T24236] bond316: left promiscuous mode [ 2928.481879][T24236] bond317: left 
promiscuous mode [ 2928.487045][T24236] bridge292: left promiscuous mode [ 2928.526086][T24236] bond318: left promiscuous mode [ 2928.534809][T24236] bridge293: left promiscuous mode [ 2928.573039][T24236] bond319: left promiscuous mode [ 2928.578046][T24236] bridge294: left promiscuous mode [ 2928.600558][T24236] bond320: left promiscuous mode [ 2928.605588][T24236] bridge295: left promiscuous mode [ 2928.641272][T24236] bond321: left promiscuous mode [ 2928.646281][T24236] bridge296: left promiscuous mode [ 2928.692277][T24236] bond322: left promiscuous mode [ 2928.707474][T24236] bond323: left promiscuous mode [ 2928.722473][T24236] bond324: left promiscuous mode [ 2928.731233][T24236] bond325: left promiscuous mode [ 2928.736262][T24236] bridge298: left promiscuous mode [ 2928.776964][T24236] bond326: left promiscuous mode [ 2928.782100][T24236] bridge299: left promiscuous mode [ 2928.818969][T24236] bond327: left promiscuous mode [ 2928.834407][T24236] bond328: left promiscuous mode [ 2928.850727][T24236] bond329: left promiscuous mode [ 2928.855931][T24236] bridge300: left promiscuous mode [ 2928.985599][T24236] bond330: left promiscuous mode [ 2929.001522][T24236] bond331: left promiscuous mode [ 2929.006534][T24236] bridge307: left promiscuous mode [ 2929.086995][T24236] bond332: left promiscuous mode [ 2929.092445][T24236] bridge311: left promiscuous mode [ 2929.127405][T24236] bond333: left promiscuous mode [ 2929.146890][T24236] bond334: left promiscuous mode [ 2929.165690][T24236] bond335: left promiscuous mode [ 2929.172540][T24236] bridge313: left promiscuous mode [ 2929.223565][T24236] bond336: left promiscuous mode [ 2929.228732][T24236] bridge314: left promiscuous mode [ 2929.266824][T24236] bond337: left promiscuous mode [ 2929.283495][T24236] bond338: left promiscuous mode [ 2929.300185][T24236] bond339: left promiscuous mode [ 2929.315211][T24236] bond340: left promiscuous mode [ 2929.320290][T24236] bridge316: left promiscuous mode [ 2929.357424][T24236] bond341: left promiscuous mode [ 2929.374089][T24236] bond342: left promiscuous mode [ 2929.391406][T24236] bond343: left promiscuous mode [ 2929.396417][T24236] bridge321: left promiscuous mode [ 2929.432996][T24236] bond344: left promiscuous mode [ 2929.438004][T24236] bridge322: left promiscuous mode [ 2929.474996][T24236] bond345: left promiscuous mode [ 2929.532445][T24236] bond346: left promiscuous mode [ 2929.541709][T24236] bond347: left promiscuous mode [ 2929.552188][T24236] bond348: left promiscuous mode [ 2929.557229][T24236] bridge326: left promiscuous mode [ 2929.574747][T24236] bond349: left promiscuous mode [ 2929.588386][T24236] bridge327: left promiscuous mode [ 2929.602780][T24236] bond350: left promiscuous mode [ 2929.608684][T24236] bridge328: left promiscuous mode [ 2929.628956][T24236] bond351: left promiscuous mode [ 2929.645534][T24236] bridge329: left promiscuous mode [ 2929.664989][T24236] bond352: left promiscuous mode [ 2929.709008][T24236] bond353: left promiscuous mode [ 2929.714034][T24236] bridge330: left promiscuous mode [ 2929.752305][T24236] bond354: left promiscuous mode [ 2929.757405][T24236] bridge331: left promiscuous mode [ 2929.793957][T24236] bond355: left promiscuous mode [ 2929.799658][T24236] bridge332: left promiscuous mode [ 2929.964903][T24236] bond356: left promiscuous mode [ 2929.970172][T24236] bridge339: left promiscuous mode [ 2930.007356][T24236] bond357: left promiscuous mode [ 2930.012822][T24236] bridge340: left promiscuous mode [ 2930.047257][T24236] bond358: 
left promiscuous mode [ 2930.054811][T24236] bridge341: left promiscuous mode [ 2930.096751][T24236] bond359: left promiscuous mode [ 2930.102114][T24236] bridge342: left promiscuous mode [ 2930.121238][T24236] bond360: left promiscuous mode [ 2930.128638][T24236] bridge343: left promiscuous mode [ 2930.180195][T24236] bond361: left promiscuous mode [ 2930.185197][T24236] bridge344: left promiscuous mode [ 2930.206237][T24236] bond362: left promiscuous mode [ 2930.212399][T24236] bridge345: left promiscuous mode [ 2930.238024][T24236] bond363: left promiscuous mode [ 2930.244391][T24236] bridge346: left promiscuous mode [ 2930.272278][T24236] bond364: left promiscuous mode [ 2930.277787][T24236] bridge347: left promiscuous mode [ 2930.325046][T24236] bond365: left promiscuous mode [ 2930.331277][T24236] bridge348: left promiscuous mode [ 2930.349281][T24236] bond366: left promiscuous mode [ 2930.363955][T24236] bridge349: left promiscuous mode [ 2930.394881][T24236] bond367: left promiscuous mode [ 2930.406999][T24236] bridge350: left promiscuous mode [ 2930.445199][T24236] bond368: left promiscuous mode [ 2930.450774][T24236] bridge351: left promiscuous mode [ 2930.487865][T24236] bond369: left promiscuous mode [ 2930.493320][T24236] bridge352: left promiscuous mode [ 2930.531031][T24236] bond370: left promiscuous mode [ 2930.536295][T24236] bridge353: left promiscuous mode [ 2930.572517][T24236] bond371: left promiscuous mode [ 2930.577521][T24236] bridge354: left promiscuous mode [ 2930.595140][T24236] bond372: left promiscuous mode [ 2930.600555][T24236] bridge355: left promiscuous mode [ 2930.655630][T24236] bond373: left promiscuous mode [ 2930.661048][T24236] bridge356: left promiscuous mode [ 2930.696986][T24236] bond374: left promiscuous mode [ 2930.702442][T24236] bridge357: left promiscuous mode [ 2930.739125][T24236] bond375: left promiscuous mode [ 2930.744131][T24236] bridge358: left promiscuous mode [ 2930.782909][T24236] bond376: left promiscuous mode [ 2930.787920][T24236] bridge359: left promiscuous mode [ 2930.825143][T24236] bond377: left promiscuous mode [ 2930.831399][T24236] bridge360: left promiscuous mode [ 2930.868029][T24236] bond378: left promiscuous mode [ 2930.873808][T24236] bridge361: left promiscuous mode [ 2930.911205][T24236] bond379: left promiscuous mode [ 2930.916218][T24236] bridge362: left promiscuous mode [ 2930.952566][T24236] bond380: left promiscuous mode [ 2930.957576][T24236] bridge363: left promiscuous mode [ 2930.994889][T24236] bond381: left promiscuous mode [ 2931.000363][T24236] bridge364: left promiscuous mode [ 2931.039402][T24236] bond382: left promiscuous mode [ 2931.044392][T24236] bridge365: left promiscuous mode [ 2931.082444][T24236] bond383: left promiscuous mode [ 2931.087443][T24236] bridge366: left promiscuous mode [ 2931.125173][T24236] bond384: left promiscuous mode [ 2931.130660][T24236] bridge367: left promiscuous mode [ 2931.168080][T24236] bond385: left promiscuous mode [ 2931.173836][T24236] bridge368: left promiscuous mode [ 2931.206269][T24236] bond386: left promiscuous mode [ 2931.211377][T24236] bridge369: left promiscuous mode [ 2931.245720][T24236] bond387: left promiscuous mode [ 2931.261814][T24236] bond388: left promiscuous mode [ 2931.266811][T24236] bridge370: left promiscuous mode [ 2931.328321][T24236] bond389: left promiscuous mode [ 2931.333497][T24236] bridge372: left promiscuous mode [ 2931.367687][T24236] bond390: left promiscuous mode [ 2931.375274][T24236] bridge373: left promiscuous mode [ 
2931.411185][T24236] bond391: left promiscuous mode [ 2931.416189][T24236] bridge374: left promiscuous mode [ 2931.452298][T24236] bond392: left promiscuous mode [ 2931.457470][T24236] bridge375: left promiscuous mode [ 2931.493901][T24236] bond393: left promiscuous mode [ 2931.499580][T24236] bridge376: left promiscuous mode [ 2931.536892][T24236] bond394: left promiscuous mode [ 2931.542418][T24236] bridge377: left promiscuous mode [ 2931.580208][T24236] bond395: left promiscuous mode [ 2931.585187][T24236] bridge378: left promiscuous mode [ 2931.623959][T24236] bond396: left promiscuous mode [ 2931.630854][T24236] bridge379: left promiscuous mode [ 2931.667560][T24236] bond397: left promiscuous mode [ 2931.672604][T24236] bridge380: left promiscuous mode [ 2931.693949][T24236] bond398: left promiscuous mode [ 2931.699213][T24236] bridge381: left promiscuous mode [ 2931.726224][T24236] bond399: left promiscuous mode [ 2931.733097][T24236] bridge382: left promiscuous mode [ 2931.776570][T24236] bond400: left promiscuous mode [ 2931.782234][T24236] bridge383: left promiscuous mode [ 2931.803220][T24236] bond401: left promiscuous mode [ 2931.808245][T24236] bridge384: left promiscuous mode [ 2931.852774][T24236] bond402: left promiscuous mode [ 2931.857793][T24236] bridge385: left promiscuous mode [ 2931.896255][T24236] bond403: left promiscuous mode [ 2931.901961][T24236] bridge386: left promiscuous mode [ 2931.935293][T24236] bond404: left promiscuous mode [ 2931.941438][T24236] bridge387: left promiscuous mode [ 2931.977732][T24236] bond405: left promiscuous mode [ 2931.983637][T24236] bridge388: left promiscuous mode [ 2932.018529][T24236] bond406: left promiscuous mode [ 2932.023664][T24236] bridge389: left promiscuous mode [ 2932.039226][T24236] bond407: left promiscuous mode [ 2932.044242][T24236] bridge390: left promiscuous mode [ 2932.083902][T24236] bond408: left promiscuous mode [ 2932.091001][T24236] bridge391: left promiscuous mode [ 2932.126472][T24236] bond409: left promiscuous mode [ 2932.133352][T24236] bridge392: left promiscuous mode [ 2932.172957][T24236] bond410: left promiscuous mode [ 2932.177965][T24236] bridge431: left promiscuous mode [ 2932.215781][T24236] bond411: left promiscuous mode [ 2932.233602][T24236] bond412: left promiscuous mode [ 2932.239300][T24236] bridge433: left promiscuous mode [ 2932.275978][T24236] bond413: left promiscuous mode [ 2932.281457][T24236] bridge434: left promiscuous mode [ 2932.316003][T24236] bond414: left promiscuous mode [ 2932.321644][T24236] bridge435: left promiscuous mode [ 2932.360514][T24236] bond415: left promiscuous mode [ 2932.365559][T24236] bridge437: left promiscuous mode [ 2932.387443][T24236] bond416: left promiscuous mode [ 2932.404595][T24236] bridge438: left promiscuous mode [ 2932.424999][T24236] bond417: left promiscuous mode [ 2932.443025][T24236] bridge439: left promiscuous mode [ 2932.480841][T24236] bond418: left promiscuous mode [ 2932.485848][T24236] bridge441: left promiscuous mode [ 2932.525418][T24236] bond419: left promiscuous mode [ 2932.531114][T24236] bridge442: left promiscuous mode [ 2932.565124][T24236] bond420: left promiscuous mode [ 2932.580812][T24236] bond421: left promiscuous mode [ 2932.585811][T24236] bridge443: left promiscuous mode [ 2932.621246][T24236] bond422: left promiscuous mode [ 2932.626247][T24236] bridge444: left promiscuous mode [ 2932.661716][T24236] bond423: left promiscuous mode [ 2932.666812][T24236] bridge445: left promiscuous mode [ 2932.706226][T24236] bond424: left 
promiscuous mode [ 2932.711771][T24236] bridge446: left promiscuous mode [ 2932.740400][T24236] bond425: left promiscuous mode [ 2932.745425][T24236] bridge447: left promiscuous mode [ 2932.791150][T24236] bond426: left promiscuous mode [ 2932.796146][T24236] bridge448: left promiscuous mode [ 2932.835452][T24236] bond427: left promiscuous mode [ 2932.841209][T24236] bridge449: left promiscuous mode [ 2932.880773][T24236] bond428: left promiscuous mode [ 2932.885931][T24236] bridge450: left promiscuous mode [ 2932.924799][T24236] bond429: left promiscuous mode [ 2932.930530][T24236] bridge451: left promiscuous mode [ 2932.967280][T24236] bond430: left promiscuous mode [ 2932.972434][T24236] bridge452: left promiscuous mode [ 2933.010327][T24236] bond431: left promiscuous mode [ 2933.027106][T24236] bond432: left promiscuous mode [ 2933.032446][T24236] bridge453: left promiscuous mode [ 2933.065746][T24236] bond433: left promiscuous mode [ 2933.073657][T24236] bridge454: left promiscuous mode [ 2933.111689][T24236] bond434: left promiscuous mode [ 2933.116678][T24236] bridge455: left promiscuous mode [ 2933.154686][T24236] bond435: left promiscuous mode [ 2933.159994][T24236] bridge456: left promiscuous mode [ 2933.217589][T24236] bond436: left promiscuous mode [ 2933.223191][T24236] bridge459: left promiscuous mode [ 2933.260241][T24236] bond437: left promiscuous mode [ 2933.265217][T24236] bridge460: left promiscuous mode [ 2933.308747][T24236] bond438: left promiscuous mode [ 2933.313748][T24236] bridge461: left promiscuous mode [ 2933.343442][T24236] bond439: left promiscuous mode [ 2933.348456][T24236] bridge0: left promiscuous mode [ 2933.392077][T24236] bond440: left promiscuous mode [ 2933.397057][T24236] bridge462: left promiscuous mode [ 2933.434217][T24236] bond441: left promiscuous mode [ 2933.439504][T24236] bridge463: left promiscuous mode [ 2933.476100][T24236] bond442: left promiscuous mode [ 2933.481669][T24236] bridge464: left promiscuous mode [ 2933.519348][T24236] bond443: left promiscuous mode [ 2933.524326][T24236] bridge465: left promiscuous mode [ 2933.561016][T24236] bond444: left promiscuous mode [ 2933.566025][T24236] bridge466: left promiscuous mode [ 2933.603323][T24236] bond445: left promiscuous mode [ 2933.608338][T24236] bridge467: left promiscuous mode [ 2933.631475][T24236] bond446: left promiscuous mode [ 2933.637110][T24236] bridge468: left promiscuous mode [ 2933.681304][T24236] bond447: left promiscuous mode [ 2933.686312][T24236] bridge469: left promiscuous mode [ 2933.708336][T24236] bond448: left promiscuous mode [ 2933.713881][T24236] bridge470: left promiscuous mode [ 2933.753444][T24236] bond449: left promiscuous mode [ 2933.758425][T24236] bridge471: left promiscuous mode [ 2933.794958][T24236] bond450: left promiscuous mode [ 2933.813304][T24236] bond451: left promiscuous mode [ 2933.818294][T24236] bridge472: left promiscuous mode [ 2933.836992][T24236] bond452: left promiscuous mode [ 2933.843095][T24236] bridge473: left promiscuous mode [ 2933.862530][T24236] bond453: left promiscuous mode [ 2933.869485][T24236] bridge474: left promiscuous mode [ 2933.905100][T24236] bond454: left promiscuous mode [ 2933.910503][T24236] bridge475: left promiscuous mode [ 2933.958069][T24236] bond455: left promiscuous mode [ 2933.967802][T24236] bridge476: left promiscuous mode [ 2934.005765][T24236] bond456: left promiscuous mode [ 2934.011036][T24236] bridge477: left promiscuous mode [ 2934.047362][T24236] bond457: left promiscuous mode [ 
2934.052788][T24236] bridge478: left promiscuous mode [ 2934.090043][T24236] bond458: left promiscuous mode [ 2934.095033][T24236] bridge479: left promiscuous mode [ 2934.131796][T24236] bond459: left promiscuous mode [ 2934.136824][T24236] vcan1: left promiscuous mode [ 2934.156579][T24236] bond460: left promiscuous mode [ 2934.162301][T24236] bridge480: left promiscuous mode [ 2934.197067][T24236] bond461: left promiscuous mode [ 2934.202364][T24236] bridge481: left promiscuous mode [ 2934.240997][T24236] bond462: left promiscuous mode [ 2934.245986][T24236] bridge482: left promiscuous mode [ 2934.281114][T24236] bond463: left promiscuous mode [ 2934.286251][T24236] bridge483: left promiscuous mode [ 2934.327570][T24236] bond464: left promiscuous mode [ 2934.332854][T24236] bridge484: left promiscuous mode [ 2934.367991][T24236] bond465: left promiscuous mode [ 2934.376053][T24236] bridge485: left promiscuous mode [ 2934.414364][T24236] bond466: left promiscuous mode [ 2934.419695][T24236] bridge486: left promiscuous mode [ 2934.456824][T24236] bond467: left promiscuous mode [ 2934.462402][T24236] bridge487: left promiscuous mode [ 2934.491769][T24236] bond468: left promiscuous mode [ 2934.496792][T24236] bridge488: left promiscuous mode [ 2934.524778][T24236] bond469: left promiscuous mode [ 2934.537024][T24236] bridge490: left promiscuous mode [ 2934.574130][T24236] bond470: left promiscuous mode [ 2934.580005][T24236] bridge491: left promiscuous mode [ 2934.600493][T24236] bond471: left promiscuous mode [ 2934.605516][T24236] bridge492: left promiscuous mode [ 2934.625664][T24236] bond472: left promiscuous mode [ 2934.632603][T24236] bridge493: left promiscuous mode [ 2934.658445][T24236] bond473: left promiscuous mode [ 2934.664486][T24236] bridge494: left promiscuous mode [ 2934.716989][T24236] bond474: left promiscuous mode [ 2934.722670][T24236] bridge495: left promiscuous mode [ 2934.761930][T24236] bond475: left promiscuous mode [ 2934.766962][T24236] bridge496: left promiscuous mode [ 2934.804986][T24236] bond476: left promiscuous mode [ 2934.810338][T24236] bridge497: left promiscuous mode [ 2934.849612][T24236] bond477: left promiscuous mode [ 2934.854632][T24236] bridge498: left promiscuous mode [ 2934.895223][T24236] bond478: left promiscuous mode [ 2934.900630][T24236] bridge499: left promiscuous mode [ 2934.939061][T24236] bond479: left promiscuous mode [ 2934.944270][T24236] bridge500: left promiscuous mode [ 2934.981019][T24236] bond480: left promiscuous mode [ 2934.986082][T24236] bridge501: left promiscuous mode [ 2935.021729][T24236] bond481: left promiscuous mode [ 2935.026732][T24236] bridge502: left promiscuous mode [ 2935.062596][T24236] bond482: left promiscuous mode [ 2935.067712][T24236] bridge503: left promiscuous mode [ 2935.132789][T24236] bridge587: left promiscuous mode [ 2935.138045][T24236] bridge588: left promiscuous mode [ 2935.144270][T24236] bridge588: left allmulticast mode [ 2935.151664][T24236] bridge590: left promiscuous mode [ 2935.156831][T24236] bridge590: left allmulticast mode [ 2935.185025][T24236] bridge595: left promiscuous mode [ 2935.191353][T24236] bridge595: left allmulticast mode [ 2935.217366][T24236] bridge597: left promiscuous mode [ 2935.224718][T24236] bridge597: left allmulticast mode [ 2935.252257][T24236] bridge599: left promiscuous mode [ 2935.257416][T24236] bridge599: left allmulticast mode [ 2935.274764][T24236] bridge600: left promiscuous mode [ 2935.280967][T24236] bridge600: left allmulticast mode [ 
2935.286359][T24236] bridge601: left promiscuous mode [ 2935.292358][T24236] bridge601: left allmulticast mode [ 2935.300003][T24236] bridge602: left promiscuous mode [ 2935.305163][T24236] bridge602: left allmulticast mode [ 2935.342920][T24236] bridge737: left promiscuous mode [ 2935.361559][T24236] bridge738: left promiscuous mode [ 2935.374008][T24236] bridge739: left promiscuous mode [ 2935.411976][T24236] bridge740: left promiscuous mode [ 2935.438353][T24236] bridge741: left promiscuous mode [ 2935.566121][T24236] bridge778: left promiscuous mode [ 2935.572087][T24236] bridge778: left allmulticast mode [ 2935.577441][T24236] bridge779: left promiscuous mode [ 2935.588349][T24236] bridge779: left allmulticast mode [ 2935.836870][T24236] bond483: left promiscuous mode [ 2935.855333][T24236] bond484: left promiscuous mode [ 2935.874219][T24236] bond485: left promiscuous mode [ 2935.890513][T24236] bond486: left promiscuous mode [ 2935.906333][T24236] bond487: left promiscuous mode [ 2936.009156][T24236] bond488: left promiscuous mode [ 2936.016550][T24236] bridge886: left allmulticast mode [ 2936.028116][T24236] bond490: left promiscuous mode [ 2936.041912][T24236] bond491: left promiscuous mode [ 2936.059592][T24236] bond492: left promiscuous mode [ 2936.064750][T24236] bridge887: left promiscuous mode [ 2936.133139][T24236] bond494: left promiscuous mode [ 2936.170925][T24236] bond495: left promiscuous mode [ 2936.186563][T24236] bond496: left promiscuous mode [ 2936.203285][T24236] bond497: left promiscuous mode [ 2936.224046][T24236] bond498: left promiscuous mode [ 2936.240725][T24236] bond499: left promiscuous mode [ 2936.252526][T24236] bond500: left promiscuous mode [ 2936.263175][T24236] bond501: left promiscuous mode [ 2936.276308][T24236] bond502: left promiscuous mode [ 2936.293089][T24236] bond503: left promiscuous mode [ 2936.303297][T24236] bond504: left promiscuous mode [ 2936.321587][T24236] bond505: left promiscuous mode [ 2936.343414][T24236] bond506: left promiscuous mode [ 2936.348445][T24236] bridge903: left promiscuous mode [ 2936.395233][T24236] bond507: left promiscuous mode [ 2936.400832][T24236] bridge904: left promiscuous mode [ 2936.434825][T24236] bond508: left promiscuous mode [ 2936.440485][T24236] bridge905: left promiscuous mode [ 2936.490359][T24236] bond510: left promiscuous mode [ 2936.570300][T24236] bond511: left promiscuous mode [ 2936.585966][T24236] bond512: left promiscuous mode [ 2936.602979][T24236] bond513: left promiscuous mode [ 2936.612752][T24236] bond514: left promiscuous mode [ 2936.648527][T24241] validate_nla: 10 callbacks suppressed [ 2936.654266][T24241] netlink: 'syz-executor.5': attribute type 1 has an invalid length. 
17:05:01 executing program 3: r0 = socket$netlink(0x10, 0x3, 0x0) socket(0x0, 0x0, 0x0) sendmsg$nl_route(0xffffffffffffffff, 0x0, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000100)=ANY=[@ANYBLOB="4824000010001fff000000db0000000000000000", @ANYRES32=0x0, @ANYBLOB="0000000000000000280012800b0001006272696467650000180002800c002e00000000000100000005002a0000000000"], 0x48}}, 0x0) 17:05:01 executing program 5: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x14b1, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) [ 2936.669004][T24241] workqueue: Failed to create a rescuer kthread for wq "bond1139": -EINTR [ 2936.734598][T24251] netlink: 'syz-executor.2': attribute type 1 has an invalid length. 17:05:01 executing program 3: r0 = socket$netlink(0x10, 0x3, 0x0) socket(0x0, 0x0, 0x0) (async) sendmsg$nl_route(0xffffffffffffffff, 0x0, 0x0) (async) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000100)=ANY=[@ANYBLOB="4824000010001fff000000db0000000000000000", @ANYRES32=0x0, @ANYBLOB="0000000000000000280012800b0001006272696467650000180002800c002e00000000000100000005002a0000000000"], 0x48}}, 0x0) 17:05:01 executing program 1: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x28a, 0xa, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8}]}, 0x3c}, 0x1, 0x6000}, 0x0) 17:05:01 executing program 2: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x252, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, 
@IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) [ 2936.764481][T24251] workqueue: Failed to create a rescuer kthread for wq "bond1060": -EINTR [ 2936.859841][T24255] netlink: 'syz-executor.1': attribute type 1 has an invalid length. [ 2936.887952][T24255] workqueue: Failed to create a rescuer kthread for wq "bond823": -EINTR [ 2936.888261][T24258] netlink: 'syz-executor.4': attribute type 1 has an invalid length. [ 2936.931229][T24258] workqueue: Failed to create a rescuer kthread for wq "bond701": -EINTR [ 2936.972661][T24280] netlink: 'syz-executor.5': attribute type 1 has an invalid length. [ 2937.015817][T24280] bond1139: entered promiscuous mode [ 2937.022717][T24280] 8021q: adding VLAN 0 to HW filter on device bond1139 [ 2937.071225][T24287] bond1139: (slave bridge1165): making interface the new active one [ 2937.079367][T24287] bridge1165: entered promiscuous mode [ 2937.091305][T24287] bond1139: (slave bridge1165): Enslaving as an active interface with an up link [ 2937.101196][T24291] netlink: 'syz-executor.1': attribute type 1 has an invalid length. [ 2937.144796][T24291] bond823: entered promiscuous mode [ 2937.151335][T24291] 8021q: adding VLAN 0 to HW filter on device bond823 [ 2937.164572][T24294] netlink: 'syz-executor.2': attribute type 1 has an invalid length. [ 2937.209864][T24294] bond1060: entered promiscuous mode [ 2937.224080][T24294] 8021q: adding VLAN 0 to HW filter on device bond1060 [ 2937.336655][T24298] bond1060: (slave bridge1126): making interface the new active one [ 2937.345926][T24298] bridge1126: entered promiscuous mode [ 2937.361322][T24298] bond1060: (slave bridge1126): Enslaving as an active interface with an up link [ 2937.387349][T24230] lo speed is unknown, defaulting to 1000 17:05:03 executing program 0: r0 = socket$inet6_sctp(0xa, 0x1, 0x84) bind$inet6(r0, &(0x7f00004b8fe4)={0xa, 0x4e23, 0x0, @empty}, 0x1c) (async) sendto$inet6(r0, &(0x7f00000002c0)='X', 0x1a000, 0x0, &(0x7f0000000200)={0xa, 0x4e23, 0x0, @loopback}, 0x1c) r1 = socket$inet6_sctp(0xa, 0x5, 0x84) (async) r2 = socket$inet6_sctp(0xa, 0x5, 0x84) getsockopt$inet_sctp6_SCTP_SOCKOPT_CONNECTX3(r0, 0x84, 0x6f, &(0x7f0000000000)={0x0, 0x0, &(0x7f0000000040)}, &(0x7f0000000240)=0x10) (async) getsockopt$inet_sctp6_SCTP_GET_ASSOC_ID_LIST(r2, 0x84, 0x1d, &(0x7f0000000080)=ANY=[@ANYBLOB="f77f8000", @ANYRES32=0x0], &(0x7f00000000c0)=0x8) (async) setsockopt$inet_sctp6_SCTP_DELAYED_SACK(r1, 0x84, 0x10, &(0x7f00000003c0)=@sack_info={r4, 0x4000, 0x1}, 0xc) (async) r5 = socket$inet_udp(0x2, 0x2, 0x0) bind$inet(r5, &(0x7f0000000480)={0x2, 0x1, @initdev={0xac, 0x1e, 0x1, 0x0}}, 0x10) (async) setsockopt$sock_int(r5, 0x1, 0x6, &(0x7f0000000140)=0x32, 0x4) connect$inet(r5, &(0x7f0000000280)={0x2, 0x0, @broadcast}, 0x10) (async) sendfile(r0, 0xffffffffffffffff, 0x0, 0x10000000000) (async) sendmmsg$inet(r5, &(0x7f0000002080)=[{{0x0, 0x0, 0x0, 0x0, &(0x7f0000000680)=ANY=[@ANYBLOB="100000000000000000000060070000001c00000000008f2b6395c6be000000001c2065c3c932c1267f82fd1bf1b2885cc1ce88102f564db395b8285f7d9b5ffc9133c5cd03c4cf0317205b36a624cc328e4f3d8c093868e64eb64afa752e19", @ANYRES32=r3, @ANYBLOB="ac1485cb7d1a03c52014aa0066b66c5a6016db9dbb0ae87569b61cac9d80aee4881c9d0000000000000091f50800d2bda4dadee6192e8159224f47eb0162d0a926359ed415921a01aa33cf84cc05110d0a330b3aba806ed1c8bb41e91f30740146a8d032eccf98e899fbd8f0a87c7da60070ba261a0ee0b5e4be7623b3"], 0x30}}], 0x1, 0x0) (async) r6 = socket$netlink(0x10, 0x3, 0x0) sendmsg$netlink(r6, &(0x7f0000006440)={0x0, 0x0, 
&(0x7f00000063c0)=[{&(0x7f0000000600)=ANY=[@ANYBLOB="340000001000010000000000000000005e000000faff5af608000000", @ANYRES32=0x0, @ANYBLOB="14001b0000000000000000210000000000000001"], 0x34}], 0x1}, 0x0) (async) r7 = socket$inet6(0xa, 0x800000000000002, 0x0) setsockopt$inet6_int(r7, 0x29, 0x46, &(0x7f0000000040)=0x3, 0x4) (async) connect$inet6(r7, &(0x7f0000000000)={0xa, 0x0, 0x0, @local, 0x5}, 0x1c) (async) sendmmsg$inet(r7, &(0x7f0000002240)=[{{0x0, 0x0, 0x0}}], 0x40000e2, 0x0) (async) getsockopt$inet_sctp6_SCTP_RTOINFO(r0, 0x84, 0x0, &(0x7f0000000100)={r4, 0x8000, 0x3ff, 0x7}, &(0x7f0000000140)=0x10) (async) r8 = bpf$BPF_PROG_RAW_TRACEPOINT_LOAD(0x5, &(0x7f0000000240)={0x11, 0x3, &(0x7f0000000640)=ANY=[@ANYRES32=r1], &(0x7f00000000c0)='GPL\x00', 0x4, 0x91, &(0x7f0000000000)=""/145, 0x0, 0x0, '\x00', 0x0, 0x0, 0xffffffffffffffff, 0x8, 0x0, 0x0, 0x10, 0x0}, 0x80) bpf$BPF_RAW_TRACEPOINT_OPEN(0x11, &(0x7f0000000200)={&(0x7f00000004c0)='contention_begin\x00', r8}, 0x10) (async) ioctl$FS_IOC_GETVERSION(r8, 0x80087601, &(0x7f0000000180)) (async) unshare(0x6c060000) r9 = accept4(r0, &(0x7f0000000400)=@nfc, &(0x7f00000001c0)=0x80, 0x81c00) sendto$inet6(r9, &(0x7f0000000500)="259330942194c017edb00fc5e63de78f0af422ce261a6642858a3527591b7554225841729f1cc478555713800f8c976bc372b7dd22d0616b818da75466de12bb80a24b81562446c462f83a94362a1302", 0x50, 0x88c0, &(0x7f0000000380)={0xa, 0x4e21, 0xce9b, @private0={0xfc, 0x0, '\x00', 0x1}, 0x22b8}, 0x1c) (async) mmap(&(0x7f0000000000/0xb36000)=nil, 0xb36000, 0x0, 0x10, 0xffffffffffffffff, 0x0) (async) setsockopt$EBT_SO_SET_ENTRIES(0xffffffffffffffff, 0x0, 0x80, 0x0, 0x250) socket$inet_smc(0x2b, 0x1, 0x0) 17:05:03 executing program 3: r0 = socket$netlink(0x10, 0x3, 0x0) socket(0x0, 0x0, 0x0) (async) socket(0x0, 0x0, 0x0) sendmsg$nl_route(0xffffffffffffffff, 0x0, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000100)=ANY=[@ANYBLOB="4824000010001fff000000db0000000000000000", @ANYRES32=0x0, @ANYBLOB="0000000000000000280012800b0001006272696467650000180002800c002e00000000000100000005002a0000000000"], 0x48}}, 0x0) 17:05:03 executing program 4: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x28a, 0x0, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0x43}]}, 0x3c}}, 0x0) 17:05:03 executing program 1: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) 
sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x28a, 0xa, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8}]}, 0x3c}, 0x1, 0x6558}, 0x0) 17:05:03 executing program 5: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x14b3, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) 17:05:03 executing program 2: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x260, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) [ 2938.541435][T24312] netlink: 'syz-executor.4': attribute type 1 has an invalid length. 
17:05:03 executing program 0: r0 = socket$inet6_sctp(0xa, 0x1, 0x84) bind$inet6(r0, &(0x7f00004b8fe4)={0xa, 0x4e23, 0x0, @empty}, 0x1c) (async) bind$inet6(r0, &(0x7f00004b8fe4)={0xa, 0x4e23, 0x0, @empty}, 0x1c) sendto$inet6(r0, &(0x7f00000002c0)='X', 0x1a000, 0x0, &(0x7f0000000200)={0xa, 0x4e23, 0x0, @loopback}, 0x1c) (async) sendto$inet6(r0, &(0x7f00000002c0)='X', 0x1a000, 0x0, &(0x7f0000000200)={0xa, 0x4e23, 0x0, @loopback}, 0x1c) socket$inet6_sctp(0xa, 0x5, 0x84) (async) r1 = socket$inet6_sctp(0xa, 0x5, 0x84) socket$inet6_sctp(0xa, 0x5, 0x84) (async) r2 = socket$inet6_sctp(0xa, 0x5, 0x84) getsockopt$inet_sctp6_SCTP_SOCKOPT_CONNECTX3(r0, 0x84, 0x6f, &(0x7f0000000000)={0x0, 0x0, &(0x7f0000000040)}, &(0x7f0000000240)=0x10) getsockopt$inet_sctp6_SCTP_GET_ASSOC_ID_LIST(r2, 0x84, 0x1d, &(0x7f0000000080)=ANY=[@ANYBLOB="f77f8000", @ANYRES32=0x0], &(0x7f00000000c0)=0x8) setsockopt$inet_sctp6_SCTP_DELAYED_SACK(r1, 0x84, 0x10, &(0x7f00000003c0)=@sack_info={r4, 0x4000, 0x1}, 0xc) r5 = socket$inet_udp(0x2, 0x2, 0x0) bind$inet(r5, &(0x7f0000000480)={0x2, 0x1, @initdev={0xac, 0x1e, 0x1, 0x0}}, 0x10) setsockopt$sock_int(r5, 0x1, 0x6, &(0x7f0000000140)=0x32, 0x4) connect$inet(r5, &(0x7f0000000280)={0x2, 0x0, @broadcast}, 0x10) sendfile(r0, 0xffffffffffffffff, 0x0, 0x10000000000) (async) sendfile(r0, 0xffffffffffffffff, 0x0, 0x10000000000) sendmmsg$inet(r5, &(0x7f0000002080)=[{{0x0, 0x0, 0x0, 0x0, &(0x7f0000000680)=ANY=[@ANYBLOB="100000000000000000000060070000001c00000000008f2b6395c6be000000001c2065c3c932c1267f82fd1bf1b2885cc1ce88102f564db395b8285f7d9b5ffc9133c5cd03c4cf0317205b36a624cc328e4f3d8c093868e64eb64afa752e19", @ANYRES32=r3, @ANYBLOB="ac1485cb7d1a03c52014aa0066b66c5a6016db9dbb0ae87569b61cac9d80aee4881c9d0000000000000091f50800d2bda4dadee6192e8159224f47eb0162d0a926359ed415921a01aa33cf84cc05110d0a330b3aba806ed1c8bb41e91f30740146a8d032eccf98e899fbd8f0a87c7da60070ba261a0ee0b5e4be7623b3"], 0x30}}], 0x1, 0x0) socket$netlink(0x10, 0x3, 0x0) (async) r6 = socket$netlink(0x10, 0x3, 0x0) sendmsg$netlink(r6, &(0x7f0000006440)={0x0, 0x0, &(0x7f00000063c0)=[{&(0x7f0000000600)=ANY=[@ANYBLOB="340000001000010000000000000000005e000000faff5af608000000", @ANYRES32=0x0, @ANYBLOB="14001b0000000000000000210000000000000001"], 0x34}], 0x1}, 0x0) r7 = socket$inet6(0xa, 0x800000000000002, 0x0) setsockopt$inet6_int(r7, 0x29, 0x46, &(0x7f0000000040)=0x3, 0x4) (async) setsockopt$inet6_int(r7, 0x29, 0x46, &(0x7f0000000040)=0x3, 0x4) connect$inet6(r7, &(0x7f0000000000)={0xa, 0x0, 0x0, @local, 0x5}, 0x1c) sendmmsg$inet(r7, &(0x7f0000002240)=[{{0x0, 0x0, 0x0}}], 0x40000e2, 0x0) (async) sendmmsg$inet(r7, &(0x7f0000002240)=[{{0x0, 0x0, 0x0}}], 0x40000e2, 0x0) getsockopt$inet_sctp6_SCTP_RTOINFO(r0, 0x84, 0x0, &(0x7f0000000100)={r4, 0x8000, 0x3ff, 0x7}, &(0x7f0000000140)=0x10) r8 = bpf$BPF_PROG_RAW_TRACEPOINT_LOAD(0x5, &(0x7f0000000240)={0x11, 0x3, &(0x7f0000000640)=ANY=[@ANYRES32=r1], &(0x7f00000000c0)='GPL\x00', 0x4, 0x91, &(0x7f0000000000)=""/145, 0x0, 0x0, '\x00', 0x0, 0x0, 0xffffffffffffffff, 0x8, 0x0, 0x0, 0x10, 0x0}, 0x80) bpf$BPF_RAW_TRACEPOINT_OPEN(0x11, &(0x7f0000000200)={&(0x7f00000004c0)='contention_begin\x00', r8}, 0x10) (async) bpf$BPF_RAW_TRACEPOINT_OPEN(0x11, &(0x7f0000000200)={&(0x7f00000004c0)='contention_begin\x00', r8}, 0x10) ioctl$FS_IOC_GETVERSION(r8, 0x80087601, &(0x7f0000000180)) (async) ioctl$FS_IOC_GETVERSION(r8, 0x80087601, &(0x7f0000000180)) unshare(0x6c060000) r9 = accept4(r0, &(0x7f0000000400)=@nfc, &(0x7f00000001c0)=0x80, 0x81c00) sendto$inet6(r9, 
&(0x7f0000000500)="259330942194c017edb00fc5e63de78f0af422ce261a6642858a3527591b7554225841729f1cc478555713800f8c976bc372b7dd22d0616b818da75466de12bb80a24b81562446c462f83a94362a1302", 0x50, 0x88c0, &(0x7f0000000380)={0xa, 0x4e21, 0xce9b, @private0={0xfc, 0x0, '\x00', 0x1}, 0x22b8}, 0x1c) mmap(&(0x7f0000000000/0xb36000)=nil, 0xb36000, 0x0, 0x10, 0xffffffffffffffff, 0x0) setsockopt$EBT_SO_SET_ENTRIES(0xffffffffffffffff, 0x0, 0x80, 0x0, 0x250) socket$inet_smc(0x2b, 0x1, 0x0) [ 2938.611225][T24312] bond701: entered promiscuous mode [ 2938.618225][T24312] 8021q: adding VLAN 0 to HW filter on device bond701 [ 2938.631598][T24309] netlink: 'syz-executor.1': attribute type 1 has an invalid length. 17:05:03 executing program 3: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket(0x0, 0x0, 0x0) sendmsg$nl_route(0xffffffffffffffff, 0x0, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000100)=@newlink={0x48, 0x10, 0xffffff1f, 0x800000, 0x25dfdbfb, {}, [@IFLA_LINKINFO={0x28, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x18, 0x2, 0x0, 0x1, [@IFLA_BR_MULTI_BOOLOPT={0xc}, @IFLA_BR_MCAST_STATS_ENABLED={0x5}]}}}]}, 0x48}}, 0x0) sendmsg$nl_route_sched(r1, &(0x7f0000000080)={&(0x7f0000000000)={0x10, 0x0, 0x0, 0x20000}, 0xc, &(0x7f0000000040)={&(0x7f00000006c0)=@newtaction={0x1f50, 0x30, 0x400, 0x70bd29, 0x25dfdbfc, {}, [{0x1f3c, 0x1, [@m_sample={0x8c, 0x1f, 0x0, 0x0, {{0xb}, {0x3c, 0x2, 0x0, 0x1, [@TCA_SAMPLE_RATE={0x8, 0x3, 0x12000000}, @TCA_SAMPLE_TRUNC_SIZE={0x8, 0x4, 0x58}, @TCA_SAMPLE_PSAMPLE_GROUP={0x8, 0x5, 0x15}, @TCA_SAMPLE_PARMS={0x18, 0x2, {0x8, 0x3737, 0x0, 0x0, 0x8}}, @TCA_SAMPLE_RATE={0x8, 0x3, 0x8}]}, {0x27, 0x6, "bc0267c20356ad242f219c0b50c9f6caaa7e2672ffaa1ac7f967bf9f9a2ccad75d3870"}, {0xc, 0x7, {0x1}}, {0xc, 0x8, {0x1, 0x1}}}}, @m_mpls={0x58, 0x14, 0x0, 0x0, {{0x9}, {0x2c, 0x2, 0x0, 0x1, [@TCA_MPLS_TTL={0x5, 0x7, 0xb7}, @TCA_MPLS_TTL={0x5, 0x7, 0x1}, @TCA_MPLS_TC={0x5, 0x6, 0x6}, @TCA_MPLS_TTL={0x5, 0x7, 0x3}, @TCA_MPLS_TTL={0x5, 0x7, 0xf7}]}, {0x4}, {0xc, 0x7, {0x1, 0x1}}, {0xc, 0x8, {0x1, 0x2}}}}, @m_ctinfo={0x12c, 0x7, 0x0, 0x0, {{0xb}, {0x3c, 0x2, 0x0, 0x1, [@TCA_CTINFO_ACT={0x18, 0x3, {0xdcb6, 0xffff, 0x0, 0x1, 0xc3d}}, @TCA_CTINFO_PARMS_DSCP_STATEMASK={0x8, 0x6, 0x1}, @TCA_CTINFO_PARMS_DSCP_STATEMASK={0x8, 0x6, 0xd4fd}, @TCA_CTINFO_PARMS_DSCP_STATEMASK={0x8, 0x6, 0x3ff}, @TCA_CTINFO_ZONE={0x6, 0x4, 0x3800}]}, {0xc6, 0x6, "46e7943225b3836ee6df90dbe06696c965122e6d417bf835dd49b46c5ff4f10b40b5f5a525c06a0f0d7f04ba965029bae430c92e5b25e8ec4beed0c21103b7e59e2302f71e2f9b95d21d564a1809abd907a7eb7b9b22dba5e3c9ffcf2587ae4d2692365674a34b4c57588c71d62386ff8a7a2d1aff8b367c6dad01a0391e24fd89d4232e91f7b7098f2b9fb680e78c93bbdf65333b930b9079d23f33c90e790f3def125bcfadca32a525ffc31c87e857c98141a4af7ced49aa96a0cb53311ffd029f"}, {0xc, 0x7, {0x1, 0x1}}, {0xc, 0x8, {0x2, 0x1}}}}, @m_vlan={0x150, 0x0, 0x0, 0x0, {{0x9}, {0x90, 0x2, 0x0, 0x1, [@TCA_VLAN_PARMS={0x1c, 0x2, {{0x7, 0x8e3, 0x3, 0x200, 0x40}, 0x2}}, @TCA_VLAN_PUSH_VLAN_ID={0x6, 0x3, 0x786}, @TCA_VLAN_PUSH_VLAN_PROTOCOL={0x6, 0x4, 0x8100}, @TCA_VLAN_PUSH_VLAN_PRIORITY={0x5, 0x6, 0x1}, @TCA_VLAN_PUSH_VLAN_PRIORITY={0x5, 0x6, 0x3}, @TCA_VLAN_PARMS={0x1c, 0x2, {{0x6, 0x5c, 0x0, 0x2, 0x99e}, 0x1}}, @TCA_VLAN_PUSH_VLAN_PRIORITY={0x5, 0x6, 0x5}, @TCA_VLAN_PUSH_VLAN_PROTOCOL={0x6, 0x4, 0x88a8}, @TCA_VLAN_PUSH_VLAN_ID={0x6, 0x3, 0xd34}, @TCA_VLAN_PARMS={0x1c, 0x2, {{0x6, 0xffff6b54, 0x0, 0x9, 0xd0d5}, 0x1}}]}, {0x96, 0x6, 
"cf3b657026502dca4b0df61694bb3a0d60d4d7b0de5ba95c7394cfba0718c925f03a5fb64d61383255baabda0867ef293bc56b94e486a9651e8c914567f12196dc3eb67df8f28bed1c36ce590673a1c1f17679eda06a2964bc0f65040bae97b73546994ed404870df6b573aff5076420f6ca756c7e828e539fe2a598efd1fa633eefecee9a034f08581045e18919abbc1ca1"}, {0xc, 0x7, {0x6ad89bd1a04647e6, 0x57359b85e65c6645}}, {0xc, 0x8, {0x1, 0x7}}}}, @m_simple={0xf8, 0x6, 0x0, 0x0, {{0xb}, {0xac, 0x2, 0x0, 0x1, [@TCA_DEF_DATA={0x7, 0x3, '\xda-\x00'}, @TCA_DEF_PARMS={0x18, 0x2, {0x3, 0x8, 0x0, 0x81, 0x9}}, @TCA_DEF_DATA={0xb, 0x3, 'bridge\x00'}, @TCA_DEF_DATA={0x7, 0x3, '^%\x00'}, @TCA_DEF_DATA={0x5, 0x3, '\x00'}, @TCA_DEF_PARMS={0x18, 0x2, {0x7e, 0x7, 0x6, 0x5, 0x80}}, @TCA_DEF_PARMS={0x18, 0x2, {0x2, 0x4, 0x2, 0x122, 0xf2}}, @TCA_DEF_PARMS={0x18, 0x2, {0x4, 0x1, 0x10000000, 0x200, 0x7}}, @TCA_DEF_PARMS={0x18, 0x2, {0xf0f8, 0x2, 0x3, 0x8, 0x101}}, @TCA_DEF_DATA={0x9, 0x3, '/{(\\\x00'}]}, {0x21, 0x6, "6ae0290f464bfd9cbaccee8b1aa9a3bdba1b05b8df66408ee30de38cb6"}, {0xc}, {0xc, 0x8, {0x2, 0x3}}}}, @m_police={0x1ae0, 0x0, 0x0, 0x0, {{0xb}, {0x1a7c, 0x2, 0x0, 0x1, [[@TCA_POLICE_PEAKRATE64={0xc, 0x9, 0x8}, @TCA_POLICE_TBF={0x3c, 0x1, {0x853, 0x3, 0x9, 0x7, 0x1, {0x9e, 0x2, 0x80, 0xc9ea, 0x7f, 0x8001}, {0x3, 0x0, 0x9, 0x9, 0x401, 0x20}, 0x1ff, 0x0, 0x7}}, @TCA_POLICE_RATE64={0xc, 0x8, 0x200}, @TCA_POLICE_AVRATE={0x8, 0x4, 0xff}, @TCA_POLICE_AVRATE={0x8, 0x4, 0x4}], [@TCA_POLICE_PEAKRATE64={0xc, 0x9, 0x8}], [@TCA_POLICE_PEAKRATE64={0xc, 0x9, 0x3a8962a0}, @TCA_POLICE_RESULT={0x8, 0x5, 0x101}], [@TCA_POLICE_RATE64={0xc, 0x8, 0x3}, @TCA_POLICE_AVRATE={0x8, 0x4, 0x10000}, @TCA_POLICE_PEAKRATE64={0xc, 0x9, 0x200}, @TCA_POLICE_PEAKRATE64={0xc, 0x9, 0x100000000}, @TCA_POLICE_PEAKRATE64={0xc, 0x9, 0x7fff}], [@TCA_POLICE_RATE={0x404, 0x2, [0x2, 0xed69, 0x7ff, 0x0, 0x80000000, 0x400, 0x2, 0x9, 0x40, 0x0, 0x7fffffff, 0x1, 0xf2f1, 0x5, 0x6, 0x6, 0x83, 0x2, 0x5, 0x10001, 0xfff, 0x400, 0x7, 0x5, 0x81, 0x9, 0x9, 0x9, 0x4, 0x4, 0xffffffff, 0x9, 0x7, 0xfffffffb, 0x1eb, 0x100, 0x4, 0x400, 0x7fffffff, 0x6, 0x1f, 0x4, 0x81, 0xfffffff9, 0x7, 0x4, 0x2, 0x685c6208, 0x7, 0xb800, 0x3, 0x3ff, 0x7, 0x10001, 0x40, 0x8001, 0xcfdc, 0x7, 0x4, 0x7, 0x9, 0x7, 0xbb, 0x1, 0x7, 0x8, 0x6, 0x4fe4, 0x6, 0x7, 0x7, 0xffffffff, 0x3, 0x0, 0x81, 0x2, 0xfff, 0x2, 0x1000, 0xffffffff, 0x9, 0x101, 0x2, 0x0, 0x6, 0x40, 0x2, 0x8, 0x2, 0x7, 0x7, 0x6, 0xda2, 0x1, 0xad82, 0xfffffff7, 0x9, 0x8, 0x6, 0x9, 0x8, 0x2, 0x6, 0x1000, 0x8, 0x4, 0x80000000, 0x7fffffff, 0x1, 0x0, 0x9, 0x8, 0x1f, 0x7fffffff, 0x200, 0x3, 0x4, 0x889a, 0x4, 0xfff, 0x97, 0xffffffff, 0x80000000, 0x80000001, 0x400, 0x1, 0x1, 0x8001, 0x7b5, 0x5, 0x1b, 0x7fffffff, 0x3, 0x0, 0x32b36f16, 0x400, 0x3ff, 0x2, 0x6, 0x3, 0x6, 0x7, 0x9, 0x8, 0x7, 0xb31f, 0x800, 0x3, 0xfff, 0x3ff, 0x3f, 0x787, 0x36ac, 0x8eb2, 0x7ff, 0x3, 0x37, 0x2, 0xfff, 0x0, 0x7, 0x0, 0xe72, 0xffff, 0x5c, 0x67, 0xffff, 0x8a, 0x1f, 0x10000, 0x0, 0x9, 0x3ff, 0x3, 0xffffff7d, 0xfff, 0x9, 0x9, 0x77e4, 0x0, 0x10001, 0xffff8ce4, 0xffff, 0x7, 0x1, 0x7f, 0x10001, 0x0, 0x3, 0x3ff, 0x3, 0x7, 0x779de509, 0x8, 0x40, 0x4, 0xfffffff9, 0x6, 0x4, 0x5, 0xe92, 0x5, 0x6, 0x8, 0xff, 0x2, 0x936, 0x32, 0x0, 0x8, 0x5, 0x4, 0x65, 0x7912, 0x1f, 0x0, 0x2, 0xe3, 0x5, 0x8001, 0x8, 0x6, 0x4, 0x79c7, 0x1, 0x101, 0x0, 0x2, 0x3f, 0x3, 0x9, 0xfff, 0x2, 0xc6, 0x14000000, 0xb0, 0x1, 0x5, 0xfffffffe, 0x10001, 0x9, 0xf56e, 0x7, 0x3, 0x47f, 0x715, 0x74, 0x8001, 0x7, 0x96a, 0x2, 0xd125, 0x1, 0x3, 0x6, 0x7]}, @TCA_POLICE_AVRATE={0x8}, @TCA_POLICE_AVRATE={0x8, 0x4, 0xffff0001}, @TCA_POLICE_TBF={0x3c, 0x1, {0x9, 0x6, 0x4, 0xffffff17, 0x2, {0x4, 
0x0, 0x4, 0x1, 0x2, 0x7}, {0x6, 0x2, 0xf2, 0x4, 0x80, 0x6}, 0x101, 0x8, 0x6}}], [@TCA_POLICE_PEAKRATE64={0xc, 0x9, 0xa7}, @TCA_POLICE_AVRATE={0x8, 0x4, 0x3}, @TCA_POLICE_RATE={0x404, 0x2, [0x8ef546a8, 0x6, 0x3, 0x194b, 0x3e66, 0x1325c36e, 0x3, 0x5, 0x4, 0x1, 0x1014ad66, 0x7, 0x5, 0x0, 0x50, 0x1ff, 0x3c0, 0x10000, 0x8, 0x7, 0x1, 0x6, 0x6, 0x400, 0xff, 0x1, 0x8, 0x400, 0x4, 0x80000001, 0x396, 0xc57, 0x3, 0x80000000, 0x2de16fa0, 0x80000000, 0xa3, 0x0, 0x3647, 0x6, 0x9, 0x1, 0x7, 0x0, 0x8, 0x4, 0x6, 0x4, 0x1, 0x4, 0x10000, 0x2, 0x81, 0x8, 0x4, 0xffffffff, 0xffffffff, 0x2a2c044c, 0x3ff, 0x7fffffff, 0x9, 0x80, 0xffffff65, 0x8, 0x1, 0xc6a, 0x8, 0x401, 0x0, 0x2, 0x3ff, 0x0, 0x8, 0x9, 0xfffffffc, 0x10000, 0x4, 0x4, 0x2, 0x4, 0xec9, 0x1, 0x7d, 0x10000, 0x0, 0x6d7f, 0x6a0, 0x1, 0xba5, 0x1, 0x7fffffff, 0x4, 0x8, 0x4, 0x1000, 0x3, 0x400, 0xcbc9, 0x4, 0x80000001, 0xb403, 0xffff6f9c, 0x59fe, 0x2, 0x1e4, 0x9, 0x7, 0x2, 0x8, 0xffff0000, 0x9, 0x3, 0x7, 0x5, 0x7, 0x1, 0x9, 0xff, 0x7, 0x7, 0x13d9, 0x1, 0x9, 0x7f, 0x0, 0x7fff, 0x80000001, 0x800, 0x9, 0x20, 0x0, 0x7, 0x8, 0x0, 0x2, 0x3, 0x8000000, 0x10000, 0x1, 0x3, 0x6, 0x1, 0x8001, 0x0, 0xfffffffb, 0x2, 0xfffffffb, 0x3ff, 0x4, 0x401, 0xfffffff8, 0x11ab70bf, 0x5, 0xdb, 0x81, 0xfff, 0x10001, 0x7, 0x8, 0x9a, 0x7, 0x7, 0x1f, 0xd633, 0x101, 0xfffffffa, 0xffff, 0x40ad, 0x8, 0xfffffffd, 0x7, 0x0, 0x1, 0x2, 0x1ff, 0x6b, 0xffffff7f, 0x194, 0xd90b, 0x7, 0x4, 0x800, 0x0, 0x8, 0xda82, 0x6, 0xda, 0x6, 0xfffffffc, 0x10001, 0x400, 0x84e, 0x2, 0x3c, 0x7f, 0x6, 0x5, 0x0, 0x5, 0x10000, 0x3, 0x7, 0x4, 0x0, 0x8, 0x1ff, 0x3f, 0xf10, 0x3, 0x7f, 0x6, 0x5, 0x1, 0x0, 0x3, 0x0, 0x8000, 0x1, 0x9, 0x40, 0xff, 0x6, 0x4, 0x1, 0x6, 0x1, 0xdc8a, 0x1, 0x7fff, 0x3f, 0x0, 0x2, 0x4, 0x7, 0x6, 0x3, 0x1, 0xe, 0x0, 0xfff, 0x827c, 0x200, 0x2, 0x7877e776, 0x13, 0x7, 0x8, 0x2, 0x6, 0xfff, 0x401, 0x100, 0x1, 0x6, 0x8, 0x9f]}, @TCA_POLICE_TBF={0x3c, 0x1, {0x41, 0xffffffffdffffffb, 0x389, 0x3f, 0x3, {0x68, 0x0, 0x3, 0x2, 0xff, 0x8f1c}, {0xf9, 0x1, 0x8400, 0x7ff, 0x0, 0x3}, 0x4, 0x2, 0x6}}, @TCA_POLICE_TBF={0x3c, 0x1, {0x8, 0x2, 0x8, 0x200, 0x9, {0x7, 0x1, 0x4, 0x0, 0x1, 0x80}, {0x9, 0x0, 0x0, 0x8, 0x627, 0xffff}, 0x0, 0x8000, 0xffffffff}}], [@TCA_POLICE_AVRATE={0x8, 0x4, 0x3}, @TCA_POLICE_RATE64={0xc, 0x8, 0xe664}, @TCA_POLICE_AVRATE={0x8, 0x4, 0x4}, @TCA_POLICE_AVRATE={0x8, 0x4, 0x254}, @TCA_POLICE_RATE64={0xc, 0x8, 0x5}, @TCA_POLICE_PEAKRATE64={0xc, 0x9, 0x82}, @TCA_POLICE_PEAKRATE64={0xc, 0x9, 0x8}, @TCA_POLICE_RESULT={0x8, 0x5, 0x4}, @TCA_POLICE_RATE64={0xc, 0x8, 0x80}, @TCA_POLICE_RESULT={0x8, 0x5, 0x7fff}], [@TCA_POLICE_PEAKRATE={0x404, 0x3, [0x3, 0x0, 0x3, 0x3, 0x100, 0xffffff21, 0x0, 0x1, 0x6, 0x6, 0x81, 0x4, 0x87, 0x3, 0x20, 0x1f, 0x0, 0x7fffffff, 0xe2, 0x1, 0x8001, 0x9, 0x46, 0x804, 0x8, 0x7fffffff, 0x5, 0x1000, 0x10001, 0x7f, 0xf49, 0x8000, 0x2, 0x4, 0x101, 0x0, 0x12, 0x7fff, 0x99b, 0x2, 0x4, 0x7, 0x2, 0xfff, 0x572, 0x0, 0x6, 0x77, 0x200, 0x40, 0x7fffffff, 0x20, 0x2, 0x20e9, 0x1, 0x401, 0x4, 0x80000001, 0x4, 0x8, 0xffff, 0xfffffe00, 0x784, 0x552, 0x6, 0x1, 0x401, 0x5, 0x23f, 0x1, 0x7, 0x1, 0x4, 0x2, 0xfffffffd, 0x1000, 0x100, 0x95e9, 0x4, 0x80000000, 0xff, 0x805, 0x5885, 0x5, 0x3, 0xfff, 0x80, 0x0, 0x1, 0x7fffffff, 0x7, 0x2, 0xfffff0af, 0x0, 0x7, 0x7fffffff, 0x400, 0x5, 0x4, 0xffffffff, 0x3, 0xac4, 0x7fffffff, 0x0, 0x10, 0x0, 0x10000, 0x81, 0x3, 0x4, 0x9, 0x3a9afca6, 0x8, 0x1, 0x1000, 0xd6a, 0x9, 0xfffffffc, 0xffffffff, 0x20, 0x2, 0x1, 0x7f9e, 0x7, 0x2, 0x6, 0x3, 0x2, 0x400, 0x6, 0x2f4d5dc7, 0x6, 0x6, 0x0, 0x7, 0x7, 0x4f8, 0x3f, 0x8, 0x1ff, 0x6ce4, 0x6, 0x7, 0x6, 0x1000, 0xfffffffd, 0x0, 
0x95, 0x6, 0x40, 0x3, 0xa1c4, 0x45, 0x2, 0x7ff, 0x6, 0x5, 0x8, 0xffffffff, 0x6, 0x20, 0x3, 0x23, 0x401, 0x5d, 0x5, 0x3ff, 0x3, 0xfff, 0x5, 0x5, 0x7, 0x800, 0x101, 0x101, 0x3, 0x100, 0x3ff, 0x324e, 0x7ff, 0x6, 0x3, 0x2, 0x0, 0x5, 0x4, 0x840, 0x3e7a, 0x3ff, 0xd0d, 0xe1a, 0x7, 0xffffffff, 0x81, 0x0, 0x9, 0x400, 0x6, 0x100, 0x74a1, 0x6, 0x2, 0x9, 0xacc, 0x5, 0x1, 0xfffffff9, 0x2, 0x1, 0xffffd90d, 0xf6, 0x3, 0x8000, 0x7, 0xe, 0x8, 0x4, 0x81, 0xa1, 0xffffffff, 0x1ff, 0x6, 0x1, 0x4, 0x2, 0x8b, 0x3, 0x132, 0x2, 0x80000000, 0x8001, 0x3ff, 0x29100, 0x0, 0x3, 0x2, 0x0, 0x4, 0x3f, 0x4, 0x4481918, 0x4, 0x1, 0x4, 0xd48c, 0x7, 0xfffff0f0, 0xfffff801, 0x1, 0x3, 0xfffffff7, 0x100, 0x6, 0x200, 0x1ff]}, @TCA_POLICE_TBF={0x3c, 0x1, {0x5, 0x3, 0xd7, 0x401, 0x0, {0x1, 0x0, 0x2, 0x9, 0x1, 0x1}, {0x5, 0x1, 0x90f, 0x4, 0x6f6e, 0x7fff}, 0x2, 0x0, 0xfffff8a2}}, @TCA_POLICE_PEAKRATE64={0xc, 0x9, 0xe3d}, @TCA_POLICE_RATE={0x404, 0x2, [0x6, 0x8, 0xbe, 0x0, 0x4, 0x7, 0x0, 0x9, 0x8, 0x0, 0x3, 0x0, 0x30, 0x80000001, 0x6, 0x400, 0xfff, 0xfffffff9, 0x6, 0x5, 0x4, 0x4, 0x6, 0x4, 0x0, 0x0, 0x1, 0x8, 0x1, 0xfffffffe, 0x80000000, 0x5c135a0, 0xffffffff, 0x3f, 0x6, 0x80000000, 0x6, 0x0, 0x10001, 0x80, 0xffffffff, 0xfffffffa, 0xf8, 0xe155, 0x200, 0x2, 0xe1e, 0x1f, 0x5, 0x6, 0xffffffff, 0x9, 0x6, 0x0, 0xa86d, 0x1000, 0x6, 0xff, 0x10000, 0x7, 0x0, 0x80000001, 0x4, 0x8000, 0x3, 0xcff, 0x1, 0x6, 0xf0e7, 0xfffff508, 0x81, 0x5d, 0x5, 0x7, 0x100, 0x2, 0xa014, 0x401, 0x7db, 0x80000001, 0x400, 0x400, 0xba, 0x3, 0x1, 0x401, 0x4, 0x9, 0x6, 0x6b, 0x3, 0x2, 0x3, 0x1f, 0x0, 0xa0000, 0xfffffeff, 0x5493fbbe, 0x8782, 0x3ff, 0x8, 0xffffff80, 0x0, 0x8, 0x2, 0xffffff00, 0x9d0b, 0x80, 0x25dc292, 0x9, 0x6, 0x4, 0x7, 0x81, 0x0, 0x5, 0x5, 0x1000, 0x0, 0x7, 0x401, 0x6c50b29c, 0x2, 0x2, 0x0, 0x80000000, 0x8, 0xa56e, 0x7, 0x401, 0x9, 0x8, 0x6, 0x800, 0x7, 0x7ff, 0x80000, 0xa5, 0x8001, 0x40, 0x401, 0x2, 0x0, 0x7f, 0xffffffe0, 0xfffffff8, 0x800, 0x40, 0x3ff, 0x2, 0x99dd, 0x8, 0x8000, 0x80000001, 0x5, 0x4, 0x133, 0x1, 0x8000, 0xcc, 0x8, 0x1f, 0x9, 0x400, 0x7, 0x3f, 0x80000000, 0x7, 0xdc5, 0x7, 0x0, 0x400, 0xa79, 0xbc26, 0x9, 0x7d, 0x9, 0x8000, 0x240, 0x8a, 0x800, 0x16, 0x7fffffff, 0x4, 0x1ff, 0x4, 0x9, 0x0, 0x6, 0xef, 0xe6c1, 0x4, 0x0, 0x6, 0x2, 0x2, 0x1, 0xe9e, 0x9, 0x9, 0x3ff, 0xffff, 0x0, 0x4, 0x6, 0x401, 0x7, 0x7, 0x7ff, 0x800, 0x6, 0x7ff, 0x6, 0x1, 0x800, 0x3, 0x5, 0xc3e7, 0x10001, 0x81, 0x0, 0x3b529ad0, 0x3, 0x2, 0x1000, 0xe6a6, 0x9a, 0x3, 0x5, 0xff, 0x7, 0x5, 0x4, 0x44, 0xd990, 0x8, 0x5, 0x7, 0x6, 0x5, 0x6, 0xea, 0x9, 0x74, 0x40, 0x7, 0x5, 0x9, 0xd6, 0x6e, 0x7b0, 0x298a, 0x401, 0x4, 0xeabf, 0x1000]}, @TCA_POLICE_PEAKRATE={0x404, 0x3, [0x4, 0x70, 0x5, 0x0, 0xd39, 0xd5e, 0xf80000, 0x2, 0x1, 0xffff, 0x1, 0x1, 0x5, 0x75478d4c, 0xeb, 0xffffffff, 0x80000001, 0x7fff, 0x5, 0x400, 0xffffffff, 0x1, 0x1000, 0x5, 0x5, 0x2398, 0x2, 0x2ee, 0x5, 0x9a, 0x1, 0x7fff, 0xd6a6, 0x1ff, 0x1, 0xc83f, 0xfffffffc, 0x7fffffff, 0x1, 0x8001, 0x7, 0x5ec1, 0x81, 0x210, 0x9, 0x6, 0x1, 0x10000, 0xfffffe01, 0x8001, 0x9846, 0x4, 0x5, 0x7fffffff, 0x3ad, 0x40, 0x8, 0x5, 0x3, 0x8, 0x20, 0x401, 0x8, 0xff90, 0x2, 0x5ae5, 0x0, 0x0, 0x10000, 0xfffffff7, 0x80000001, 0x7fe571e5, 0x8b4a, 0x3, 0x8, 0xfffffffa, 0x8, 0x39, 0xd9, 0xa638, 0x8, 0x2, 0xd9, 0xbddf, 0xfb, 0x8001, 0x100, 0x200, 0x401, 0x7, 0x0, 0x3f8c, 0x2, 0x3f, 0x6, 0x2, 0x3, 0xfffffff9, 0x2, 0x6, 0xffffff9c, 0x400, 0x2, 0x5, 0x7ff, 0x9, 0x8, 0x1, 0x10000000, 0x9, 0x8000, 0x2, 0x3, 0x3f, 0x3, 0x0, 0x8, 0x800, 0x521, 0xff, 0x7, 0x9, 0x3, 0x6, 0x9, 0x2, 0x1, 0x7, 0x9, 0x9, 0xe429, 0x10001, 0xb9, 0x7, 0x1f, 0x0, 0x81, 0x4, 0x7f, 0x5e30, 
0x3f, 0x6, 0x40, 0xf269, 0x3, 0x4, 0x4, 0x6, 0xecc, 0x8001, 0x8001, 0x100, 0x2, 0xfffff001, 0x8, 0x3, 0x4, 0xa563, 0xf4, 0x3, 0x3f, 0x8e, 0x7f, 0x1, 0x1, 0x5, 0x200, 0x2, 0x59284c9d, 0x9, 0x0, 0x2, 0x0, 0xd07, 0x1, 0x1400000, 0x80000000, 0x1, 0x6, 0x4, 0x5, 0x6, 0x2, 0x0, 0x9, 0x1, 0x5, 0x3, 0xfffffffb, 0x2, 0x5, 0x1, 0x20, 0x10001, 0x8, 0x3, 0x2, 0x9, 0x7e, 0x4, 0xbe, 0x8a5b, 0x8, 0xffffffff, 0x80000000, 0x1f, 0x8, 0x8, 0x10001, 0x1, 0x7, 0x1, 0x6, 0xfffffff7, 0xc5, 0xfffffbff, 0x27d, 0x33b, 0x7fff, 0xd56, 0x80000001, 0x8001, 0x200, 0x81, 0xffffff80, 0xda5, 0x3ff, 0xfff, 0x821, 0xa3, 0x0, 0x689997fb, 0x200, 0x80, 0x77943bb2, 0x5, 0x0, 0x1, 0x9, 0x8001, 0x0, 0x0, 0x6, 0x6, 0x0, 0x7, 0xf1b, 0x1ff, 0xffff, 0xa90, 0x3f, 0x6d3b, 0x9e, 0x4, 0x5, 0x20]}, @TCA_POLICE_RATE64={0xc, 0x8, 0x1ff}, @TCA_POLICE_RATE={0x404, 0x2, [0x3, 0x85, 0x1ff, 0x7705, 0x33a047e8, 0x800, 0x7, 0x60, 0x3f, 0x3, 0x6, 0x7, 0xfff00000, 0x7ff, 0x7, 0x5, 0x2400, 0x6, 0x1f, 0x9, 0x1, 0x7, 0x7, 0x7, 0x1, 0x8, 0x3, 0x3, 0x4, 0xfffffff9, 0x1, 0x6, 0xff800000, 0x200, 0x0, 0x6, 0x7, 0x6, 0x44b598aa, 0x3, 0x2, 0x4, 0xf065, 0xfffff4a1, 0x2, 0x4, 0x4, 0x391, 0x3, 0x0, 0x5, 0x4, 0x7fffffff, 0x3, 0x5, 0x7, 0xea6, 0x8, 0x7, 0x6ccd, 0x4, 0x101, 0xe3a, 0x800, 0x3, 0x8, 0x8, 0x0, 0x7, 0x1, 0x3c694712, 0xe, 0x3, 0x5c9, 0x7, 0x3ff, 0x1ff, 0x4, 0x7, 0x101, 0x8, 0x7f20, 0x1ff, 0x9, 0xfffff697, 0x3, 0x80, 0x7, 0x2, 0x0, 0x5, 0xfdfb, 0x1, 0x1ff, 0x555f351f, 0x6, 0x200, 0x7, 0xffff52e3, 0x3, 0x1000, 0x0, 0x1, 0x8, 0x7fff, 0x4, 0xfe9, 0xffffffe0, 0x8, 0xb7a2, 0x4, 0x8000, 0x7edb34fa, 0x40, 0x4, 0x20, 0x100, 0x3, 0x9, 0x3, 0xc918, 0x5, 0x6, 0x10000, 0x0, 0x2, 0x4, 0x5, 0x7, 0x7, 0x5, 0x54d8, 0x80, 0x8001, 0x8, 0x9, 0x7fff, 0x100, 0x2, 0x0, 0x1000, 0x4, 0x7, 0x7ff, 0x80000001, 0x3, 0xfff, 0x8, 0x8, 0x3ff, 0x0, 0x56f2, 0x1ff, 0x41d, 0x1, 0x2, 0xffffffff, 0x10001, 0x3, 0x5, 0x9, 0x2, 0x0, 0x27, 0x6, 0x5, 0x3, 0xffff0000, 0x8000, 0x1, 0xfffffc00, 0x4, 0x3, 0xffffffff, 0x10000, 0x7fffffff, 0x3, 0x0, 0x679, 0xc89e, 0x2, 0x7ef, 0x7e89d348, 0x40, 0x3, 0x556, 0x20000, 0x4b, 0x3, 0xb0, 0x23, 0x4, 0x9, 0x40, 0x0, 0x7ff, 0x3f, 0x3ff, 0x9, 0x199, 0x8001, 0x400, 0x1000000, 0x7, 0x1f, 0x99d, 0x7f, 0x6, 0x6, 0x7, 0x4, 0x3, 0x10000, 0x1c, 0xffffff9f, 0x4, 0x80000000, 0x0, 0x101, 0x1, 0x9, 0x0, 0x400, 0x7e, 0x400, 0x3, 0x9, 0xc27, 0x4302, 0x2, 0x3f, 0x2, 0x7, 0x1, 0x1, 0x6, 0x9, 0x2, 0x79e, 0x1f, 0x400, 0x3f, 0x5, 0xf74, 0x1, 0x7fffffff, 0x3f1, 0x2, 0x2, 0x9, 0x4, 0x6, 0x84f, 0xad, 0x8, 0x1]}, @TCA_POLICE_RATE64={0xc, 0x8, 0x2}, @TCA_POLICE_AVRATE={0x8, 0x4, 0x3}]]}, {0x39, 0x6, "5506fc7875613b1144520a9ae82ad3bf2882ece64ca6bf2449a5c60f5ca3aadccaefaba501d195c728b71b203ea27976db2c718b2a"}, {0xc}, {0xc, 0x8, {0x1, 0x3}}}}]}]}, 0x1f50}, 0x1, 0x0, 0x0, 0x20008010}, 0x20000040) [ 2938.681387][T24309] bond824: entered promiscuous mode [ 2938.687113][T24309] 8021q: adding VLAN 0 to HW filter on device bond824 [ 2938.706990][T24308] netlink: 'syz-executor.2': attribute type 1 has an invalid length. 
17:05:03 executing program 0: r0 = socket$inet6_sctp(0xa, 0x1, 0x84) bind$inet6(r0, &(0x7f00004b8fe4)={0xa, 0x4e23, 0x0, @empty}, 0x1c) sendto$inet6(r0, &(0x7f00000002c0)='X', 0x1a000, 0x0, &(0x7f0000000200)={0xa, 0x4e23, 0x0, @loopback}, 0x1c) r1 = socket$inet6_sctp(0xa, 0x5, 0x84) r2 = socket$inet6_sctp(0xa, 0x5, 0x84) getsockopt$inet_sctp6_SCTP_SOCKOPT_CONNECTX3(r0, 0x84, 0x6f, &(0x7f0000000000)={0x0, 0x0, &(0x7f0000000040)}, &(0x7f0000000240)=0x10) getsockopt$inet_sctp6_SCTP_GET_ASSOC_ID_LIST(r2, 0x84, 0x1d, &(0x7f0000000080)=ANY=[@ANYBLOB="f77f8000", @ANYRES32=0x0], &(0x7f00000000c0)=0x8) setsockopt$inet_sctp6_SCTP_DELAYED_SACK(r1, 0x84, 0x10, &(0x7f00000003c0)=@sack_info={r4, 0x4000, 0x1}, 0xc) r5 = socket$inet_udp(0x2, 0x2, 0x0) bind$inet(r5, &(0x7f0000000480)={0x2, 0x1, @initdev={0xac, 0x1e, 0x1, 0x0}}, 0x10) setsockopt$sock_int(r5, 0x1, 0x6, &(0x7f0000000140)=0x32, 0x4) connect$inet(r5, &(0x7f0000000280)={0x2, 0x0, @broadcast}, 0x10) sendfile(r0, 0xffffffffffffffff, 0x0, 0x10000000000) sendmmsg$inet(r5, &(0x7f0000002080)=[{{0x0, 0x0, 0x0, 0x0, &(0x7f0000000680)=ANY=[@ANYBLOB="100000000000000000000060070000001c00000000008f2b6395c6be000000001c2065c3c932c1267f82fd1bf1b2885cc1ce88102f564db395b8285f7d9b5ffc9133c5cd03c4cf0317205b36a624cc328e4f3d8c093868e64eb64afa752e19", @ANYRES32=r3, @ANYBLOB="ac1485cb7d1a03c52014aa0066b66c5a6016db9dbb0ae87569b61cac9d80aee4881c9d0000000000000091f50800d2bda4dadee6192e8159224f47eb0162d0a926359ed415921a01aa33cf84cc05110d0a330b3aba806ed1c8bb41e91f30740146a8d032eccf98e899fbd8f0a87c7da60070ba261a0ee0b5e4be7623b3"], 0x30}}], 0x1, 0x0) r6 = socket$netlink(0x10, 0x3, 0x0) sendmsg$netlink(r6, &(0x7f0000006440)={0x0, 0x0, &(0x7f00000063c0)=[{&(0x7f0000000600)=ANY=[@ANYBLOB="340000001000010000000000000000005e000000faff5af608000000", @ANYRES32=0x0, @ANYBLOB="14001b0000000000000000210000000000000001"], 0x34}], 0x1}, 0x0) r7 = socket$inet6(0xa, 0x800000000000002, 0x0) setsockopt$inet6_int(r7, 0x29, 0x46, &(0x7f0000000040)=0x3, 0x4) connect$inet6(r7, &(0x7f0000000000)={0xa, 0x0, 0x0, @local, 0x5}, 0x1c) sendmmsg$inet(r7, &(0x7f0000002240)=[{{0x0, 0x0, 0x0}}], 0x40000e2, 0x0) getsockopt$inet_sctp6_SCTP_RTOINFO(r0, 0x84, 0x0, &(0x7f0000000100)={r4, 0x8000, 0x3ff, 0x7}, &(0x7f0000000140)=0x10) r8 = bpf$BPF_PROG_RAW_TRACEPOINT_LOAD(0x5, &(0x7f0000000240)={0x11, 0x3, &(0x7f0000000640)=ANY=[@ANYRES32=r1], &(0x7f00000000c0)='GPL\x00', 0x4, 0x91, &(0x7f0000000000)=""/145, 0x0, 0x0, '\x00', 0x0, 0x0, 0xffffffffffffffff, 0x8, 0x0, 0x0, 0x10, 0x0}, 0x80) bpf$BPF_RAW_TRACEPOINT_OPEN(0x11, &(0x7f0000000200)={&(0x7f00000004c0)='contention_begin\x00', r8}, 0x10) ioctl$FS_IOC_GETVERSION(r8, 0x80087601, &(0x7f0000000180)) unshare(0x6c060000) r9 = accept4(r0, &(0x7f0000000400)=@nfc, &(0x7f00000001c0)=0x80, 0x81c00) sendto$inet6(r9, &(0x7f0000000500)="259330942194c017edb00fc5e63de78f0af422ce261a6642858a3527591b7554225841729f1cc478555713800f8c976bc372b7dd22d0616b818da75466de12bb80a24b81562446c462f83a94362a1302", 0x50, 0x88c0, &(0x7f0000000380)={0xa, 0x4e21, 0xce9b, @private0={0xfc, 0x0, '\x00', 0x1}, 0x22b8}, 0x1c) mmap(&(0x7f0000000000/0xb36000)=nil, 0xb36000, 0x0, 0x10, 0xffffffffffffffff, 0x0) setsockopt$EBT_SO_SET_ENTRIES(0xffffffffffffffff, 0x0, 0x80, 0x0, 0x250) socket$inet_smc(0x2b, 0x1, 0x0) [ 2938.816125][T24308] bond1061: entered promiscuous mode [ 2938.823292][T24308] 8021q: adding VLAN 0 to HW filter on device bond1061 [ 2938.872951][T24310] bond1140: entered promiscuous mode [ 2938.882434][T24310] 8021q: adding VLAN 0 to HW filter on device bond1140 17:05:03 executing 
program 4: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x28a, 0x0, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0x44}]}, 0x3c}}, 0x0) 17:05:03 executing program 1: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x28a, 0xa, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8}]}, 0x3c}, 0x1, 0x8100}, 0x0) 17:05:04 executing program 2: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x261, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) [ 2939.013659][T24323] bond1061: (slave bridge1127): making interface the new active one [ 2939.022175][T24323] bridge1127: entered promiscuous mode [ 2939.035503][T24323] bond1061: (slave bridge1127): Enslaving as an active interface with an up link 17:05:04 executing program 5: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4c504a6e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000840)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x14b6, {}, [@IFLA_LINKINFO={0x14, 0x12, 
0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) 17:05:04 executing program 3: r0 = socket$netlink(0x10, 0x3, 0x0) (async) r1 = socket(0x0, 0x0, 0x0) sendmsg$nl_route(0xffffffffffffffff, 0x0, 0x0) (async) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000100)=@newlink={0x48, 0x10, 0xffffff1f, 0x800000, 0x25dfdbfb, {}, [@IFLA_LINKINFO={0x28, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x18, 0x2, 0x0, 0x1, [@IFLA_BR_MULTI_BOOLOPT={0xc}, @IFLA_BR_MCAST_STATS_ENABLED={0x5}]}}}]}, 0x48}}, 0x0) (async) [ 2939.116581][T24325] bond1140: (slave bridge1166): making interface the new active one [ 2939.124967][T24325] bridge1166: entered promiscuous mode [ 2939.141539][T24325] bond1140: (slave bridge1166): Enslaving as an active interface with an up link sendmsg$nl_route_sched(r1, &(0x7f0000000080)={&(0x7f0000000000)={0x10, 0x0, 0x0, 0x20000}, 0xc, &(0x7f0000000040)={&(0x7f00000006c0)=@newtaction={0x1f50, 0x30, 0x400, 0x70bd29, 0x25dfdbfc, {}, [{0x1f3c, 0x1, [@m_sample={0x8c, 0x1f, 0x0, 0x0, {{0xb}, {0x3c, 0x2, 0x0, 0x1, [@TCA_SAMPLE_RATE={0x8, 0x3, 0x12000000}, @TCA_SAMPLE_TRUNC_SIZE={0x8, 0x4, 0x58}, @TCA_SAMPLE_PSAMPLE_GROUP={0x8, 0x5, 0x15}, @TCA_SAMPLE_PARMS={0x18, 0x2, {0x8, 0x3737, 0x0, 0x0, 0x8}}, @TCA_SAMPLE_RATE={0x8, 0x3, 0x8}]}, {0x27, 0x6, "bc0267c20356ad242f219c0b50c9f6caaa7e2672ffaa1ac7f967bf9f9a2ccad75d3870"}, {0xc, 0x7, {0x1}}, {0xc, 0x8, {0x1, 0x1}}}}, @m_mpls={0x58, 0x14, 0x0, 0x0, {{0x9}, {0x2c, 0x2, 0x0, 0x1, [@TCA_MPLS_TTL={0x5, 0x7, 0xb7}, @TCA_MPLS_TTL={0x5, 0x7, 0x1}, @TCA_MPLS_TC={0x5, 0x6, 0x6}, @TCA_MPLS_TTL={0x5, 0x7, 0x3}, @TCA_MPLS_TTL={0x5, 0x7, 0xf7}]}, {0x4}, {0xc, 0x7, {0x1, 0x1}}, {0xc, 0x8, {0x1, 0x2}}}}, @m_ctinfo={0x12c, 0x7, 0x0, 0x0, {{0xb}, {0x3c, 0x2, 0x0, 0x1, [@TCA_CTINFO_ACT={0x18, 0x3, {0xdcb6, 0xffff, 0x0, 0x1, 0xc3d}}, @TCA_CTINFO_PARMS_DSCP_STATEMASK={0x8, 0x6, 0x1}, @TCA_CTINFO_PARMS_DSCP_STATEMASK={0x8, 0x6, 0xd4fd}, @TCA_CTINFO_PARMS_DSCP_STATEMASK={0x8, 0x6, 0x3ff}, @TCA_CTINFO_ZONE={0x6, 0x4, 0x3800}]}, {0xc6, 0x6, "46e7943225b3836ee6df90dbe06696c965122e6d417bf835dd49b46c5ff4f10b40b5f5a525c06a0f0d7f04ba965029bae430c92e5b25e8ec4beed0c21103b7e59e2302f71e2f9b95d21d564a1809abd907a7eb7b9b22dba5e3c9ffcf2587ae4d2692365674a34b4c57588c71d62386ff8a7a2d1aff8b367c6dad01a0391e24fd89d4232e91f7b7098f2b9fb680e78c93bbdf65333b930b9079d23f33c90e790f3def125bcfadca32a525ffc31c87e857c98141a4af7ced49aa96a0cb53311ffd029f"}, {0xc, 0x7, {0x1, 0x1}}, {0xc, 0x8, {0x2, 0x1}}}}, @m_vlan={0x150, 0x0, 0x0, 0x0, {{0x9}, {0x90, 0x2, 0x0, 0x1, [@TCA_VLAN_PARMS={0x1c, 0x2, {{0x7, 0x8e3, 0x3, 0x200, 0x40}, 0x2}}, @TCA_VLAN_PUSH_VLAN_ID={0x6, 0x3, 0x786}, @TCA_VLAN_PUSH_VLAN_PROTOCOL={0x6, 0x4, 0x8100}, @TCA_VLAN_PUSH_VLAN_PRIORITY={0x5, 0x6, 0x1}, @TCA_VLAN_PUSH_VLAN_PRIORITY={0x5, 0x6, 0x3}, @TCA_VLAN_PARMS={0x1c, 0x2, {{0x6, 0x5c, 0x0, 0x2, 0x99e}, 0x1}}, @TCA_VLAN_PUSH_VLAN_PRIORITY={0x5, 0x6, 0x5}, @TCA_VLAN_PUSH_VLAN_PROTOCOL={0x6, 0x4, 0x88a8}, @TCA_VLAN_PUSH_VLAN_ID={0x6, 0x3, 0xd34}, @TCA_VLAN_PARMS={0x1c, 0x2, {{0x6, 0xffff6b54, 0x0, 0x9, 0xd0d5}, 0x1}}]}, {0x96, 0x6, "cf3b657026502dca4b0df61694bb3a0d60d4d7b0de5ba95c7394cfba0718c925f03a5fb64d61383255baabda0867ef293bc56b94e486a9651e8c914567f12196dc3eb67df8f28bed1c36ce590673a1c1f17679eda06a2964bc0f65040bae97b73546994ed404870df6b573aff5076420f6ca756c7e828e539fe2a598efd1fa633eefecee9a034f08581045e18919abbc1ca1"}, {0xc, 0x7, {0x6ad89bd1a04647e6, 0x57359b85e65c6645}}, {0xc, 0x8, {0x1, 0x7}}}}, @m_simple={0xf8, 0x6, 0x0, 0x0, {{0xb}, {0xac, 0x2, 0x0, 0x1, [@T VM DIAGNOSIS: Warning: 
Permanently added '10.128.1.122' (ECDSA) to the list of known hosts. lock-classes: 6204 [max: 8192] direct dependencies: 49326 [max: 131072] indirect dependencies: 997213 all direct dependencies: 2392494 dependency chains: 257420 [max: 262144] dependency chain hlocks used: 1310719 [max: 1310720] dependency chain hlocks lost: 1 in-hardirq chains: 93 in-softirq chains: 3053 in-process chains: 254273 stack-trace entries: 325921 [max: 1048576] number of stack traces: 16955 number of stack hash chains: 10561 combined max dependencies:hardirq-safe locks: 59 hardirq-unsafe locks: 5424 softirq-safe locks: 362 softirq-unsafe locks: 5000 irq-safe locks: 373 irq-unsafe locks: 5424 hardirq-read-safe locks: 5 hardirq-read-unsafe locks: 212 softirq-read-safe locks: 21 softirq-read-unsafe locks: 194 irq-read-safe locks: 21 irq-read-unsafe locks: 212 uncategorized locks: 429 unused locks: 0 max locking depth: 18 max bfs queue depth: 717 max lock class index: 6203 debug_locks: 0 zapped classes: 2835 zapped lock chains: 15568 large chain blocks: 0 all lock classes: FD: 37 BD: 1 +.+.: fill_pool_map-wait-type-override ->pool_lock#2 ->pool_lock ->&c->lock ->&zone->lock ->&____s->seqcount ->&rq->__lock ->&cfs_rq->removed.lock ->&obj_hash[i].lock ->&n->list_lock ->&____s->seqcount#2 ->rcu_node_0 ->&rcu_state.expedited_wq ->batched_entropy_u8.lock ->kfence_freelist_lock ->init_task.mems_allowed_seq.seqcount ->pgd_lock ->stock_lock ->key ->pcpu_lock ->percpu_counters_lock FD: 2 BD: 4864 -.-.: &obj_hash[i].lock ->pool_lock FD: 1 BD: 4864 -.-.: pool_lock FD: 880 BD: 17 +.+.: cgroup_mutex ->pcpu_alloc_mutex ->pool_lock#2 ->lock ->&root->kernfs_rwsem ->&obj_hash[i].lock ->cgroup_file_kn_lock ->css_set_lock ->&c->lock ->&____s->seqcount ->blkcg_pol_mutex ->&zone->lock ->percpu_counters_lock ->shrinker_rwsem ->&base->lock ->batched_entropy_u8.lock ->&pgdat->memcg_lru.lock ->devcgroup_mutex ->cpu_hotplug_lock ->&n->list_lock ->fs_reclaim ->&rq->__lock ->cgroup_rstat_lock ->cpuset_rwsem ->cpuset_rwsem.waiters.lock ->cpuset_rwsem.rss.gp_wait.lock ->&dom->lock ->batched_entropy_u32.lock ->cgroup_idr_lock ->task_group_lock ->(wq_completion)cpuset_migrate_mm ->&wq->mutex ->&____s->seqcount#2 ->rtnl_mutex ->rtnl_mutex.wait_lock ->&p->pi_lock ->cgroup_mutex.wait_lock ->remove_cache_srcu ->stock_lock ->&sem->wait_lock ->per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) FD: 1 BD: 3687 -.-.: (console_sem).lock FD: 228 BD: 12 +.+.: console_lock ->console_owner_lock ->resource_lock ->pool_lock#2 ->&obj_hash[i].lock ->&zone->lock ->&____s->seqcount ->&c->lock ->kbd_event_lock ->vga_lock ->(console_sem).lock ->fs_reclaim ->&x->wait#9 ->&k->list_lock ->gdp_mutex ->lock ->&root->kernfs_rwsem ->bus_type_sem ->sysfs_symlink_target_lock ->&dev->power.lock ->dpm_list_mtx ->uevent_sock_mutex ->running_helpers_waitq.lock ->subsys mutex#11 ->&fb_info->lock ->&base->lock ->&rq->__lock ->subsys mutex#5 ->&helper->lock ->&helper->damage_lock ->&lock->wait_lock ->&p->pi_lock ->vt_event_lock ->batched_entropy_u8.lock ->kfence_freelist_lock ->&meta->lock FD: 1 BD: 11 ....: console_srcu FD: 300 BD: 135 ++++: cpu_hotplug_lock ->jump_label_mutex ->static_call_mutex ->cpuhp_state_mutex ->wq_pool_mutex ->freezer_mutex ->rcu_tasks_trace__percpu.cbs_pcpu_lock ->&ACCESS_PRIVATE(rtpcp, lock) ->smpboot_threads_lock ->&obj_hash[i].lock ->&pool->lock ->&x->wait#4 ->&rq->__lock ->mem_hotplug_lock ->mem_hotplug_lock.waiters.lock ->mem_hotplug_lock.rss.gp_wait.lock ->cpu_hotplug_lock.rss.gp_wait.lock ->rcu_node_0 ->&swhash->hlist_mutex ->pmus_lock ->pcp_batch_high_lock 
->&xa->xa_lock ->fs_reclaim ->pool_lock#2 ->kthread_create_lock ->&p->pi_lock ->&x->wait ->wq_pool_attach_mutex ->pcpu_alloc_mutex ->relay_channels_mutex ->&c->lock ->&n->list_lock ->&zone->lock ->&____s->seqcount ->sparse_irq_lock ->&x->wait#6 ->cpuhp_state-up ->cpu_hotplug_lock.waiters.lock ->stop_cpus_mutex ->&wq->mutex ->flush_lock ->&md->mutex ->&irq_desc_lock_class ->xps_map_mutex ->css_set_lock ->cpuset_rwsem ->cpuset_rwsem.waiters.lock ->cpuset_rwsem.rss.gp_wait.lock ->cgroup_threadgroup_rwsem ->cgroup_threadgroup_rwsem.waiters.lock ->cgroup_threadgroup_rwsem.rss.gp_wait.lock ->jump_label_mutex.wait_lock ->&list->lock#5 ->(work_completion)(flush) ->&x->wait#10 ->wq_pool_mutex.wait_lock FD: 59 BD: 143 +.+.: jump_label_mutex ->text_mutex ->text_mutex.wait_lock ->&p->pi_lock ->&rq->__lock ->jump_label_mutex.wait_lock ->&cfs_rq->removed.lock ->&obj_hash[i].lock FD: 58 BD: 136 +.+.: static_call_mutex ->text_mutex ->text_mutex.wait_lock ->&p->pi_lock ->rcu_node_0 ->&rq->__lock FD: 57 BD: 155 +.+.: text_mutex ->ptlock_ptr(page)#2 ->&rq->__lock ->text_mutex.wait_lock ->rcu_node_0 ->&rcu_state.expedited_wq ->&pool->lock ->&cfs_rq->removed.lock ->&obj_hash[i].lock ->pool_lock#2 FD: 234 BD: 10 +.+.: console_mutex ->syslog_lock ->(console_sem).lock ->&port_lock_key ->console_lock ->console_srcu_srcu_usage.lock ->&ACCESS_PRIVATE(sdp, lock) ->console_srcu ->&root->kernfs_rwsem ->kernfs_notify_lock ->&rq->__lock FD: 28 BD: 11 +.+.: syslog_lock ->&rq->__lock FD: 1 BD: 3664 -.-.: console_owner_lock FD: 5 BD: 3663 -.-.: console_owner ->console_owner_lock ->&port_lock_key FD: 1 BD: 156 ..-.: input_pool.lock FD: 255 BD: 136 +.+.: cpuhp_state_mutex ->cpuhp_state-down ->cpuhp_state-up ->&p->pi_lock ->&rq->__lock ->&x->wait#6 ->&cfs_rq->removed.lock ->&obj_hash[i].lock ->pool_lock#2 ->fs_reclaim ->lock ->&root->kernfs_rwsem ->&c->lock ->&____s->seqcount ->&zone->lock ->crypto_alg_sem ->scomp_lock FD: 302 BD: 1 +.+.: clocksource_mutex ->watchdog_lock ->cpu_hotplug_lock ->(console_sem).lock ->&rq->__lock FD: 1 BD: 2 ....: watchdog_lock FD: 4 BD: 139 ++++: resource_lock ->pool_lock#2 ->&obj_hash[i].lock FD: 1 BD: 1 ....: cache_disable_lock FD: 1 BD: 3998 +.+.: pgd_lock FD: 30 BD: 294 +.+.: init_mm.page_table_lock ->pgd_lock ->&obj_hash[i].lock FD: 1 BD: 1 ....: early_pfn_lock FD: 176 BD: 1 +.+.: acpi_ioapic_lock ->ioapic_lock ->(console_sem).lock ->ioapic_mutex FD: 2 BD: 156 ....: ioapic_lock ->i8259A_lock FD: 1 BD: 1 +.+.: syscore_ops_lock FD: 1 BD: 1 ....: map_entries_lock FD: 1 BD: 7 ....: devtree_lock FD: 3 BD: 4044 ..-.: pcpu_lock ->stock_lock FD: 131 BD: 73 +.+.: param_lock ->rate_ctrl_mutex ->disk_events_mutex FD: 1 BD: 4827 ..-.: base_crng.lock FD: 1 BD: 1 ....: rcu_read_lock FD: 1 BD: 1 ....: crng_init_wait.lock FD: 2 BD: 1 ....: zonelist_update_seq ->zonelist_update_seq.seqcount FD: 1 BD: 2 ....: zonelist_update_seq.seqcount FD: 1 BD: 1 +.+.: dmar_global_lock FD: 2 BD: 4747 -.-.: &zone->lock ->&____s->seqcount FD: 1 BD: 4807 .-.-: &____s->seqcount FD: 3 BD: 4139 +.+.: &pcp->lock ->&zone->lock FD: 1 BD: 4880 -.-.: pool_lock#2 FD: 134 BD: 217 +.+.: pcpu_alloc_mutex ->pcpu_lock ->fs_reclaim ->pool_lock#2 ->free_vmap_area_lock ->vmap_area_lock ->&zone->lock ->&____s->seqcount ->init_mm.page_table_lock ->&c->lock ->&obj_hash[i].lock ->&rq->__lock ->pcpu_alloc_mutex.wait_lock ->&cfs_rq->removed.lock ->rcu_node_0 ->&pool->lock ->&____s->seqcount#2 ->&n->list_lock ->&rcu_state.expedited_wq ->batched_entropy_u8.lock ->kfence_freelist_lock ->purge_vmap_area_lock ->&meta->lock ->pgd_lock ->key 
->percpu_counters_lock ->stock_lock ->pool_lock FD: 6 BD: 4779 -.-.: &n->list_lock ->&c->lock FD: 5 BD: 4816 -.-.: &c->lock ->batched_entropy_u8.lock ->kfence_freelist_lock FD: 160 BD: 85 +.+.: slab_mutex ->pool_lock#2 ->&c->lock ->&n->list_lock ->pcpu_alloc_mutex ->&zone->lock ->&____s->seqcount ->fs_reclaim ->&rq->__lock ->remove_cache_srcu ->batched_entropy_u8.lock ->kfence_freelist_lock ->&obj_hash[i].lock ->lock ->&root->kernfs_rwsem ->&k->list_lock ->&____s->seqcount#2 FD: 3 BD: 5 ....: batched_entropy_u64.lock ->crngs.lock FD: 2 BD: 4826 ..-.: crngs.lock ->base_crng.lock FD: 4 BD: 1 ....: espfix_init_mutex ->&zone->lock ->&____s->seqcount ->pool_lock#2 FD: 1 BD: 4001 ..-.: percpu_counters_lock FD: 9 BD: 4093 +.+.: &mm->page_table_lock ->&obj_hash[i].lock ->pool_lock#2 ->&meta->lock ->kfence_freelist_lock ->quarantine_lock ->stock_lock FD: 11 BD: 4098 +.+.: ptlock_ptr(page) ->lock#4 FD: 53 BD: 4122 +.+.: ptlock_ptr(page)#2 ->lock#4 ->ptlock_ptr(page)#2/1 ->key ->&____s->seqcount ->pool_lock#2 ->lock#5 ->&folio_wait_table[i] ->&lruvec->lru_lock ->&mapping->private_lock ->&obj_hash[i].lock FD: 133 BD: 3 +.+.: trace_types_lock ->fs_reclaim ->pool_lock#2 ->pin_fs_lock ->&sb->s_type->i_mutex_key#5 ->&obj_hash[i].lock FD: 1 BD: 1 ....: panic_notifier_list.lock FD: 1 BD: 1 ....: die_chain.lock FD: 135 BD: 4 +.+.: trace_event_sem ->trace_event_ida.xa_lock ->&rq->__lock ->trace_event_sem.wait_lock ->&cfs_rq->removed.lock ->&obj_hash[i].lock ->pool_lock#2 ->fs_reclaim ->batched_entropy_u8.lock ->kfence_freelist_lock ->pin_fs_lock ->&sb->s_type->i_mutex_key#5 ->&c->lock ->&zone->lock ->&____s->seqcount FD: 3 BD: 3738 ..-.: batched_entropy_u32.lock ->crngs.lock FD: 27 BD: 4663 -.-.: &rq->__lock ->&per_cpu_ptr(group->pcpu, cpu)->seq ->&obj_hash[i].lock ->&base->lock ->&rq->__lock/1 ->&cfs_rq->removed.lock ->&rt_b->rt_runtime_lock ->&cp->lock ->pool_lock#2 ->per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) ->&rt_rq->rt_runtime_lock ->cid_lock FD: 1 BD: 4664 ....: &cfs_b->lock FD: 28 BD: 141 ....: init_task.pi_lock ->&rq->__lock FD: 1 BD: 1 ....: init_task.vtime_seqcount FD: 145 BD: 140 +.+.: wq_pool_mutex ->&zone->lock ->&____s->seqcount ->pool_lock#2 ->&c->lock ->&wq->mutex ->&obj_hash[i].lock ->&pool->lock/1 ->fs_reclaim ->kthread_create_lock ->&p->pi_lock ->&rq->__lock ->&x->wait ->wq_pool_attach_mutex ->(console_sem).lock ->&xa->xa_lock ->&n->list_lock ->&____s->seqcount#2 ->wq_pool_mutex.wait_lock ->rcu_node_0 ->&rcu_state.expedited_wq ->remove_cache_srcu ->&cfs_rq->removed.lock ->batched_entropy_u8.lock ->kfence_freelist_lock ->quarantine_lock FD: 33 BD: 152 +.+.: &wq->mutex ->&pool->lock ->&pool->lock/1 ->&x->wait#10 ->&rq->__lock FD: 30 BD: 4367 -.-.: &pool->lock ->&obj_hash[i].lock ->&p->pi_lock ->pool_lock#2 ->&base->lock ->wq_mayday_lock FD: 32 BD: 4235 ..-.: &pool->lock/1 ->&obj_hash[i].lock ->&p->pi_lock ->pool_lock#2 ->&base->lock ->&x->wait#10 ->wq_mayday_lock FD: 129 BD: 61 ++++: shrinker_rwsem ->pool_lock#2 ->&c->lock ->&n->list_lock ->&____s->seqcount ->fs_reclaim ->&rq->__lock ->&obj_hash[i].lock ->krc.lock ->&cfs_rq->removed.lock ->rcu_node_0 ->f2fs_list_lock ->tk_core.seq.seqcount ->&sbi->s_es_lock ->&journal->j_list_lock ->&rcu_state.expedited_wq FD: 1 BD: 4208 -.-.: rcu_node_0 FD: 5 BD: 82 -.-.: rcu_state.barrier_lock ->rcu_node_0 ->&obj_hash[i].lock FD: 31 BD: 3 ....: &rnp->exp_poll_lock FD: 9 BD: 5 ....: trace_event_ida.xa_lock ->&zone->lock ->&____s->seqcount ->pool_lock#2 ->&c->lock FD: 1 BD: 1 ....: trigger_cmd_mutex FD: 1 BD: 157 ....: i8259A_lock FD: 129 BD: 137 +.+.: 
irq_domain_mutex ->fs_reclaim ->pool_lock#2 ->&obj_hash[i].lock FD: 33 BD: 293 +.+.: free_vmap_area_lock ->&obj_hash[i].lock ->pool_lock#2 ->init_mm.page_table_lock ->quarantine_lock ->&meta->lock ->kfence_freelist_lock ->&____s->seqcount FD: 1 BD: 294 +.+.: vmap_area_lock FD: 6 BD: 153 -.-.: &irq_desc_lock_class ->i8259A_lock ->vector_lock ->ioapic_lock ->mask_lock ->tmp_mask_lock FD: 37 BD: 81 +.+.: vmap_purge_lock ->purge_vmap_area_lock ->free_vmap_area_lock ->&rq->__lock ->&cfs_rq->removed.lock ->&obj_hash[i].lock ->pool_lock#2 ->rcu_node_0 ->&rcu_state.expedited_wq FD: 31 BD: 226 +.+.: purge_vmap_area_lock ->&obj_hash[i].lock ->pool_lock#2 ->quarantine_lock ->&meta->lock ->kfence_freelist_lock FD: 2 BD: 80 +.+.: cpa_lock ->pgd_lock FD: 5 BD: 2 -.-.: timekeeper_lock ->tk_core.seq.seqcount ->pvclock_gtod_data FD: 4 BD: 4731 ----: tk_core.seq.seqcount ->&obj_hash[i].lock ->pvclock_gtod_data FD: 13 BD: 4742 -.-.: &base->lock ->&obj_hash[i].lock FD: 187 BD: 138 +.+.: pmus_lock ->pcpu_alloc_mutex ->&obj_hash[i].lock ->pool_lock#2 ->&cpuctx_mutex ->fs_reclaim ->&k->list_lock ->lock ->&root->kernfs_rwsem ->uevent_sock_mutex ->running_helpers_waitq.lock ->&x->wait#9 ->bus_type_sem ->sysfs_symlink_target_lock ->&k->k_lock ->&c->lock ->&____s->seqcount ->&dev->power.lock ->dpm_list_mtx ->subsys mutex#29 FD: 1 BD: 138 +.+.: &swhash->hlist_mutex FD: 1 BD: 139 +.+.: &cpuctx_mutex FD: 1 BD: 2 ....: tty_ldiscs_lock FD: 2 BD: 13 ....: kbd_event_lock ->led_lock FD: 1 BD: 14 ..-.: led_lock FD: 1 BD: 13 ....: vga_lock FD: 3 BD: 3666 -.-.: &port_lock_key ->&port->lock ->&tty->write_wait FD: 3 BD: 11 ....: console_srcu_srcu_usage.lock ->&obj_hash[i].lock FD: 1 BD: 41 ..-.: &ACCESS_PRIVATE(sdp, lock) FD: 43 BD: 3 +.+.: init_task.alloc_lock ->init_fs.lock FD: 37 BD: 1 +.+.: acpi_ioremap_lock ->pool_lock#2 ->resource_lock ->memtype_lock ->free_vmap_area_lock ->vmap_area_lock FD: 1 BD: 2 +.+.: memtype_lock FD: 1 BD: 17 ....: semaphore->lock FD: 1 BD: 13 ....: *(&acpi_gbl_reference_count_lock) FD: 9 BD: 1 ....: clockevents_lock ->tk_core.seq.seqcount ->tick_broadcast_lock ->i8253_lock FD: 3 BD: 2 -...: tick_broadcast_lock ->jiffies_lock FD: 1 BD: 2 ....: i8253_lock FD: 37 BD: 12 +.+.: &desc->request_mutex ->&irq_desc_lock_class ->&rq->__lock ->proc_subdir_lock ->&ent->pde_unload_lock ->proc_inum_ida.xa_lock ->&obj_hash[i].lock ->pool_lock#2 FD: 175 BD: 2 +.+.: ioapic_mutex ->&domain->mutex FD: 174 BD: 139 +.+.: &domain->mutex ->pool_lock#2 ->vector_lock ->&irq_desc_lock_class ->i8259A_lock ->&c->lock ->&zone->lock ->&____s->seqcount ->sparse_irq_lock ->fs_reclaim FD: 1 BD: 156 -.-.: vector_lock FD: 2 BD: 3 -.-.: jiffies_lock ->jiffies_seq.seqcount FD: 1 BD: 4 -.-.: jiffies_seq.seqcount FD: 16 BD: 4700 -.-.: hrtimer_bases.lock ->tk_core.seq.seqcount ->&obj_hash[i].lock FD: 29 BD: 1 -.-.: log_wait.lock ->&p->pi_lock FD: 19 BD: 4196 +.+.: sysctl_lock ->&obj_hash[i].lock ->pool_lock#2 ->krc.lock FD: 38 BD: 2 +.+.: tomoyo_policy_lock ->pool_lock#2 ->mmu_notifier_invalidate_range_start ->&____s->seqcount ->&zone->lock ->&obj_hash[i].lock ->&c->lock ->&n->list_lock ->&rq->__lock FD: 2 BD: 1 ....: aa_secids.xa_lock ->pool_lock#2 FD: 1 BD: 2 +.+.: aa_buffers_lock FD: 1086 BD: 4 ++++: pernet_ops_rwsem ->stack_depot_init_mutex ->crngs.lock ->net_rwsem ->proc_inum_ida.xa_lock ->pool_lock#2 ->proc_subdir_lock ->fs_reclaim ->&____s->seqcount ->&c->lock ->sysctl_lock ->pcpu_alloc_mutex ->net_generic_ids.xa_lock ->mmu_notifier_invalidate_range_start ->&sb->s_type->i_lock_key#8 ->&dir->lock ->&obj_hash[i].lock 
->k-sk_lock-AF_NETLINK ->k-slock-AF_NETLINK ->nl_table_lock ->nl_table_wait.lock ->rtnl_mutex ->uevent_sock_mutex ->pool_lock ->&net->rules_mod_lock ->slab_mutex ->&zone->lock ->batched_entropy_u32.lock ->percpu_counters_lock ->cache_list_lock ->tk_core.seq.seqcount ->&k->list_lock ->lock ->&root->kernfs_rwsem ->&pool->lock/1 ->running_helpers_waitq.lock ->&sn->pipefs_sb_lock ->krc.lock ->&rq->__lock ->&s->s_inode_list_lock ->nf_hook_mutex ->cpu_hotplug_lock ->hwsim_netgroup_ida.xa_lock ->nf_connlabels_lock ->batched_entropy_u8.lock ->kfence_freelist_lock ->nf_ct_ecache_mutex ->nf_log_mutex ->ipvs->est_mutex ->&base->lock ->__ip_vs_app_mutex ->&hashinfo->lock#2 ->&net->ipv6.ip6addrlbl_table.lock ->(console_sem).lock ->k-sk_lock-AF_INET6 ->k-slock-AF_INET6 ->k-clock-AF_INET6 ->wq_pool_mutex ->pcpu_lock ->&list->lock#4 ->&dir->lock#2 ->ptype_lock ->k-clock-AF_TIPC ->k-sk_lock-AF_TIPC ->k-slock-AF_TIPC ->&this->receive_lock ->once_lock ->nf_ct_proto_mutex ->k-sk_lock-AF_RXRPC ->k-slock-AF_RXRPC ->&rxnet->conn_lock ->&call->waitq ->&rx->call_lock ->&rxnet->call_lock ->&n->list_lock ->remove_cache_srcu ->rtnl_mutex.wait_lock ->&p->pi_lock ->rdma_nets.xa_lock ->devices_rwsem ->pcpu_alloc_mutex.wait_lock ->stock_lock ->rcu_node_0 ->&____s->seqcount#2 ->&cfs_rq->removed.lock ->&net->nsid_lock ->ebt_mutex ->&xt[i].mutex ->&nft_net->commit_mutex ->netns_bpf_mutex ->&rnp->exp_wq[2] ->(&net->fs_probe_timer) ->&net->cells_lock ->(&net->cells_timer) ->bit_wait_table + i ->(&net->fs_timer) ->(wq_completion)kafsd ->&wq->mutex ->k-clock-AF_RXRPC ->&local->services_lock ->(wq_completion)krxrpcd ->rlock-AF_RXRPC ->&x->wait ->&xa->xa_lock#6 ->&fsnotify_mark_srcu ->&ent->pde_unload_lock ->ovs_mutex ->(work_completion)(&(&ovs_net->masks_rebalance)->work) ->(work_completion)(&ovs_net->dp_notify_work) ->&srv->idr_lock ->&rnp->exp_wq[3] ->rcu_state.exp_mutex.wait_lock ->(work_completion)(&tn->work) ->&rnp->exp_lock ->&rnp->exp_wq[0] ->&tn->nametbl_lock ->&rnp->exp_wq[1] ->(work_completion)(&ht->run_work) ->&ht->mutex ->(work_completion)(&(&c->work)->work) ->(wq_completion)krdsd ->(work_completion)(&rtn->rds_tcp_accept_w) ->rds_tcp_conn_lock ->loop_conns_lock ->(wq_completion)l2tp ->rcu_state.barrier_mutex ->(&rxnet->peer_keepalive_timer) ->(work_completion)(&rxnet->peer_keepalive_work) ->(&rxnet->service_conn_reap_timer) ->&x->wait#10 ->dev_base_lock ->lweventlist_lock ->napi_hash_lock ->netdev_unregistering_wq.lock ->&fn->fou_lock ->ipvs->sync_mutex ->hwsim_radio_lock ->rdma_nets_rwsem ->k-clock-AF_NETLINK ->&nlk->wait ->wlock-AF_NETLINK ->&hn->hn_lock ->&pnettable->lock ->&pnetids_ndev->lock ->k-sk_lock-AF_INET6/1 ->&net->sctp.addr_wq_lock ->&lock->wait_lock ->rcu_state.exp_mutex ->&rcu_state.expedited_wq ->rcu_state.barrier_mutex.wait_lock ->quarantine_lock ->k-sk_lock-AF_INET ->k-slock-AF_INET ->&sn->gssp_lock ->&cd->hash_lock ->(&net->can.stattimer) ->xfrm_state_gc_work ->&net->xfrm.xfrm_state_lock ->per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) ->(work_completion)(&(&net->ipv6.addr_chk_work)->work) ->ip6_fl_lock ->(&net->ipv6.ip6_fib_timer) ->__ip_vs_mutex ->(&ipvs->dest_trash_timer) ->(work_completion)(&(&ipvs->expire_nodest_conn_work)->work) ->(work_completion)(&(&ipvs->defense_work)->work) ->(work_completion)(&(&ipvs->est_reload_work)->work) ->nfnl_subsys_ipset ->recent_lock ->hashlimit_mutex ->(work_completion)(&(&cnet->ecache.dwork)->work) ->sysfs_symlink_target_lock ->kernfs_idr_lock ->tcp_metrics_lock ->k-clock-AF_INET ->(work_completion)(&net->xfrm.policy_hash_work) ->&net->xfrm.xfrm_policy_lock 
->(work_completion)(&net->xfrm.state_hash_work) ->&list->lock#2 ->genl_sk_destructing_waitq.lock ->&meta->lock ->&sem->wait_lock ->uevent_sock_mutex.wait_lock ->key#25 ->nf_nat_proto_mutex ->&table->hash[i].lock ->pin_fs_lock ->&dentry->d_lock ->&sb->s_type->i_mutex_key#3 ->&sb->s_type->i_lock_key#7 ->mount_lock ->(inetaddr_chain).rwsem ->inet6addr_chain.lock ->(work_completion)(&(&local->roc_work)->work) ->(work_completion)(&local->restart_work) ->(work_completion)(&local->reconfig_filter) ->(work_completion)(&local->sched_scan_stopped_work) ->(work_completion)(&local->radar_detected_work) ->&list->lock#19 ->&rdev->wiphy.mtx ->(work_completion)(&rfkill->uevent_work) ->(work_completion)(&rfkill->sync_work) ->dev_pm_qos_sysfs_mtx ->&k->k_lock ->subsys mutex#40 ->&x->wait#9 ->dpm_list_mtx ->&dev->power.lock ->deferred_probe_mutex ->device_links_lock ->&rfkill->lock ->rfkill_global_mutex ->rfkill_global_mutex.wait_lock ->triggers_list_lock ->leds_list_lock ->(work_completion)(&rdev->wiphy_work) ->(work_completion)(&rdev->conn_work) ->(work_completion)(&rdev->event_work) ->(work_completion)(&(&rdev->dfs_update_channels_wk)->work) ->(work_completion)(&(&rdev->background_cac_done_wk)->work) ->(work_completion)(&rdev->destroy_work) ->(work_completion)(&rdev->propagate_radar_detect_wk) ->(work_completion)(&rdev->propagate_cac_done_wk) ->(work_completion)(&rdev->mgmt_registrations_update_wk) ->(work_completion)(&rdev->background_cac_abort_wk) ->subsys mutex#55 ->gdp_mutex ->(&local->sta_cleanup) ->&wg->device_update_lock ->&bat_priv->forw_bcast_list_lock ->&bat_priv->forw_bat_list_lock ->&bat_priv->gw.list_lock ->(work_completion)(&(&bat_priv->bat_v.ogm_wq)->work) ->&bat_priv->bat_v.ogm_buff_mutex ->&bat_priv->tvlv.container_list_lock ->&bat_priv->tvlv.handler_list_lock ->(work_completion)(&(&bat_priv->nc.work)->work) ->key#17 ->key#18 ->(work_completion)(&(&bat_priv->dat.work)->work) ->&hash->list_locks[i] ->(work_completion)(&(&bat_priv->bla.work)->work) ->key#21 ->(work_completion)(&(&bat_priv->mcast.work)->work) ->(work_completion)(&(&bat_priv->tt.work)->work) ->key#16 ->key#20 ->&bat_priv->tt.req_list_lock ->&bat_priv->tt.changes_list_lock ->&bat_priv->tt.roam_list_lock ->(work_completion)(&(&bat_priv->orig_work)->work) ->key#19 ->wq_mayday_lock ->vmap_area_lock ->purge_vmap_area_lock ->&sb->s_type->i_lock_key#23 ->rename_lock.seqcount ->nf_conntrack_mutex ->pgd_lock ->key ->nf_hook_mutex.wait_lock ->nf_ct_proto_mutex.wait_lock ->&device->compat_devs_mutex ->subsys mutex#83 ->&device->unregistration_lock ->(&timer.timer) ->rds_conn_lock ->(work_completion)(&(&cp->cp_send_w)->work) ->(work_completion)(&(&cp->cp_recv_w)->work) ->(work_completion)(&cp->cp_down_w) ->rds_cong_lock ->&x->lock ->(&x->rtimer) ->crypto_default_null_skcipher_lock ->rdma_nets_rwsem.wait_lock ->devices_rwsem.wait_lock ->&rdev->bss_lock FD: 28 BD: 75 +.+.: stack_depot_init_mutex ->&rq->__lock FD: 150 BD: 3526 ++++: net_rwsem ->&list->lock#2 ->&rq->__lock ->pool_lock#2 ->&obj_hash[i].lock ->nl_table_lock ->nl_table_wait.lock ->quarantine_lock ->&table->lock#4 ->&ndev->lock ->rcu_node_0 ->&data->lock ->&c->lock ->&cfs_rq->removed.lock ->&rcu_state.expedited_wq ->pgd_lock ->stock_lock ->key ->pcpu_lock ->percpu_counters_lock FD: 12 BD: 96 ..-.: proc_inum_ida.xa_lock ->&zone->lock ->&____s->seqcount ->pool_lock#2 ->&c->lock ->&obj_hash[i].lock ->&____s->seqcount#2 FD: 871 BD: 71 +.+.: rtnl_mutex ->&c->lock ->&zone->lock ->&____s->seqcount ->pool_lock#2 ->fs_reclaim ->pcpu_alloc_mutex ->&x->wait#9 ->&obj_hash[i].lock 
->&k->list_lock ->gdp_mutex ->lock ->&root->kernfs_rwsem ->bus_type_sem ->sysfs_symlink_target_lock ->&dev->power.lock ->dpm_list_mtx ->uevent_sock_mutex ->running_helpers_waitq.lock ->subsys mutex#17 ->&dir->lock#2 ->dev_hotplug_mutex ->dev_base_lock ->input_pool.lock ->nl_table_lock ->nl_table_wait.lock ->net_rwsem ->batched_entropy_u32.lock ->&tbl->lock ->sysctl_lock ->&rq->__lock ->krc.lock ->stack_depot_init_mutex ->cpu_hotplug_lock ->kthread_create_lock ->&p->pi_lock ->&x->wait ->wq_pool_mutex ->crngs.lock ->&pool->lock/1 ->&cfs_rq->removed.lock ->lweventlist_lock ->&pool->lock ->rtnl_mutex.wait_lock ->proc_subdir_lock ->proc_inum_ida.xa_lock ->batched_entropy_u8.lock ->kfence_freelist_lock ->&k->k_lock ->quarantine_lock ->param_lock ->(console_sem).lock ->&rdev->wiphy.mtx ->&base->lock ->subsys mutex#57 ->&sdata->sec_mtx ->&local->iflist_mtx#2 ->lock#7 ->failover_lock ->&tn->lock ->&idev->mc_lock ->pool_lock ->&ndev->lock ->rcu_node_0 ->&pnettable->lock ->smc_ib_devices.mutex ->&(&net->nexthop.notifier_chain)->rwsem ->reg_requests_lock ->reg_pending_beacons_lock ->rlock-AF_NETLINK ->(inetaddr_validator_chain).rwsem ->(inetaddr_chain).rwsem ->_xmit_LOOPBACK ->netpoll_srcu ->&in_dev->mc_tomb_lock ->&im->lock ->fib_info_lock ->cbs_list_lock ->(inet6addr_validator_chain).rwsem ->&net->ipv6.addrconf_hash_lock ->&ifa->lock ->&tb->tb6_lock ->&n->list_lock ->&dev_addr_list_lock_key ->napi_hash_lock ->lapb_list_lock ->remove_cache_srcu ->x25_neigh_list_lock ->console_owner_lock ->console_owner ->_xmit_ETHER ->_xmit_SLIP ->&vi->refill_lock ->noop_qdisc.q.lock ->&sem->wait_lock ->&rfkill->lock ->&local->chanctx_mtx ->&dev->tx_global_lock ->&rnp->exp_wq[3] ->&sch->q.lock ->class ->(&tbl->proxy_timer) ->_xmit_VOID ->_xmit_X25 ->&lapbeth->up_lock ->&lapb->lock ->&rnp->exp_wq[0] ->&dir->lock ->&ul->lock#2 ->&n->lock ->dev_addr_sem ->_xmit_IEEE802154 ->reg_indoor_lock ->&nr_netdev_addr_lock_key ->listen_lock ->pcpu_alloc_mutex.wait_lock ->&r->consumer_lock ->&mm->mmap_lock ->pcpu_lock ->(switchdev_blocking_notif_chain).rwsem ->&br->hash_lock ->nf_hook_mutex ->j1939_netdev_lock ->&bat_priv->tvlv.handler_list_lock ->&bat_priv->tvlv.container_list_lock ->&bat_priv->softif_vlan_list_lock ->key#16 ->&bat_priv->tt.changes_list_lock ->kernfs_idr_lock ->&rnp->exp_wq[2] ->&rnp->exp_wq[1] ->tk_core.seq.seqcount ->&wq->mutex ->init_lock ->deferred_lock ->target_list_lock ->&br->lock ->&pn->hash_lock ->&rcu_state.expedited_wq ->&meta->lock ->&hard_iface->bat_iv.ogm_buff_mutex ->ptype_lock ->_xmit_NONE ->lock#9 ->team->team_lock_key#3 ->&hsr->list_lock ->pin_fs_lock ->&sb->s_type->i_mutex_key#3 ->mount_lock ->&xa->xa_lock#13 ->&dev_addr_list_lock_key#3/1 ->req_lock ->&x->wait#11 ->subsys mutex#81 ->bpf_devs_lock ->(work_completion)(&(&devlink_port->type_warn_dw)->work) ->&devlink_port->type_lock ->&vn->sock_lock ->devnet_rename_sem ->&nft_net->commit_mutex ->&ent->pde_unload_lock ->&wg->device_update_lock ->_xmit_SIT ->&bridge_netdev_addr_lock_key/1 ->_xmit_TUNNEL ->_xmit_IPGRE ->_xmit_TUNNEL6 ->&dev_addr_list_lock_key/1 ->&dev_addr_list_lock_key#2/1 ->_xmit_ETHER/1 ->pgd_lock ->key ->percpu_counters_lock ->&nn->netlink_tap_lock ->&batadv_netdev_addr_lock_key/1 ->&vlan_netdev_addr_lock_key/1 ->&macvlan_netdev_addr_lock_key/1 ->&ipvlan->addrs_lock ->&macsec_netdev_addr_lock_key/1 ->key#21 ->&bat_priv->tt.commit_lock ->mmu_notifier_invalidate_range_start ->&sb->s_type->i_lock_key#8 ->k-sk_lock-AF_INET ->k-slock-AF_INET ->k-sk_lock-AF_INET6 ->k-slock-AF_INET6 ->&ul->lock ->&____s->seqcount#2 ->&xs->mutex 
->stock_lock ->&net->xdp.lock ->mirred_list_lock ->&idev->mc_query_lock ->(work_completion)(&(&idev->mc_report_work)->work) ->&idev->mc_report_lock ->&pnn->pndevs.lock ->&pnn->routes.lock ->dev_pm_qos_sysfs_mtx ->deferred_probe_mutex ->device_links_lock ->&caifn->caifdevs.lock ->uevent_sock_mutex.wait_lock ->per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) ->rcu_state.exp_mutex.wait_lock ->(&br->hello_timer) ->(&br->topology_change_timer) ->(&br->tcn_timer) ->(work_completion)(&(&br->gc_work)->work) ->(&brmctx->ip4_mc_router_timer) ->(&brmctx->ip4_other_query.timer) ->(&brmctx->ip4_own_query.timer) ->(&brmctx->ip6_mc_router_timer) ->(&brmctx->ip6_other_query.timer) ->(&brmctx->ip6_own_query.timer) ->__ip_vs_mutex ->flowtable_lock ->&rnp->exp_lock ->rcu_state.exp_mutex ->&net->rules_mod_lock ->(&mrt->ipmr_expire_timer) ->(work_completion)(&ht->run_work) ->&ht->mutex ->nf_connlabels_lock ->(work_completion)(&wdev->disconnect_wk) ->(work_completion)(&wdev->pmsr_free_wk) ->(work_completion)(&sdata->activate_links_work) ->&rdev->dev_wait ->&hwstats->hwsdev_list_lock ->qdisc_mod_lock ->dev->qdisc_tx_busylock ?: &qdisc_tx_busylock#2 ->(work_completion)(&port->bc_work) ->&table->hash[i].lock ->k-clock-AF_INET ->&xa->xa_lock#6 ->&fsnotify_mark_srcu ->raw_notifier_lock ->bcm_notifier_lock ->isotp_notifier_lock ->act_mod_lock ->&tn->idrinfo->lock ->&bridge_netdev_addr_lock_key ->&br->multicast_lock ->(work_completion)(&br->mcast_gc_work) ->rcu_state.barrier_mutex ->&pn->all_ppp_mutex ->&ppp->rlock ->&ppp->wlock ->&dev_addr_list_lock_key#4 ->&pf->rwait ->mrt_lock ->_xmit_NETROM#2 ->&this->info_list_lock ->&tun->lock ->wlock-AF_UNSPEC ->elock-AF_UNSPEC ->&net->xfrm.xfrm_state_lock ->&net->xfrm.xfrm_policy_lock ->&dentry->d_lock ->&sb->s_type->i_lock_key#7 ->&s->s_inode_list_lock ->ifalias_mutex ->sk_lock-AF_INET ->slock-AF_INET ->dev->qdisc_tx_busylock ?: &qdisc_tx_busylock ->&sb->s_type->i_lock_key#23 ->rename_lock.seqcount ->&tn->idrinfo->lock#2 ->&p->tcfa_lock ->prog_idr_lock ->bpf_lock ->&pmc->lock ->&fq->lock ->&local->iflist_mtx ->&app->lock ->(&app->join_timer) ->(&app->periodic_timer) ->&list->lock#14 ->(&app->join_timer)#2 ->&app->lock#2 ->&list->lock#15 ->(work_completion)(&(&priv->scan_result)->work) ->(work_completion)(&(&priv->connect)->work) ->(&hsr->prune_timer) ->(&hsr->announce_timer) ->key#19 ->&bat_priv->forw_bcast_list_lock ->&bat_priv->forw_bat_list_lock ->(work_completion)(&(&forw_packet_aggr->delayed_work)->work) ->(&pmctx->ip6_mc_router_timer) ->(&pmctx->ip4_mc_router_timer) ->k-clock-AF_INET6 ->&r->consumer_lock#2 ->&wg->socket_update_lock ->(work_completion)(&(&bond->mii_work)->work) ->(work_completion)(&(&bond->arp_work)->work) ->(work_completion)(&(&bond->alb_work)->work) ->(work_completion)(&(&bond->ad_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->(work_completion)(&(&bond->slave_arr_work)->work) ->(work_completion)(&port->wq) ->(work_completion)(&(&slave->notify_work)->work) ->(&mp->timer) ->team->team_lock_key#7 ->netlbl_unlhsh_lock ->nr_list_lock ->nr_neigh_list_lock ->&bpq_netdev_addr_lock_key ->key#26 ->&vlan_netdev_addr_lock_key/2 ->&lock->wait_lock ->&block->lock ->&block->cb_lock ->(work_completion)(&q->work) ->sk_lock-AF_INET6 ->slock-AF_INET6 ->__ip_vs_mutex.wait_lock ->&chain->filter_chain_lock ->cls_mod_lock ->&block->proto_destroy_lock ->&p->alloc_lock ->&list->lock#2 ->(&q->adapt_timer) ->sk_lock-AF_CAN ->slock-AF_CAN ->(&q->perturb_timer) ->sk_lock-AF_UNSPEC ->slock-AF_UNSPEC ->mfc_unres_lock ->&newf->file_lock ->&sb->s_type->i_lock_key#4 ->tomoyo_ss 
->&data->lock ->&tn->idrinfo->lock#3 ->&bond->mode_lock ->team->team_lock_key#8 ->&tipc_net(net)->bclock ->&batadv_netdev_addr_lock_key ->team->team_lock_key#9 ->&net->ipv4.ra_mutex ->_xmit_PIMREG ->mrt_lock#2 ->&c->lock#2 ->team->team_lock_key#10 ->team->team_lock_key#11 ->&dev_addr_list_lock_key#3/2 ->&tn->idrinfo->lock#4 ->zones_mutex ->flow_indr_block_lock ->wq_mayday_lock ->hrtimer_bases.lock ->free_vmap_area_lock ->vmap_area_lock ->purge_vmap_area_lock ->&pgdat->kswapd_wait FD: 2 BD: 1 +.-.: drivers/char/random.c:1010 ->input_pool.lock FD: 52 BD: 237 +.+.: lock ->kernfs_idr_lock ->cgroup_idr_lock ->pidmap_lock ->drm_minor_lock ->&file_private->table_lock ->&q->queue_lock ->sg_index_lock ->map_idr_lock ->prog_idr_lock ->btf_idr_lock ->&group->inotify_data.idr_lock ->link_idr_lock ->sctp_assocs_id_lock ->&tn->idr_lock FD: 29 BD: 4115 +.+.: kernfs_idr_lock ->pool_lock#2 ->&c->lock ->&____s->seqcount ->&zone->lock ->&obj_hash[i].lock ->&n->list_lock ->&____s->seqcount#2 FD: 134 BD: 240 ++++: &root->kernfs_rwsem ->&root->kernfs_iattr_rwsem ->kernfs_idr_lock ->&obj_hash[i].lock ->pool_lock#2 ->&rq->__lock ->&cfs_rq->removed.lock ->quarantine_lock ->inode_hash_lock ->fs_reclaim ->mmu_notifier_invalidate_range_start ->tk_core.seq.seqcount ->&sb->s_type->i_lock_key#24 ->&c->lock ->&____s->seqcount ->remove_cache_srcu ->rcu_node_0 ->batched_entropy_u8.lock ->kfence_freelist_lock ->&n->list_lock ->&sem->wait_lock ->&sb->s_type->i_lock_key#30 ->&sb->s_type->i_lock_key#31 ->kernfs_rename_lock ->&xa->xa_lock#12 ->stock_lock ->&____s->seqcount#2 ->&p->pi_lock ->&meta->lock ->kernfs_pr_cont_lock ->&rcu_state.expedited_wq ->&base->lock ->pgd_lock ->key ->pcpu_lock ->percpu_counters_lock FD: 1 BD: 5 ++++: file_systems_lock FD: 130 BD: 244 ++++: &root->kernfs_iattr_rwsem ->&rq->__lock ->&cfs_rq->removed.lock ->&obj_hash[i].lock ->iattr_mutex ->&sem->wait_lock ->tk_core.seq.seqcount ->rcu_node_0 ->&rcu_state.expedited_wq ->pool_lock#2 ->pgd_lock ->key ->pcpu_lock ->percpu_counters_lock ->stock_lock FD: 5 BD: 52 +.+.: sb_lock ->unnamed_dev_ida.xa_lock ->&obj_hash[i].lock ->pool_lock#2 FD: 141 BD: 1 +.+.: &type->s_umount_key/1 ->pool_lock#2 ->pcpu_alloc_mutex ->shrinker_rwsem ->list_lrus_mutex ->sb_lock ->&obj_hash[i].lock ->percpu_counters_lock ->crngs.lock ->&sbinfo->stat_lock ->&zone->lock ->&____s->seqcount ->&c->lock ->&sb->s_type->i_lock_key ->&s->s_inode_list_lock ->tk_core.seq.seqcount ->batched_entropy_u32.lock ->&dentry->d_lock ->fs_reclaim ->mmu_notifier_invalidate_range_start FD: 1 BD: 40 +.+.: list_lrus_mutex FD: 1 BD: 53 ....: unnamed_dev_ida.xa_lock FD: 1 BD: 26 +.+.: &sbinfo->stat_lock FD: 56 BD: 4136 +.+.: &sb->s_type->i_lock_key ->&dentry->d_lock ->&xa->xa_lock#6 ->&dentry->d_lock/1 ->bit_wait_table + i FD: 1 BD: 4128 +.+.: &s->s_inode_list_lock FD: 40 BD: 4195 +.+.: &dentry->d_lock ->&wq ->&dentry->d_lock/1 ->&wq#2 ->&lru->node[i].lock ->sysctl_lock ->&wq#3 ->&dentry->d_lock/2 ->&p->pi_lock FD: 2 BD: 28 ....: mnt_id_ida.xa_lock ->pool_lock#2 FD: 45 BD: 172 +.+.: mount_lock ->mount_lock.seqcount ->&dentry->d_lock ->&obj_hash[i].lock ->pool_lock#2 FD: 43 BD: 172 +.+.: mount_lock.seqcount ->&new_ns->poll ->&dentry->d_lock ->&obj_hash[i].lock ->pool_lock#2 ->quarantine_lock ->&p->pi_lock FD: 139 BD: 1 +.+.: &type->s_umount_key#2/1 ->pool_lock#2 ->pcpu_alloc_mutex ->shrinker_rwsem ->list_lrus_mutex ->sb_lock ->&zone->lock ->&____s->seqcount ->&c->lock ->&sb->s_type->i_lock_key#2 ->&s->s_inode_list_lock ->tk_core.seq.seqcount ->&dentry->d_lock FD: 41 BD: 4136 +.+.: &sb->s_type->i_lock_key#2 
->&dentry->d_lock FD: 1 BD: 2 ..-.: ucounts_lock FD: 42 BD: 194 +.+.: init_fs.lock ->init_fs.seq.seqcount ->&dentry->d_lock FD: 1 BD: 163 +.+.: init_fs.seq.seqcount FD: 139 BD: 1 +.+.: &type->s_umount_key#3/1 ->pool_lock#2 ->pcpu_alloc_mutex ->shrinker_rwsem ->list_lrus_mutex ->sb_lock ->&zone->lock ->&____s->seqcount ->&c->lock ->&sb->s_type->i_lock_key#3 ->&s->s_inode_list_lock ->tk_core.seq.seqcount ->&dentry->d_lock FD: 55 BD: 4138 +.+.: &sb->s_type->i_lock_key#3 ->&dentry->d_lock ->&xa->xa_lock#6 FD: 1 BD: 137 +.+.: cpuhp_state-down FD: 248 BD: 137 +.+.: cpuhp_state-up ->smpboot_threads_lock ->sparse_irq_lock ->&swhash->hlist_mutex ->pmus_lock ->&x->wait#5 ->&obj_hash[i].lock ->hrtimer_bases.lock ->wq_pool_mutex ->rcu_node_0 ->&rq->__lock ->jump_label_mutex ->fs_reclaim ->pool_lock#2 ->&x->wait#9 ->&k->list_lock ->gdp_mutex ->lock ->&root->kernfs_rwsem ->bus_type_sem ->sysfs_symlink_target_lock ->&dev->power.lock ->dpm_list_mtx ->req_lock ->&p->pi_lock ->&x->wait#11 ->uevent_sock_mutex ->running_helpers_waitq.lock ->subsys mutex#24 ->&c->lock ->&zone->lock ->&____s->seqcount ->subsys mutex#25 ->&k->k_lock ->subsys mutex#78 ->&base->lock ->swap_slots_cache_mutex FD: 1 BD: 98 ++++: proc_subdir_lock FD: 140 BD: 1 +.+.: &type->s_umount_key#4/1 ->pool_lock#2 ->pcpu_alloc_mutex ->shrinker_rwsem ->list_lrus_mutex ->&c->lock ->&____s->seqcount ->sb_lock ->&sb->s_type->i_lock_key#4 ->&s->s_inode_list_lock ->tk_core.seq.seqcount ->&dentry->d_lock FD: 41 BD: 73 +.+.: &sb->s_type->i_lock_key#4 ->&dentry->d_lock FD: 32 BD: 148 ....: cgroup_file_kn_lock ->kernfs_notify_lock FD: 34 BD: 147 ..-.: css_set_lock ->cgroup_file_kn_lock ->&p->pi_lock ->&obj_hash[i].lock ->pool_lock#2 ->krc.lock FD: 9 BD: 238 +...: cgroup_idr_lock ->pool_lock#2 ->&c->lock ->&zone->lock ->&____s->seqcount FD: 167 BD: 141 ++++: cpuset_rwsem ->cpuset_rwsem.rss.gp_wait.lock ->rcu_node_0 ->callback_lock ->&p->pi_lock ->&obj_hash[i].lock ->&rq->__lock ->jump_label_mutex ->&p->alloc_lock ->cpuset_attach_wq.lock ->&rnp->exp_lock ->rcu_state.exp_mutex ->rcu_state.exp_mutex.wait_lock ->fs_reclaim ->pool_lock#2 ->sched_domains_mutex FD: 3 BD: 142 ..-.: cpuset_rwsem.rss.gp_wait.lock ->&obj_hash[i].lock FD: 1 BD: 142 ....: callback_lock FD: 1 BD: 138 ....: cpuset_rwsem.waiters.lock FD: 135 BD: 18 +.+.: blkcg_pol_mutex ->pcpu_alloc_mutex ->fs_reclaim ->pool_lock#2 ->&c->lock ->&n->list_lock FD: 3 BD: 4814 ..-.: batched_entropy_u8.lock ->crngs.lock FD: 1 BD: 4108 +.+.: &pgdat->memcg_lru.lock FD: 1 BD: 19 +.+.: devcgroup_mutex FD: 49 BD: 138 +.+.: freezer_mutex ->freezer_lock ->&rq->__lock ->rcu_node_0 ->freezer_mutex.wait_lock FD: 1 BD: 1 +.+.: &pool->lock#2 FD: 301 BD: 2 +.+.: spec_ctrl_mutex ->cpu_hotplug_lock ->(console_sem).lock FD: 48 BD: 289 +.+.: rcu_state.exp_mutex ->rcu_node_0 ->rcu_state.exp_wake_mutex ->&obj_hash[i].lock ->&pool->lock ->&rnp->exp_wq[2] ->&rq->__lock ->&rnp->exp_wq[3] ->&rnp->exp_wq[0] ->&rnp->exp_wq[1] ->rcu_state.exp_mutex.wait_lock ->&rcu_state.expedited_wq ->pool_lock#2 ->&cfs_rq->removed.lock ->pgd_lock ->key ->pcpu_lock ->percpu_counters_lock ->stock_lock FD: 38 BD: 294 +.+.: rcu_state.exp_wake_mutex ->rcu_node_0 ->&rnp->exp_lock ->&rnp->exp_wq[0] ->&rnp->exp_wq[1] ->&rnp->exp_wq[2] ->&rnp->exp_wq[3] ->&rq->__lock ->rcu_state.exp_wake_mutex.wait_lock ->&pool->lock ->&cfs_rq->removed.lock ->&obj_hash[i].lock ->pool_lock#2 FD: 1 BD: 305 +.+.: &rnp->exp_lock FD: 29 BD: 307 ....: &rnp->exp_wq[0] ->&p->pi_lock FD: 29 BD: 300 ....: &rnp->exp_wq[1] ->&p->pi_lock FD: 1 BD: 141 ....: init_sighand.siglock FD: 1 BD: 
3 +.+.: init_files.file_lock FD: 13 BD: 247 ....: pidmap_lock ->pool_lock#2 ->&obj_hash[i].lock ->&c->lock ->&zone->lock ->&____s->seqcount ->&n->list_lock ->&____s->seqcount#2 ->batched_entropy_u8.lock ->kfence_freelist_lock FD: 192 BD: 137 ++++: cgroup_threadgroup_rwsem ->css_set_lock ->&p->pi_lock ->tk_core.seq.seqcount ->tasklist_lock ->&rq->__lock ->&cfs_rq->removed.lock ->&obj_hash[i].lock ->pool_lock#2 ->&sighand->siglock ->cgroup_threadgroup_rwsem.rss.gp_wait.lock ->rcu_node_0 ->inode_hash_lock ->fs_reclaim ->mmu_notifier_invalidate_range_start ->&sb->s_type->i_lock_key#30 ->&root->kernfs_iattr_rwsem ->&s->s_inode_list_lock ->&xa->xa_lock#6 ->&fsnotify_mark_srcu ->&c->lock ->&____s->seqcount#2 ->&____s->seqcount ->cpuset_rwsem ->cpuset_rwsem.waiters.lock ->cpuset_rwsem.rss.gp_wait.lock ->&p->alloc_lock ->freezer_mutex ->&rnp->exp_lock ->rcu_state.exp_mutex ->freezer_mutex.wait_lock ->&rcu_state.expedited_wq ->cgroup_threadgroup_rwsem.waiters.lock ->rcu_state.exp_mutex.wait_lock FD: 28 BD: 4585 -.-.: &p->pi_lock ->&rq->__lock ->&cfs_rq->removed.lock FD: 69 BD: 140 .+.+: tasklist_lock ->init_task.pi_lock ->init_sighand.siglock ->&p->pi_lock ->&sighand->siglock ->&pid->wait_pidfd ->&obj_hash[i].lock ->&c->lock ->&n->list_lock ->stock_lock ->&p->alloc_lock ->per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) ->&base->lock FD: 1 BD: 4664 -.-.: &per_cpu_ptr(group->pcpu, cpu)->seq FD: 1 BD: 1 ....: (kthreadd_done).wait.lock FD: 43 BD: 147 ....: &sighand->siglock ->&sig->wait_chldexit ->input_pool.lock ->&(&sig->stats_lock)->lock ->&____s->seqcount ->pool_lock#2 ->&c->lock ->hrtimer_bases.lock ->&p->pi_lock ->&obj_hash[i].lock ->&sighand->signalfd_wqh ->batched_entropy_u8.lock ->kfence_freelist_lock ->&meta->lock ->&tty->ctrl.lock ->&prev->lock ->quarantine_lock ->&rq->__lock ->stock_lock ->&n->list_lock ->per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) ->&____s->seqcount#2 FD: 50 BD: 192 +.+.: &p->alloc_lock ->&____s->seqcount#2 ->init_fs.lock ->&fs->lock ->&x->wait ->&memcg->mm_list.lock ->&x->wait#25 ->&newf->file_lock ->&p->pi_lock FD: 1 BD: 4771 .-.-: &____s->seqcount#2 FD: 128 BD: 4107 +.+.: fs_reclaim ->mmu_notifier_invalidate_range_start ->&mapping->i_mmap_rwsem ->&rq->__lock ->&cfs_rq->removed.lock ->&obj_hash[i].lock ->pgd_lock ->stock_lock ->pool_lock#2 ->key ->pcpu_lock ->percpu_counters_lock ->rcu_node_0 ->&rcu_state.expedited_wq ->&zone->lock ->&pgdat->kcompactd_wait ->lock#4 ->lock#5 ->batched_entropy_u8.lock ->&lruvec->lru_lock ->&mapping->private_lock ->&sb->s_type->i_lock_key#3 ->&c->lock ->&____s->seqcount#2 ->&____s->seqcount ->&sb->s_type->i_lock_key#22 ->&vmpr->sr_lock ->&pgdat->memcg_lru.lock ->&meta->lock ->kfence_freelist_lock ->swap_slots_cache_mutex ->&cache->alloc_lock ->shmem_swaplist_mutex ->&p->lock#2 ->&tree->lock ->&xa->xa_lock#19 ->remove_cache_srcu ->per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) ->quarantine_lock ->&folio_wait_table[i] ->&sem->wait_lock ->&p->pi_lock ->&memcg->mm_list.lock ->&n->list_lock FD: 37 BD: 4126 +.+.: mmu_notifier_invalidate_range_start ->dma_fence_map ->&rq->__lock ->&cfs_rq->removed.lock ->&obj_hash[i].lock ->pgd_lock ->stock_lock ->key ->pcpu_lock ->percpu_counters_lock ->rcu_node_0 ->&rcu_state.expedited_wq ->pool_lock#2 FD: 1 BD: 226 +.+.: kthread_create_lock FD: 29 BD: 280 ....: &x->wait ->&p->pi_lock FD: 37 BD: 142 +.+.: wq_pool_attach_mutex ->&p->pi_lock ->&x->wait#7 ->&pool->lock ->&stopper->lock ->&stop_pi_lock ->&rq->__lock ->wq_pool_attach_mutex.wait_lock ->&pool->lock/1 ->&cfs_rq->removed.lock ->&obj_hash[i].lock ->pool_lock#2 FD: 29 BD: 
4372 ..-.: wq_mayday_lock ->&p->pi_lock FD: 1 BD: 141 ....: &xa->xa_lock FD: 33 BD: 1 +.-.: (&pool->mayday_timer) ->&pool->lock/1 ->&obj_hash[i].lock ->&base->lock ->&pool->lock FD: 57 BD: 1 +.+.: (wq_completion)rcu_gp ->(work_completion)(&rnp->exp_poll_wq) ->(work_completion)(&(&ssp->srcu_sup->work)->work) ->(work_completion)(&sdp->work) ->(work_completion)(&rew->rew_work) ->&rq->__lock ->&cfs_rq->removed.lock ->&obj_hash[i].lock FD: 32 BD: 2 +.+.: (work_completion)(&rnp->exp_poll_wq) ->&rnp->exp_poll_lock FD: 14 BD: 1 +.-.: (&wq_watchdog_timer) ->&obj_hash[i].lock ->&base->lock FD: 978 BD: 1 +.+.: (wq_completion)events_unbound ->(work_completion)(&(&kfence_timer)->work) ->(work_completion)(&entry->work) ->(next_reseed).work ->(work_completion)(&sub_info->work) ->(stats_flush_dwork).work ->&rq->__lock ->deferred_probe_work ->(work_completion)(&map->work) ->(work_completion)(&barr->work) ->connector_reaper_work ->(reaper_work).work ->(work_completion)(&port->bc_work) ->(work_completion)(&pool->idle_cull_work) FD: 302 BD: 2 +.+.: (work_completion)(&(&kfence_timer)->work) ->cpu_hotplug_lock ->allocation_wait.lock ->&rq->__lock ->&obj_hash[i].lock ->&base->lock ->&cfs_rq->removed.lock ->pool_lock#2 ->rcu_node_0 ->&rcu_state.expedited_wq FD: 29 BD: 3 -.-.: allocation_wait.lock ->&p->pi_lock FD: 1 BD: 4814 ..-.: kfence_freelist_lock FD: 1 BD: 4172 ..-.: &meta->lock FD: 11 BD: 2 ....: rcu_tasks.cbs_gbl_lock ->(console_sem).lock ->rcu_tasks__percpu.cbs_pcpu_lock ->&ACCESS_PRIVATE(rtpcp, lock) FD: 3 BD: 3 ....: rcu_tasks__percpu.cbs_pcpu_lock ->&obj_hash[i].lock FD: 3 BD: 139 ....: &ACCESS_PRIVATE(rtpcp, lock) ->&obj_hash[i].lock FD: 5 BD: 2 ....: rcu_tasks_trace.cbs_gbl_lock ->rcu_tasks_trace__percpu.cbs_pcpu_lock ->&ACCESS_PRIVATE(rtpcp, lock) FD: 3 BD: 137 ....: rcu_tasks_trace__percpu.cbs_pcpu_lock ->&obj_hash[i].lock FD: 55 BD: 1 +.+.: rcu_tasks.tasks_gp_mutex ->rcu_tasks.cbs_gbl_lock ->&rq->__lock ->rcu_tasks__percpu.cbs_pcpu_lock ->&obj_hash[i].lock ->&base->lock ->tasks_rcu_exit_srcu_srcu_usage.lock ->&ACCESS_PRIVATE(sdp, lock) ->tasks_rcu_exit_srcu ->&x->wait#3 ->kernel/rcu/tasks.h:147 ->(&timer.timer) ->&x->wait#2 ->(console_sem).lock ->console_owner_lock FD: 29 BD: 3 ....: &x->wait#2 ->&p->pi_lock FD: 29 BD: 310 ....: &rnp->exp_wq[2] ->&p->pi_lock FD: 32 BD: 6 ....: tasks_rcu_exit_srcu_srcu_usage.lock ->&obj_hash[i].lock ->&ACCESS_PRIVATE(sdp, lock) FD: 1 BD: 2 ....: tasks_rcu_exit_srcu FD: 41 BD: 2 +.+.: (work_completion)(&(&ssp->srcu_sup->work)->work) ->&ssp->srcu_sup->srcu_gp_mutex ->tasks_rcu_exit_srcu_srcu_usage.lock ->&ssp->srcu_sup->srcu_cb_mutex ->remove_cache_srcu_srcu_usage.lock ->wakeup_srcu_srcu_usage.lock ->&obj_hash[i].lock ->&base->lock ->&ACCESS_PRIVATE(ssp->srcu_sup, lock) ->&rq->__lock ->tracepoint_srcu_srcu_usage.lock ->&cfs_rq->removed.lock ->pool_lock#2 ->rcu_node_0 ->&rcu_state.expedited_wq FD: 40 BD: 3 +.+.: &ssp->srcu_sup->srcu_gp_mutex ->tasks_rcu_exit_srcu_srcu_usage.lock ->&rq->__lock ->&ssp->srcu_sup->srcu_cb_mutex ->remove_cache_srcu_srcu_usage.lock ->wakeup_srcu_srcu_usage.lock ->&ACCESS_PRIVATE(ssp->srcu_sup, lock) ->tracepoint_srcu_srcu_usage.lock ->rcu_node_0 ->&rcu_state.expedited_wq ->&cfs_rq->removed.lock ->&obj_hash[i].lock ->pool_lock#2 FD: 29 BD: 23 ....: &x->wait#3 ->&p->pi_lock FD: 304 BD: 1 +.+.: rcu_tasks_trace.tasks_gp_mutex ->rcu_tasks_trace.cbs_gbl_lock ->&rq->__lock ->rcu_tasks_trace__percpu.cbs_pcpu_lock ->cpu_hotplug_lock ->&x->wait#2 ->&obj_hash[i].lock ->&base->lock ->(&timer.timer) ->(console_sem).lock FD: 37 BD: 4 +.+.: 
&ssp->srcu_sup->srcu_cb_mutex ->tasks_rcu_exit_srcu_srcu_usage.lock ->remove_cache_srcu_srcu_usage.lock ->wakeup_srcu_srcu_usage.lock ->&ACCESS_PRIVATE(ssp->srcu_sup, lock) ->tracepoint_srcu_srcu_usage.lock ->&obj_hash[i].lock ->&base->lock ->&rq->__lock FD: 33 BD: 2 +.+.: (work_completion)(&sdp->work) ->&ACCESS_PRIVATE(sdp, lock) ->&obj_hash[i].lock ->&x->wait#3 ->&rq->__lock ->pool_lock#2 ->rcu_node_0 ->&base->lock ->&cfs_rq->removed.lock ->&rcu_state.expedited_wq FD: 1 BD: 2 ....: kernel/rcu/tasks.h:147 FD: 5 BD: 1 -.-.: (null) ->tk_core.seq.seqcount FD: 33 BD: 1 ..-.: &(&kfence_timer)->timer FD: 29 BD: 197 +.-.: (&timer.timer) ->&p->pi_lock FD: 29 BD: 306 ....: &rnp->exp_wq[3] ->&p->pi_lock FD: 1 BD: 1 ....: &nmi_desc[0].lock FD: 169 BD: 138 +.+.: smpboot_threads_lock ->fs_reclaim ->batched_entropy_u8.lock ->kfence_freelist_lock ->pool_lock#2 ->kthread_create_lock ->&p->pi_lock ->&x->wait ->&rq->__lock ->&obj_hash[i].lock ->&c->lock ->&n->list_lock ->cpuset_rwsem FD: 29 BD: 4037 ..-.: &rcu_state.gp_wq ->&p->pi_lock FD: 28 BD: 300 -.-.: &stop_pi_lock ->&rq->__lock FD: 1 BD: 299 -.-.: &stopper->lock FD: 1 BD: 2 +.+.: (module_notify_list).rwsem FD: 1 BD: 1 +.+.: ddebug_lock FD: 1 BD: 1 ....: rcu_callback FD: 1 BD: 1 .+.+: &pmus_srcu FD: 301 BD: 1 +.+.: watchdog_mutex ->cpu_hotplug_lock FD: 29 BD: 137 ....: &x->wait#4 ->&p->pi_lock FD: 1075 BD: 1 +.+.: (wq_completion)events ->(work_completion)(&sscs.work) ->pcpu_balance_work ->(work_completion)(&pwq->unbound_release_work) ->(shepherd).work ->(work_completion)(&rfkill_global_led_trigger_work) ->timer_update_work ->(work_completion)(&p->wq) ->(work_completion)(&(&group->avgs_work)->work) ->(work_completion)(&(&krcp->monitor_work)->work) ->(work_completion)(&helper->damage_work) ->(work_completion)(&rfkill->sync_work) ->(linkwatch_work).work ->(work_completion)(&w->work) ->(work_completion)(&vi->config_work) ->(work_completion)(&blkg->free_work) ->(work_completion)(&gadget->work) ->kernfs_notify_work ->async_lookup_work ->autoload_work ->(work_completion)(&barr->work) ->drain_vmap_work ->(work_completion)(&(&ovs_net->masks_rebalance)->work) ->netstamp_work ->reg_work ->(work_completion)(&fw_work->work) ->(delayed_fput_work).work ->(work_completion)(&s->destroy_work) ->(work_completion)(&aux->work) ->(work_completion)(&ht->run_work) ->(work_completion)(&w->w) ->(work_completion)(&(&krcp->krw_arr[i].rcu_work)->work) ->(debug_obj_work).work ->(deferred_probe_timeout_work).work ->(work_completion)(&w->work)#2 ->(regulator_init_complete_work).work ->(work_completion)(&cgrp->bpf.release_work) ->deferred_process_work ->(work_completion)(&data->fib_event_work) ->(work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) ->(work_completion)(&(&hwstats->traffic_dw)->work) ->(work_completion)(&(&conn->info_timer)->work) ->(work_completion)(&rdev->wiphy_work) ->wireless_nlevent_work ->(work_completion)(&pool->work) ->fqdir_free_work ->free_ipc_work ->xfrm_state_gc_work ->(work_completion)(&(&psock->rwork)->work) ->&rq->__lock ->(work_completion)(&(&devlink->rwork)->work) ->(work_completion)(&rdev->destroy_work) ->(work_completion)(&nlk->work) ->(ima_keys_delayed_work).work ->(work_completion)(&umem->work) ->((ipv6_flowlabel_exclusive).work).work ->((tcp_md5_needed).work).work ->(work_completion)(&smcibdev->port_event_work) ->(work_completion)(&data->dm_alert_work) ->&cfs_rq->removed.lock ->&obj_hash[i].lock ->rcu_node_0 ->(work_completion)(&aux->work)#2 ->(work_completion)(&msk->work) ->(work_completion)(&crct10dif_rehash_work) 
->(work_completion)(&port->wq) ->(work_completion)(&net->xfrm.policy_hthresh.work) ->&rcu_state.expedited_wq ->(work_completion)(&(&krcp->page_cache_work)->work) FD: 31 BD: 2 +.+.: (work_completion)(&sscs.work) ->&x->wait#5 ->&obj_hash[i].lock ->hrtimer_bases.lock ->&x->wait#4 FD: 1 BD: 139 -.-.: &x->wait#5 FD: 2 BD: 195 +.+.: &newf->file_lock ->&newf->resize_wait FD: 1 BD: 1 ....: &p->vtime.seqcount FD: 40 BD: 136 +.+.: mem_hotplug_lock ->mem_hotplug_lock.rss.gp_wait.lock FD: 3 BD: 137 ..-.: mem_hotplug_lock.rss.gp_wait.lock ->&obj_hash[i].lock FD: 1 BD: 136 ....: mem_hotplug_lock.waiters.lock FD: 303 BD: 1 +.+.: cpu_add_remove_lock ->cpu_hotplug_lock ->cpu_hotplug_lock.waiters.lock ->&rq->__lock ->cpu_hotplug_lock.rss.gp_wait.lock ->spec_ctrl_mutex ->cpuset_hotplug_work FD: 3 BD: 136 ..-.: cpu_hotplug_lock.rss.gp_wait.lock ->&obj_hash[i].lock FD: 1 BD: 137 +.+.: pcp_batch_high_lock FD: 1 BD: 136 +.+.: relay_channels_mutex FD: 173 BD: 142 +.+.: sparse_irq_lock ->text_mutex ->fs_reclaim ->pool_lock#2 ->free_vmap_area_lock ->vmap_area_lock ->&____s->seqcount ->init_mm.page_table_lock ->(console_sem).lock ->rtc_lock ->&rq->__lock ->&irq_desc_lock_class ->lock ->&root->kernfs_rwsem ->&c->lock ->pcpu_alloc_mutex ->&obj_hash[i].lock ->&zone->lock ->&n->list_lock FD: 1 BD: 144 ....: rtc_lock FD: 1 BD: 4664 ....: &rq->__lock/1 FD: 29 BD: 137 ....: &x->wait#6 ->&p->pi_lock FD: 1 BD: 4664 -.-.: &cfs_rq->removed.lock FD: 1 BD: 143 ....: &x->wait#7 FD: 29 BD: 136 ....: cpu_hotplug_lock.waiters.lock ->&p->pi_lock FD: 18 BD: 4664 -.-.: &rt_b->rt_runtime_lock ->&rt_rq->rt_runtime_lock ->tk_core.seq.seqcount ->hrtimer_bases.lock FD: 1 BD: 4665 -...: &rt_rq->rt_runtime_lock FD: 1 BD: 2 +.+.: cpuset_hotplug_work FD: 31 BD: 136 +.+.: stop_cpus_mutex ->&stopper->lock ->&stop_pi_lock ->&rq->__lock ->&x->wait#8 ->&cfs_rq->removed.lock ->&obj_hash[i].lock ->pool_lock#2 FD: 1 BD: 138 ....: &x->wait#8 FD: 140 BD: 142 +.+.: sched_domains_mutex ->fs_reclaim ->pool_lock#2 ->&obj_hash[i].lock ->pcpu_alloc_mutex ->&c->lock ->&zone->lock ->&____s->seqcount ->&rq->__lock ->pcpu_lock ->&dl_b->lock ->css_set_lock ->&p->pi_lock ->&stop_pi_lock FD: 1 BD: 4664 ....: &cp->lock FD: 1 BD: 1 +.+.: (memory_chain).rwsem FD: 141 BD: 1 +.+.: &type->s_umount_key#5/1 ->fs_reclaim ->pool_lock#2 ->pcpu_alloc_mutex ->shrinker_rwsem ->list_lrus_mutex ->sb_lock ->&obj_hash[i].lock ->percpu_counters_lock ->crngs.lock ->&sbinfo->stat_lock ->mmu_notifier_invalidate_range_start ->&sb->s_type->i_lock_key#5 ->&s->s_inode_list_lock ->tk_core.seq.seqcount ->batched_entropy_u32.lock ->&dentry->d_lock FD: 55 BD: 4136 +.+.: &sb->s_type->i_lock_key#5 ->&dentry->d_lock ->&xa->xa_lock#6 FD: 29 BD: 1 ....: (setup_done).wait.lock ->&p->pi_lock FD: 143 BD: 25 ++++: namespace_sem ->fs_reclaim ->&zone->lock ->&____s->seqcount ->pool_lock#2 ->&c->lock ->mnt_id_ida.xa_lock ->pcpu_alloc_mutex ->&dentry->d_lock ->mount_lock ->rename_lock ->&obj_hash[i].lock ->&new_ns->ns_lock ->&rq->__lock ->stock_lock ->&____s->seqcount#2 ->remove_cache_srcu ->&n->list_lock ->rcu_node_0 ->&cfs_rq->removed.lock ->namespace_sem.wait_lock ->pcpu_alloc_mutex.wait_lock ->&p->pi_lock FD: 1 BD: 160 +.+.: &____s->seqcount#3 FD: 130 BD: 1 +.+.: &type->s_umount_key#6 ->fs_reclaim ->pool_lock#2 ->&dentry->d_lock ->&zone->lock ->&____s->seqcount ->&c->lock ->&lru->node[i].lock ->&sbinfo->stat_lock ->&obj_hash[i].lock FD: 29 BD: 4197 +.+.: &lru->node[i].lock FD: 144 BD: 8 ++++: &sb->s_type->i_mutex_key ->namespace_sem ->fs_reclaim ->pool_lock#2 ->&dentry->d_lock ->rename_lock.seqcount 
->tomoyo_ss ->tk_core.seq.seqcount ->&sb->s_type->i_lock_key#2 ->&wb->list_lock ->&c->lock ->&zone->lock ->&____s->seqcount ->&dentry->d_lock/1 ->&obj_hash[i].lock ->&rq->__lock ->&cfs_rq->removed.lock FD: 42 BD: 25 +.+.: rename_lock ->rename_lock.seqcount FD: 41 BD: 193 +.+.: rename_lock.seqcount ->&dentry->d_lock ->&dentry->d_lock/2 FD: 1 BD: 173 ....: &new_ns->poll FD: 2 BD: 4198 +.+.: &____s->seqcount#4 ->&____s->seqcount#4/1 FD: 42 BD: 187 +.+.: &fs->lock ->&____s->seqcount#3 ->&dentry->d_lock FD: 1 BD: 168 +.+.: req_lock FD: 153 BD: 1 +.+.: of_mutex ->fs_reclaim ->pool_lock#2 ->lock ->&root->kernfs_rwsem FD: 1 BD: 202 ....: &x->wait#9 FD: 1 BD: 229 +.+.: &k->list_lock FD: 28 BD: 208 ++++: bus_type_sem ->&rq->__lock ->&cfs_rq->removed.lock ->&obj_hash[i].lock FD: 33 BD: 216 -...: &dev->power.lock ->&dev->power.lock/1 ->&dev->power.wait_queue FD: 34 BD: 204 +.+.: dpm_list_mtx ->(console_sem).lock ->&rq->__lock FD: 138 BD: 217 +.+.: uevent_sock_mutex ->fs_reclaim ->&____s->seqcount ->pool_lock#2 ->&c->lock ->nl_table_lock ->&obj_hash[i].lock ->nl_table_wait.lock ->&rq->__lock ->&cfs_rq->removed.lock ->&zone->lock ->quarantine_lock ->rlock-AF_NETLINK ->rcu_node_0 ->remove_cache_srcu ->&n->list_lock ->batched_entropy_u8.lock ->kfence_freelist_lock ->&meta->lock ->uevent_sock_mutex.wait_lock ->mmu_notifier_invalidate_range_start ->&____s->seqcount#2 ->&rcu_state.expedited_wq ->pgd_lock ->key ->pcpu_lock ->percpu_counters_lock ->stock_lock ->&base->lock FD: 1 BD: 198 ....: running_helpers_waitq.lock FD: 1 BD: 216 +.+.: sysfs_symlink_target_lock FD: 2 BD: 279 +.+.: &k->k_lock ->klist_remove_lock FD: 1 BD: 1 ....: &dev->mutex FD: 1 BD: 1 +.+.: subsys mutex FD: 2 BD: 1 +.+.: memory_blocks.xa_lock ->pool_lock#2 FD: 1 BD: 1 +.+.: subsys mutex#2 FD: 131 BD: 12 +.+.: register_lock ->proc_subdir_lock ->fs_reclaim ->pool_lock#2 ->proc_inum_ida.xa_lock ->&c->lock ->&zone->lock ->&____s->seqcount ->&obj_hash[i].lock FD: 1 BD: 2 +.+.: (pm_chain_head).rwsem FD: 1 BD: 1 +.+.: cpufreq_governor_mutex FD: 43 BD: 2 +.+.: (work_completion)(&rew->rew_work) ->rcu_node_0 ->rcu_state.exp_wake_mutex ->&rcu_state.expedited_wq ->&obj_hash[i].lock ->&base->lock ->&rq->__lock ->(&timer.timer) ->&pool->lock ->&cfs_rq->removed.lock ->pool_lock#2 ->pool_lock ->rcu_state.exp_wake_mutex.wait_lock ->&p->pi_lock FD: 1 BD: 1 +.+.: dyn_event_ops_mutex FD: 1 BD: 2 ++++: binfmt_lock FD: 1 BD: 103 +.+.: pin_fs_lock FD: 140 BD: 1 +.+.: &type->s_umount_key#7/1 ->fs_reclaim ->pool_lock#2 ->pcpu_alloc_mutex ->shrinker_rwsem ->list_lrus_mutex ->sb_lock ->&____s->seqcount ->&c->lock ->mmu_notifier_invalidate_range_start ->&sb->s_type->i_lock_key#6 ->&s->s_inode_list_lock ->tk_core.seq.seqcount ->&dentry->d_lock FD: 41 BD: 3 +.+.: &sb->s_type->i_lock_key#6 ->&dentry->d_lock FD: 131 BD: 1 +.+.: &sb->s_type->i_mutex_key#2 ->&sb->s_type->i_lock_key#6 ->rename_lock.seqcount ->fs_reclaim ->pool_lock#2 ->&dentry->d_lock ->mmu_notifier_invalidate_range_start ->&s->s_inode_list_lock ->tk_core.seq.seqcount ->&c->lock ->&zone->lock ->&____s->seqcount ->&rq->__lock ->batched_entropy_u8.lock ->kfence_freelist_lock FD: 29 BD: 4196 ....: &wq ->&p->pi_lock FD: 1 BD: 36 +.+.: chrdevs_lock FD: 952 BD: 1 ++++: cb_lock ->genl_mutex ->fs_reclaim ->pool_lock#2 ->rlock-AF_NETLINK ->&c->lock ->rtnl_mutex ->&obj_hash[i].lock ->&n->list_lock ->&____s->seqcount ->&rdev->wiphy.mtx ->nlk_cb_mutex-GENERIC ->&rq->__lock ->batched_entropy_u8.lock ->kfence_freelist_lock ->remove_cache_srcu ->genl_mutex.wait_lock ->&p->pi_lock ->rtnl_mutex.wait_lock ->&lock->wait_lock 
->&____s->seqcount#2 ->rcu_node_0 ->(console_sem).lock ->console_owner_lock ->console_owner ->once_lock ->nf_hook_mutex ->cpu_hotplug_lock ->&ilan->xlat.locks ->&rcu_state.expedited_wq ->&dir->lock#2 ->&devlink->lock_key#4 ->stock_lock ->ovs_mutex ->per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) ->&cfs_rq->removed.lock ->&devlink->lock_key#7 ->&devlink->lock_key#9 ->quarantine_lock ->&data->lock ->&devlink->lock_key#10 ->&devlink->lock_key#8 ->&sdata->lock FD: 928 BD: 5 +.+.: genl_mutex ->fs_reclaim ->pool_lock#2 ->nl_table_lock ->nl_table_wait.lock ->&obj_hash[i].lock ->rlock-AF_NETLINK ->&c->lock ->&n->list_lock ->&____s->seqcount ->rtnl_mutex ->rtnl_mutex.wait_lock ->&p->pi_lock ->&rq->__lock ->quarantine_lock ->genl_mutex.wait_lock ->remove_cache_srcu ->hwsim_radio_lock ->&x->wait#9 ->batched_entropy_u32.lock ->&k->list_lock ->gdp_mutex ->lock ->&root->kernfs_rwsem ->bus_type_sem ->sysfs_symlink_target_lock ->&dev->power.lock ->dpm_list_mtx ->&zone->lock ->uevent_sock_mutex ->subsys mutex#55 ->device_links_lock ->&k->k_lock ->deferred_probe_mutex ->cpu_hotplug_lock ->wq_pool_mutex ->crngs.lock ->triggers_list_lock ->leds_list_lock ->rfkill_global_mutex ->rfkill_global_mutex.wait_lock ->pin_fs_lock ->&sb->s_type->i_mutex_key#3 ->(inetaddr_chain).rwsem ->inet6addr_chain.lock ->&____s->seqcount#2 ->&tn->node_list_lock ->rcu_node_0 ->&rcu_state.expedited_wq ->(console_sem).lock ->console_owner_lock ->console_owner ->nfc_devlist_mutex ->smc_lgr_list.lock ->&ht->lock ->stock_lock ->mmu_notifier_invalidate_range_start ->&sb->s_type->i_lock_key#8 ->&dir->lock ->pcpu_alloc_mutex ->msk_lock-AF_INET ->mlock-AF_INET ->k-sk_lock-AF_INET6 ->k-slock-AF_INET6 ->(work_completion)(&msk->work) ->&xa->xa_lock#6 ->&fsnotify_mark_srcu ->&table->hash[i].lock ->k-clock-AF_INET6 ->nbd_index_mutex ->&nbd->config_lock ->&cfs_rq->removed.lock ->&fn->fou_lock ->k-sk_lock-AF_TIPC ->k-slock-AF_TIPC ->net_dm_mutex ->calipso_doi_list_lock ->&pnettable->lock ->&data->lock ->batched_entropy_u8.lock ->kfence_freelist_lock ->&pn->l2tp_tunnel_idr_lock ->l2tp_ip_lock ->k-sk_lock-AF_INET ->k-slock-AF_INET ->__ip_vs_mutex ->&meta->lock ->key#21 ->k-clock-AF_INET ->&pernet->lock ->sk_lock-AF_NETLINK ->slock-AF_NETLINK ->key#16 FD: 140 BD: 1 +.+.: &type->s_umount_key#8/1 ->fs_reclaim ->pool_lock#2 ->pcpu_alloc_mutex ->shrinker_rwsem ->list_lrus_mutex ->sb_lock ->mmu_notifier_invalidate_range_start ->&sb->s_type->i_lock_key#7 ->&s->s_inode_list_lock ->tk_core.seq.seqcount ->&dentry->d_lock FD: 42 BD: 96 +.+.: &sb->s_type->i_lock_key#7 ->&dentry->d_lock ->bit_wait_table + i FD: 141 BD: 94 +.+.: &sb->s_type->i_mutex_key#3 ->&sb->s_type->i_lock_key#7 ->rename_lock.seqcount ->fs_reclaim ->pool_lock#2 ->&dentry->d_lock ->mmu_notifier_invalidate_range_start ->&s->s_inode_list_lock ->tk_core.seq.seqcount ->&c->lock ->&____s->seqcount ->&zone->lock ->&obj_hash[i].lock ->&rq->__lock ->batched_entropy_u8.lock ->kfence_freelist_lock ->&n->list_lock ->(console_sem).lock ->rcu_node_0 ->remove_cache_srcu ->pin_fs_lock ->mount_lock ->&fsnotify_mark_srcu ->&xa->xa_lock#6 ->&____s->seqcount#2 ->&rcu_state.gp_wq ->&xa->xa_lock#12 ->stock_lock ->per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) ->&rcu_state.expedited_wq ->&cfs_rq->removed.lock FD: 1 BD: 4 +.+.: subsys mutex#3 FD: 4 BD: 6 ....: async_lock ->&obj_hash[i].lock ->pool_lock#2 FD: 403 BD: 2 +.+.: (work_completion)(&entry->work) ->tk_core.seq.seqcount ->&dev->power.lock ->&k->list_lock ->sysfs_symlink_target_lock ->fs_reclaim ->&zone->lock ->&____s->seqcount ->pool_lock#2 ->&obj_hash[i].lock ->lock 
->&root->kernfs_rwsem ->&x->wait#9 ->&c->lock ->uevent_sock_mutex ->running_helpers_waitq.lock ->&k->k_lock ->async_lock ->async_done.lock ->&dentry->d_lock ->&sb->s_type->i_mutex_key ->sb_writers#2 ->&pool->lock/1 ->cpu_hotplug_lock ->wq_pool_mutex ->&n->list_lock ->pcpu_alloc_mutex ->batched_entropy_u32.lock ->mmu_notifier_invalidate_range_start ->blk_queue_ida.xa_lock ->&q->sysfs_lock ->&set->tag_list_lock ->bio_slab_lock ->percpu_counters_lock ->&sb->s_type->i_lock_key#3 ->&s->s_inode_list_lock ->&xa->xa_lock#8 ->&q->mq_freeze_lock ->percpu_ref_switch_lock ->&q->queue_lock ->major_names_lock ->floppy_lock ->rtc_lock ->&wq->mutex ->&desc->request_mutex ->register_lock ->&irq_desc_lock_class ->proc_subdir_lock ->proc_inum_ida.xa_lock ->resource_lock ->&rq->__lock ->&base->lock ->(&timer.timer) ->command_done.lock ->&shost->scan_mutex ->(console_sem).lock ->console_owner_lock ->console_owner ->async_scan_lock ->klist_remove_lock ->kernfs_idr_lock ->(&motor_off_timer[drive]) ->&xa->xa_lock#7 ->&q->unused_hctx_lock ->(&sq->pending_timer) ->(work_completion)(&td->dispatch_work) ->&q->blkcg_mutex ->pcpu_lock ->&xa->xa_lock#6 ->&fsnotify_mark_srcu FD: 1 BD: 20 .+.+: device_links_srcu FD: 1 BD: 3 +.+.: regulator_list_mutex FD: 3 BD: 19 +.+.: fwnode_link_lock ->&k->k_lock FD: 31 BD: 114 +.+.: device_links_lock ->&k->list_lock ->&k->k_lock ->&rq->__lock FD: 1 BD: 4 ....: &dev->devres_lock FD: 1 BD: 4 +.+.: regulator_nesting_mutex FD: 2 BD: 1 +.+.: regulator_ww_class_mutex ->regulator_nesting_mutex FD: 157 BD: 177 +.+.: gdp_mutex ->&k->list_lock ->fs_reclaim ->pool_lock#2 ->lock ->&root->kernfs_rwsem ->&c->lock ->&zone->lock ->&____s->seqcount ->kobj_ns_type_lock ->&obj_hash[i].lock ->sysfs_symlink_target_lock ->&rq->__lock ->kernfs_idr_lock ->&sem->wait_lock ->&p->pi_lock ->&____s->seqcount#2 ->&n->list_lock ->gdp_mutex.wait_lock FD: 3 BD: 3 +.+.: subsys mutex#4 ->&k->k_lock FD: 28 BD: 114 +.+.: deferred_probe_mutex ->&rq->__lock FD: 1 BD: 18 ....: probe_waitqueue.lock FD: 1 BD: 3 ....: async_done.lock FD: 139 BD: 1 +.+.: &type->s_umount_key#9/1 ->fs_reclaim ->pool_lock#2 ->pcpu_alloc_mutex ->shrinker_rwsem ->list_lrus_mutex ->sb_lock ->&____s->seqcount ->&c->lock ->mmu_notifier_invalidate_range_start ->&sb->s_type->i_lock_key#8 ->&s->s_inode_list_lock ->tk_core.seq.seqcount ->&dentry->d_lock FD: 42 BD: 4138 +.+.: &sb->s_type->i_lock_key#8 ->&dentry->d_lock ->bit_wait_table + i FD: 137 BD: 79 +.+.: pack_mutex ->fs_reclaim ->&zone->lock ->&____s->seqcount ->pool_lock#2 ->free_vmap_area_lock ->vmap_area_lock ->init_mm.page_table_lock ->batched_entropy_u8.lock ->kfence_freelist_lock ->vmap_purge_lock ->cpa_lock ->text_mutex ->&rq->__lock FD: 29 BD: 4165 -.-.: &rcu_state.expedited_wq ->&p->pi_lock FD: 28 BD: 78 +.+.: &fp->aux->used_maps_mutex ->&rq->__lock FD: 1 BD: 1 +.+.: proto_list_mutex FD: 1 BD: 1 +.+.: targets_mutex FD: 30 BD: 3774 ...-: nl_table_lock ->pool_lock#2 ->nl_table_wait.lock ->&obj_hash[i].lock ->&c->lock FD: 29 BD: 3775 ..-.: nl_table_wait.lock ->&p->pi_lock FD: 1 BD: 1 +.+.: net_family_lock FD: 2 BD: 5 ....: net_generic_ids.xa_lock ->pool_lock#2 FD: 5 BD: 123 ..-.: &dir->lock ->&obj_hash[i].lock ->pool_lock#2 ->quarantine_lock FD: 36 BD: 5 +.+.: k-sk_lock-AF_NETLINK ->k-slock-AF_NETLINK ->&rq->__lock FD: 1 BD: 6 +...: k-slock-AF_NETLINK FD: 32 BD: 3516 ..-.: rhashtable_bucket ->rhashtable_bucket/1 FD: 1 BD: 10 ....: &list->lock FD: 29 BD: 10 ....: kauditd_wait.lock ->&p->pi_lock FD: 3 BD: 2 +.+.: lock#2 ->&zone->lock FD: 133 BD: 1 +.+.: khugepaged_mutex ->fs_reclaim ->pool_lock#2 
->kthread_create_lock ->&p->pi_lock ->&rq->__lock ->&x->wait ->&obj_hash[i].lock ->lock#2 ->pcp_batch_high_lock FD: 3 BD: 13 +.+.: subsys mutex#5 ->&k->k_lock FD: 4 BD: 1 +.+.: subsys mutex#6 ->&k->list_lock ->&k->k_lock FD: 1 BD: 1 +.+.: regmap_debugfs_early_lock FD: 1 BD: 1 +.+.: (acpi_reconfig_chain).rwsem FD: 1 BD: 1 +.+.: __i2c_board_lock FD: 132 BD: 1 +.+.: core_lock ->&k->list_lock ->&k->k_lock ->fs_reclaim ->pool_lock#2 FD: 2 BD: 1 +.+.: thermal_governor_lock ->thermal_list_lock FD: 1 BD: 2 +.+.: thermal_list_lock FD: 159 BD: 1 +.+.: cpuidle_lock ->&obj_hash[i].lock ->(console_sem).lock ->fs_reclaim ->pool_lock#2 ->lock ->&root->kernfs_rwsem ->&c->lock ->&____s->seqcount FD: 132 BD: 1 +.+.: k-sk_lock-AF_QIPCRTR ->k-slock-AF_QIPCRTR ->fs_reclaim ->qrtr_ports.xa_lock ->pool_lock#2 ->qrtr_node_lock ->&obj_hash[i].lock FD: 1 BD: 2 +...: k-slock-AF_QIPCRTR FD: 1 BD: 2 +.+.: qrtr_ports.xa_lock FD: 1 BD: 2 +.+.: qrtr_node_lock FD: 44 BD: 139 ....: freezer_lock ->&sighand->siglock ->&p->pi_lock FD: 1 BD: 1 ....: printk_ratelimit_state.lock FD: 1 BD: 1 ....: audit_backlog_wait.lock FD: 131 BD: 148 ++++: (crypto_chain).rwsem ->fs_reclaim ->pool_lock#2 ->kthread_create_lock ->&p->pi_lock ->&rq->__lock ->&x->wait ->&obj_hash[i].lock ->&c->lock ->&____s->seqcount ->&zone->lock ->remove_cache_srcu ->&n->list_lock ->&cfs_rq->removed.lock FD: 302 BD: 1 +.+.: iova_cache_mutex ->cpu_hotplug_lock ->slab_mutex FD: 3 BD: 1 +.+.: subsys mutex#7 ->&k->k_lock FD: 1 BD: 142 ....: pci_config_lock FD: 1 BD: 1 +.+.: subsys mutex#8 FD: 133 BD: 107 +.+.: dev_pm_qos_mtx ->fs_reclaim ->pool_lock#2 ->&dev->power.lock ->pm_qos_lock ->&c->lock ->&zone->lock ->&____s->seqcount ->&rq->__lock FD: 1 BD: 108 ....: pm_qos_lock FD: 158 BD: 106 +.+.: dev_pm_qos_sysfs_mtx ->dev_pm_qos_mtx ->&root->kernfs_rwsem ->fs_reclaim ->pool_lock#2 ->lock ->&c->lock ->&____s->seqcount ->&zone->lock ->&sem->wait_lock ->&p->pi_lock ->&rq->__lock FD: 1 BD: 1 ..-.: uidhash_lock FD: 1 BD: 1 +.+.: detected_devices_mutex FD: 136 BD: 1 +.+.: (work_completion)(&eval_map_work) ->trace_event_sem ->trace_event_sem.wait_lock ->&p->pi_lock ->&rq->__lock FD: 1 BD: 1 ....: oom_reaper_wait.lock FD: 1 BD: 1 +.+.: subsys mutex#9 FD: 29 BD: 4108 ....: &pgdat->kcompactd_wait ->&p->pi_lock FD: 135 BD: 2 +.+.: pcpu_balance_work ->pcpu_alloc_mutex ->pcpu_alloc_mutex.wait_lock ->&p->pi_lock ->&rq->__lock FD: 181 BD: 1 +.+.: memory_tier_lock ->fs_reclaim ->pool_lock#2 ->&x->wait#9 ->&obj_hash[i].lock ->&k->list_lock ->&____s->seqcount ->&zone->lock ->lock ->&root->kernfs_rwsem ->bus_type_sem ->sysfs_symlink_target_lock ->&k->k_lock ->&c->lock ->&dev->power.lock ->dpm_list_mtx ->uevent_sock_mutex ->running_helpers_waitq.lock ->subsys mutex#10 FD: 1 BD: 2 +.+.: subsys mutex#10 FD: 1 BD: 1 +.+.: ksm_thread_mutex FD: 1 BD: 1 ....: ksm_thread_wait.lock FD: 1 BD: 2 +.+.: damon_ops_lock FD: 133 BD: 147 ++++: crypto_alg_sem ->(crypto_chain).rwsem ->&rq->__lock ->crypto_alg_sem.wait_lock ->&pool->lock FD: 42 BD: 1 +.+.: lock#3 ->&obj_hash[i].lock ->&rq->__lock ->(work_completion)(work) ->&x->wait#10 ->&base->lock ->&cfs_rq->removed.lock ->pool_lock#2 FD: 1 BD: 127 +.+.: khugepaged_mm_lock FD: 29 BD: 127 ....: khugepaged_wait.lock ->&p->pi_lock FD: 155 BD: 2 +.+.: (work_completion)(&pwq->unbound_release_work) ->&wq->mutex ->wq_pool_mutex ->&obj_hash[i].lock ->pool_lock#2 ->rcu_node_0 ->&pool->lock ->&rnp->exp_wq[0] ->&rq->__lock ->&rnp->exp_wq[1] ->&rnp->exp_lock ->rcu_state.exp_mutex ->&rnp->exp_wq[3] ->rcu_state.exp_mutex.wait_lock ->&p->pi_lock ->&rnp->exp_wq[2] 
->&cfs_rq->removed.lock ->pool_lock ->&rcu_state.expedited_wq FD: 162 BD: 5 +.+.: bio_slab_lock ->fs_reclaim ->pool_lock#2 ->slab_mutex ->bio_slabs.xa_lock FD: 2 BD: 6 +.+.: bio_slabs.xa_lock ->pool_lock#2 FD: 130 BD: 3 +.+.: major_names_lock ->fs_reclaim ->pool_lock#2 ->major_names_spinlock ->&c->lock ->&____s->seqcount FD: 1 BD: 4 +.+.: major_names_spinlock FD: 1 BD: 4163 ..-.: quarantine_lock FD: 39 BD: 4093 .+.+: remove_cache_srcu ->quarantine_lock ->&c->lock ->&n->list_lock ->pool_lock#2 ->&obj_hash[i].lock ->&rq->__lock ->rcu_node_0 ->pgd_lock ->key ->pcpu_lock ->percpu_counters_lock ->&cfs_rq->removed.lock ->&____s->seqcount ->&rcu_state.expedited_wq ->&meta->lock ->kfence_freelist_lock ->stock_lock ->&base->lock FD: 3 BD: 13 +.+.: subsys mutex#11 ->&k->k_lock FD: 1 BD: 1 ....: *(&acpi_gbl_hardware_lock) FD: 7 BD: 1 ....: *(&acpi_gbl_gpe_lock) ->(console_sem).lock FD: 5 BD: 154 ....: mask_lock ->tmp_mask_lock FD: 4 BD: 155 -...: tmp_mask_lock ->vector_lock ->ioapic_lock FD: 1 BD: 1 -...: shrink_qlist.lock FD: 32 BD: 5 ....: remove_cache_srcu_srcu_usage.lock ->&obj_hash[i].lock ->&ACCESS_PRIVATE(sdp, lock) FD: 35 BD: 136 +.+.: flush_lock ->&obj_hash[i].lock ->(work_completion)(&sfw->work) ->&x->wait#10 ->&rq->__lock FD: 10 BD: 138 +.+.: (work_completion)(&sfw->work) ->&c->lock ->&n->list_lock ->&obj_hash[i].lock FD: 32 BD: 137 +.+.: (wq_completion)slub_flushwq ->(work_completion)(&sfw->work) ->(work_completion)(&barr->work) FD: 29 BD: 4241 ....: &x->wait#10 ->&p->pi_lock FD: 30 BD: 142 +.+.: (work_completion)(&barr->work) ->&x->wait#10 ->&rq->__lock ->&cfs_rq->removed.lock ->&obj_hash[i].lock ->pool_lock#2 FD: 1 BD: 1 +.+.: system_transition_mutex FD: 1 BD: 1 +.+.: (power_off_prep_handler_list).rwsem FD: 1 BD: 1 ....: power_off_handler_list.lock FD: 1 BD: 1 +.+.: (restart_prep_handler_list).rwsem FD: 1 BD: 1 +.+.: (reboot_notifier_list).rwsem FD: 209 BD: 1 +.+.: acpi_scan_lock ->semaphore->lock ->fs_reclaim ->pool_lock#2 ->&obj_hash[i].lock ->&x->wait#9 ->acpi_device_lock ->&k->list_lock ->&c->lock ->&zone->lock ->&____s->seqcount ->lock ->&root->kernfs_rwsem ->bus_type_sem ->sysfs_symlink_target_lock ->&k->k_lock ->&dev->power.lock ->dpm_list_mtx ->subsys mutex#12 ->uevent_sock_mutex ->running_helpers_waitq.lock ->*(&acpi_gbl_reference_count_lock) ->&n->list_lock ->pci_config_lock ->&rq->__lock ->batched_entropy_u8.lock ->kfence_freelist_lock ->&meta->lock ->quarantine_lock ->(console_sem).lock ->pci_bus_sem ->pci_mmcfg_lock ->resource_lock ->&device->physical_node_lock ->fwnode_link_lock ->devtree_lock ->gdp_mutex ->subsys mutex#13 ->pci_acpi_companion_lookup_sem ->pci_slot_mutex ->tk_core.seq.seqcount ->resource_alignment_lock ->device_links_srcu ->subsys mutex#14 ->acpi_pm_notifier_install_lock ->pci_rescan_remove_lock ->subsys mutex#3 ->acpi_link_lock ->wakeup_ida.xa_lock ->subsys mutex#15 ->events_lock ->power_resource_list_lock FD: 131 BD: 2 +.+.: acpi_device_lock ->fs_reclaim ->&c->lock ->&zone->lock ->&____s->seqcount ->pool_lock#2 ->&xa->xa_lock#2 ->semaphore->lock ->&obj_hash[i].lock FD: 1 BD: 3 ....: &xa->xa_lock#2 FD: 1 BD: 2 +.+.: subsys mutex#12 FD: 1 BD: 2 ++++: pci_bus_sem FD: 1 BD: 2 +.+.: pci_mmcfg_lock FD: 154 BD: 12 +.+.: &device->physical_node_lock ->sysfs_symlink_target_lock ->fs_reclaim ->pool_lock#2 ->lock ->&root->kernfs_rwsem FD: 3 BD: 2 +.+.: subsys mutex#13 ->&k->k_lock FD: 1 BD: 2 .+.+: pci_acpi_companion_lookup_sem FD: 1 BD: 2 +.+.: pci_slot_mutex FD: 1 BD: 2 +.+.: resource_alignment_lock FD: 1 BD: 217 ....: &dev->power.lock/1 FD: 1 BD: 2 +.+.: subsys 
mutex#14 FD: 179 BD: 2 +.+.: acpi_pm_notifier_install_lock ->semaphore->lock ->fs_reclaim ->pool_lock#2 ->*(&acpi_gbl_reference_count_lock) ->acpi_pm_notifier_lock ->&c->lock ->&zone->lock ->&____s->seqcount FD: 176 BD: 3 +.+.: acpi_pm_notifier_lock ->fs_reclaim ->pool_lock#2 ->wakeup_ida.xa_lock ->&x->wait#9 ->&obj_hash[i].lock ->&k->list_lock ->gdp_mutex ->lock ->&root->kernfs_rwsem ->bus_type_sem ->sysfs_symlink_target_lock ->&c->lock ->&zone->lock ->&____s->seqcount ->uevent_sock_mutex ->running_helpers_waitq.lock ->&k->k_lock ->subsys mutex#15 ->events_lock FD: 1 BD: 7 ....: wakeup_ida.xa_lock FD: 30 BD: 7 +.+.: subsys mutex#15 ->&k->k_lock ->&rq->__lock ->&cfs_rq->removed.lock ->&obj_hash[i].lock ->pool_lock#2 FD: 1 BD: 7 ....: events_lock FD: 131 BD: 1 +.+.: &pgdat->kswapd_lock ->fs_reclaim ->pool_lock#2 ->kthread_create_lock ->&p->pi_lock ->&x->wait ->&rq->__lock ->&obj_hash[i].lock ->&cfs_rq->removed.lock ->rcu_node_0 FD: 33 BD: 1 ..-.: drivers/char/random.c:251 FD: 15 BD: 2 +.+.: (next_reseed).work ->&obj_hash[i].lock ->&base->lock ->input_pool.lock ->base_crng.lock FD: 31 BD: 1 ..-.: mm/vmstat.c:2014 FD: 301 BD: 2 +.+.: (shepherd).work ->cpu_hotplug_lock ->&obj_hash[i].lock ->&base->lock ->&rq->__lock FD: 38 BD: 2 +.+.: (wq_completion)mm_percpu_wq ->(work_completion)(&(({ do { const void *__vpp_verify = (typeof((&vmstat_work) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((&vmstat_work))) *)((&vmstat_work)))); (typeof((typeof(*((&vmstat_work))) *)((&vmstat_work)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); }))->work) ->(work_completion)(work) ->(work_completion)(&barr->work) ->&rq->__lock FD: 29 BD: 3 +.+.: (work_completion)(&(({ do { const void *__vpp_verify = (typeof((&vmstat_work) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((&vmstat_work))) *)((&vmstat_work)))); (typeof((typeof(*((&vmstat_work))) *)((&vmstat_work)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); }))->work) ->&obj_hash[i].lock ->&base->lock ->&pcp->lock ->&rq->__lock FD: 37 BD: 2 +.+.: pci_rescan_remove_lock FD: 138 BD: 2 +.+.: acpi_link_lock ->fs_reclaim ->pool_lock#2 ->semaphore->lock ->&obj_hash[i].lock ->*(&acpi_gbl_reference_count_lock) ->&c->lock ->&zone->lock ->&____s->seqcount ->pci_config_lock ->(console_sem).lock ->&rq->__lock FD: 1 BD: 2 +.+.: power_resource_list_lock FD: 187 BD: 7 ++++: &(&priv->bus_notifier)->rwsem ->fs_reclaim ->pool_lock#2 ->lock ->&root->kernfs_rwsem ->&c->lock ->&zone->lock ->&____s->seqcount ->i2c_dev_list_lock ->&x->wait#9 ->&obj_hash[i].lock ->chrdevs_lock ->&k->list_lock ->gdp_mutex ->bus_type_sem ->sysfs_symlink_target_lock ->&dev->power.lock ->dpm_list_mtx ->req_lock ->&p->pi_lock ->&rq->__lock ->&x->wait#11 ->uevent_sock_mutex ->running_helpers_waitq.lock ->&k->k_lock ->subsys mutex#65 FD: 140 BD: 1 +.+.: &type->s_umount_key#10/1 ->fs_reclaim ->pool_lock#2 ->pcpu_alloc_mutex ->shrinker_rwsem ->list_lrus_mutex ->sb_lock ->&____s->seqcount ->&c->lock ->mmu_notifier_invalidate_range_start ->&sb->s_type->i_lock_key#9 ->&s->s_inode_list_lock ->tk_core.seq.seqcount ->&dentry->d_lock FD: 41 BD: 2 +.+.: &sb->s_type->i_lock_key#9 ->&dentry->d_lock FD: 140 BD: 1 +.+.: &type->s_umount_key#11/1 ->fs_reclaim ->pool_lock#2 ->pcpu_alloc_mutex ->shrinker_rwsem ->list_lrus_mutex ->sb_lock ->&zone->lock ->&____s->seqcount ->&obj_hash[i].lock ->mmu_notifier_invalidate_range_start ->&sb->s_type->i_lock_key#10 ->&s->s_inode_list_lock 
->tk_core.seq.seqcount ->&dentry->d_lock FD: 41 BD: 2 +.+.: &sb->s_type->i_lock_key#10 ->&dentry->d_lock FD: 235 BD: 126 ++++: &mm->mmap_lock ->reservation_ww_class_acquire ->fs_reclaim ->&zone->lock ->&____s->seqcount ->pool_lock#2 ->&c->lock ->&mm->page_table_lock ->ptlock_ptr(page) ->&anon_vma->rwsem ->per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) ->ptlock_ptr(page)#2 ->lock#4 ->lock#5 ->mmu_notifier_invalidate_range_start ->&vma->vm_lock->lock ->&obj_hash[i].lock ->&lruvec->lru_lock ->&rq->__lock ->batched_entropy_u8.lock ->kfence_freelist_lock ->&meta->lock ->&cfs_rq->removed.lock ->rcu_node_0 ->quarantine_lock ->&mapping->i_mmap_rwsem ->&rcu_state.expedited_wq ->resource_lock ->&p->alloc_lock ->tk_core.seq.seqcount ->&mm->mmap_lock/1 ->&sem->wait_lock ->&p->pi_lock ->&n->list_lock ->pgd_lock ->key ->pcpu_lock ->percpu_counters_lock ->remove_cache_srcu ->&folio_wait_table[i] ->&base->lock ->khugepaged_mm_lock ->khugepaged_wait.lock ->&xa->xa_lock#6 ->&info->lock ->mount_lock ->&sb->s_type->i_lock_key ->&wb->list_lock ->&kcov->lock ->stock_lock ->sb_pagefaults ->&mapping->private_lock ->&____s->seqcount#2 ->mapping.invalidate_lock ->&map->freeze_mutex ->lock#10 ->&xa->xa_lock#12 ->&s->s_inode_list_lock ->batched_entropy_u32.lock ->&dd->lock ->&po->pg_vec_lock ->&sb->s_type->i_mutex_key#21 ->&hugetlbfs_i_mmap_rwsem_key ->&vma_lock->rw_sema ->key#23 ->&hugetlb_fault_mutex_table[i] ->hugetlb_lock ->&pgdat->kswapd_wait FD: 135 BD: 142 +.+.: reservation_ww_class_acquire ->reservation_ww_class_mutex FD: 134 BD: 143 +.+.: reservation_ww_class_mutex ->&rq->__lock ->fs_reclaim ->&shmem->vmap_lock FD: 68 BD: 4108 ++++: &mapping->i_mmap_rwsem ->&obj_hash[i].lock ->pool_lock#2 ->&____s->seqcount ->&anon_vma->rwsem ->quarantine_lock ->&rq->__lock ->&sem->wait_lock ->rcu_node_0 ->mmu_notifier_invalidate_range_start ->ptlock_ptr(page) ->&p->pi_lock ->&cfs_rq->removed.lock ->&meta->lock ->kfence_freelist_lock ->&base->lock ->pgd_lock ->key ->pcpu_lock ->percpu_counters_lock ->ptlock_ptr(page)#2 ->lock#4 ->lock#5 ->&lruvec->lru_lock ->&rcu_state.expedited_wq FD: 1 BD: 4127 +.+.: dma_fence_map FD: 29 BD: 3 +.+.: delayed_uprobe_lock ->&rq->__lock ->delayed_uprobe_lock.wait_lock FD: 1 BD: 3996 ....: key FD: 1 BD: 4 +.+.: attribute_container_mutex FD: 143 BD: 22 ++++: triggers_list_lock ->&led_cdev->trigger_lock FD: 143 BD: 22 ++++: leds_list_lock ->&led_cdev->trigger_lock FD: 193 BD: 2 ++++: (usb_notifier_list).rwsem ->fs_reclaim ->pool_lock#2 ->pin_fs_lock ->&sb->s_type->i_mutex_key#3 ->&c->lock ->&zone->lock ->&____s->seqcount ->&x->wait#9 ->&obj_hash[i].lock ->&k->list_lock ->gdp_mutex ->lock ->&root->kernfs_rwsem ->bus_type_sem ->sysfs_symlink_target_lock ->&dev->power.lock ->dpm_list_mtx ->req_lock ->&p->pi_lock ->&rq->__lock ->&x->wait#11 ->uevent_sock_mutex ->running_helpers_waitq.lock ->&k->k_lock ->subsys mutex#59 ->mon_lock ->batched_entropy_u8.lock ->kfence_freelist_lock FD: 1 BD: 1 +.+.: rc_map_lock FD: 1 BD: 1 +.+.: subsys mutex#16 FD: 1 BD: 2 +.+.: &entry->access FD: 131 BD: 2 +.+.: info_mutex ->proc_subdir_lock ->fs_reclaim ->&zone->lock ->&____s->seqcount ->pool_lock#2 ->&obj_hash[i].lock ->proc_inum_ida.xa_lock ->&c->lock FD: 1 BD: 178 +.+.: kobj_ns_type_lock FD: 30 BD: 75 +.+.: subsys mutex#17 ->&k->k_lock ->&rq->__lock FD: 8 BD: 3652 ..-.: &dir->lock#2 ->&obj_hash[i].lock ->pool_lock#2 ->&____s->seqcount ->quarantine_lock ->&meta->lock ->kfence_freelist_lock FD: 36 BD: 78 +.+.: dev_hotplug_mutex ->&dev->power.lock ->&rq->__lock ->&k->k_lock ->&cfs_rq->removed.lock ->&obj_hash[i].lock 
->pool_lock#2 FD: 1 BD: 80 ++++: dev_base_lock FD: 1 BD: 72 ++++: qdisc_mod_lock FD: 19 BD: 1 ++++: bt_proto_lock ->pool_lock#2 ->&dir->lock ->&obj_hash[i].lock ->chan_list_lock ->l2cap_sk_list.lock ->hci_sk_list.lock ->&c->lock ->&n->list_lock ->rfcomm_sk_list.lock ->sco_sk_list.lock ->cmtp_sk_list.lock ->bnep_sk_list.lock FD: 147 BD: 22 +.+.: hci_cb_list_lock ->fs_reclaim ->pool_lock#2 ->&obj_hash[i].lock ->chan_list_lock ->&conn->ident_lock ->&base->lock ->&list->lock#12 ->&conn->chan_lock ->&c->lock ->&rq->__lock ->&____s->seqcount ->&n->list_lock ->&list->lock#13 ->(work_completion)(&(&conn->id_addr_timer)->work) ->&rnp->exp_lock ->rcu_state.exp_mutex ->(work_completion)(&(&conn->info_timer)->work) ->&____s->seqcount#2 FD: 217 BD: 4 +.+.: mgmt_chan_list_lock ->pool_lock#2 ->tk_core.seq.seqcount ->hci_sk_list.lock ->&obj_hash[i].lock ->hci_dev_list_lock ->(console_sem).lock ->&rq->__lock ->&hdev->lock ->fs_reclaim ->rlock-AF_BLUETOOTH ->&c->lock FD: 1 BD: 3531 ....: &list->lock#2 FD: 129 BD: 74 +.+.: rate_ctrl_mutex ->fs_reclaim ->pool_lock#2 FD: 2 BD: 6 +.+.: netlbl_domhsh_lock ->pool_lock#2 FD: 1 BD: 72 +.+.: netlbl_unlhsh_lock FD: 197 BD: 1 +.+.: misc_mtx ->fs_reclaim ->pool_lock#2 ->&x->wait#9 ->&obj_hash[i].lock ->&k->list_lock ->gdp_mutex ->lock ->&root->kernfs_rwsem ->bus_type_sem ->&c->lock ->&zone->lock ->&____s->seqcount ->sysfs_symlink_target_lock ->&dev->power.lock ->dpm_list_mtx ->req_lock ->&p->pi_lock ->&rq->__lock ->&x->wait#11 ->uevent_sock_mutex ->running_helpers_waitq.lock ->subsys mutex#18 ->misc_minors_ida.xa_lock ->&cfs_rq->removed.lock ->&base->lock ->&dir->lock ->rfkill_global_mutex ->&____s->seqcount#2 ->remove_cache_srcu ->&n->list_lock FD: 149 BD: 1 .+.+: sb_writers ->mount_lock ->&type->i_mutex_dir_key/1 ->&sb->s_type->i_mutex_key#4 ->tk_core.seq.seqcount ->&sb->s_type->i_lock_key#5 ->&wb->list_lock ->&type->i_mutex_dir_key#2 ->&sem->wait_lock ->&p->pi_lock ->&rq->__lock ->&dentry->d_lock ->tomoyo_ss ->&xattrs->lock FD: 139 BD: 2 +.+.: &type->i_mutex_dir_key/1 ->rename_lock.seqcount ->fs_reclaim ->pool_lock#2 ->&dentry->d_lock ->&obj_hash[i].lock ->&sbinfo->stat_lock ->mmu_notifier_invalidate_range_start ->&sb->s_type->i_lock_key#5 ->&s->s_inode_list_lock ->tk_core.seq.seqcount ->batched_entropy_u32.lock ->&sb->s_type->i_mutex_key#4 ->&c->lock ->&zone->lock ->&____s->seqcount ->quarantine_lock ->&meta->lock ->kfence_freelist_lock ->batched_entropy_u8.lock ->tomoyo_ss ->&u->bindlock ->&fsnotify_mark_srcu ->&sem->wait_lock ->remove_cache_srcu ->&n->list_lock ->&rq->__lock ->&sb->s_type->i_mutex_key#4/4 ->krc.lock ->&xa->xa_lock#6 FD: 114 BD: 3 +.+.: &sb->s_type->i_mutex_key#4 ->tk_core.seq.seqcount ->tomoyo_ss ->&xattrs->lock ->&dentry->d_lock ->&fsnotify_mark_srcu ->&sb->s_type->i_mutex_key#4/4 ->&rq->__lock ->&sb->s_type->i_lock_key#5 ->fs_reclaim ->&____s->seqcount ->pool_lock#2 ->stock_lock ->&xa->xa_lock#6 ->lock#4 ->&info->lock ->rcu_node_0 ->key#9 ->&sem->wait_lock ->&cfs_rq->removed.lock ->&obj_hash[i].lock ->&wb->list_lock ->lock#5 ->&lruvec->lru_lock ->per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) FD: 29 BD: 168 ....: &x->wait#11 ->&p->pi_lock FD: 3 BD: 2 +.+.: subsys mutex#18 ->&k->k_lock FD: 200 BD: 6 +.+.: input_mutex ->&rq->__lock ->input_devices_poll_wait.lock ->fs_reclaim ->pool_lock#2 ->&dev->mutex#2 ->input_ida.xa_lock ->&x->wait#9 ->&obj_hash[i].lock ->chrdevs_lock ->&k->list_lock ->lock ->&root->kernfs_rwsem ->bus_type_sem ->sysfs_symlink_target_lock ->&zone->lock ->&____s->seqcount ->&c->lock ->&dev->power.lock ->dpm_list_mtx ->req_lock 
->&p->pi_lock ->&x->wait#11 ->uevent_sock_mutex ->running_helpers_waitq.lock ->&k->k_lock ->subsys mutex#30 ->&led_cdev->led_access ->&pool->lock ->&cfs_rq->removed.lock ->&mousedev->mutex/1 FD: 188 BD: 2 +.+.: (work_completion)(&rfkill_global_led_trigger_work) ->rfkill_global_mutex FD: 187 BD: 14 +.+.: rfkill_global_mutex ->fs_reclaim ->pool_lock#2 ->&c->lock ->&____s->seqcount ->&rq->__lock ->&k->list_lock ->lock ->&root->kernfs_rwsem ->bus_type_sem ->sysfs_symlink_target_lock ->&dev->power.lock ->dpm_list_mtx ->&rfkill->lock ->uevent_sock_mutex ->&obj_hash[i].lock ->running_helpers_waitq.lock ->&k->k_lock ->subsys mutex#40 ->triggers_list_lock ->leds_list_lock ->&pool->lock ->rfkill_global_mutex.wait_lock ->&zone->lock ->&cfs_rq->removed.lock ->&n->list_lock ->uevent_sock_mutex.wait_lock ->&p->pi_lock ->&data->mtx ->&____s->seqcount#2 ->&sem->wait_lock FD: 1 BD: 7 ....: input_devices_poll_wait.lock FD: 337 BD: 3 ++++: (netlink_chain).rwsem ->pool_lock#2 ->&obj_hash[i].lock ->&rq->__lock ->reg_indoor_lock ->hwsim_radio_lock ->&cfs_rq->removed.lock ->rcu_node_0 ->&q->instances_lock ->&log->instances_lock ->&nft_net->commit_mutex ->&c->lock ->(console_sem).lock ->batched_entropy_u8.lock ->kfence_freelist_lock ->&rcu_state.expedited_wq ->&n->list_lock ->&____s->seqcount#2 ->&____s->seqcount FD: 13 BD: 1 ++++: proto_tab_lock ->pool_lock#2 ->&dir->lock ->&obj_hash[i].lock ->raw_sk_list.lock ->&c->lock ->&n->list_lock FD: 3 BD: 1 ....: random_ready_notifier.lock ->crngs.lock FD: 1 BD: 2 ....: misc_minors_ida.xa_lock FD: 8 BD: 1 ....: vga_lock#2 ->pci_config_lock ->(console_sem).lock FD: 140 BD: 1 +.+.: &type->s_umount_key#12/1 ->fs_reclaim ->pool_lock#2 ->pcpu_alloc_mutex ->shrinker_rwsem ->list_lrus_mutex ->sb_lock ->mmu_notifier_invalidate_range_start ->&sb->s_type->i_lock_key#11 ->&s->s_inode_list_lock ->tk_core.seq.seqcount ->&dentry->d_lock FD: 41 BD: 2 +.+.: &sb->s_type->i_lock_key#11 ->&dentry->d_lock FD: 329 BD: 1 +.+.: (work_completion)(&tracerfs_init_work) ->pin_fs_lock ->fs_reclaim ->pool_lock#2 ->&zone->lock ->&____s->seqcount ->&c->lock ->sb_lock ->&type->s_umount_key#13/1 ->mnt_id_ida.xa_lock ->pcpu_alloc_mutex ->&dentry->d_lock ->mount_lock ->&obj_hash[i].lock ->&sb->s_type->i_mutex_key#5 ->event_mutex ->(module_notify_list).rwsem ->trace_types_lock FD: 140 BD: 2 +.+.: &type->s_umount_key#13/1 ->fs_reclaim ->pool_lock#2 ->pcpu_alloc_mutex ->shrinker_rwsem ->list_lrus_mutex ->&c->lock ->&zone->lock ->&____s->seqcount ->sb_lock ->mmu_notifier_invalidate_range_start ->&sb->s_type->i_lock_key#12 ->&s->s_inode_list_lock ->tk_core.seq.seqcount ->&dentry->d_lock FD: 41 BD: 8 +.+.: &sb->s_type->i_lock_key#12 ->&dentry->d_lock FD: 131 BD: 6 +.+.: &sb->s_type->i_mutex_key#5 ->&sb->s_type->i_lock_key#12 ->rename_lock.seqcount ->fs_reclaim ->pool_lock#2 ->&dentry->d_lock ->mmu_notifier_invalidate_range_start ->&s->s_inode_list_lock ->tk_core.seq.seqcount ->&c->lock ->&zone->lock ->&____s->seqcount ->batched_entropy_u8.lock ->kfence_freelist_lock ->&rq->__lock ->&obj_hash[i].lock FD: 318 BD: 2 +.+.: event_mutex ->pin_fs_lock ->&sb->s_type->i_mutex_key#5 ->trace_event_sem ->trace_event_sem.wait_lock ->&p->pi_lock ->&rq->__lock ->trace_types_lock ->sched_register_mutex ->tracepoints_mutex ->event_mutex.wait_lock FD: 1 BD: 5 ....: trace_event_sem.wait_lock FD: 302 BD: 2 +.+.: timer_update_work ->timer_keys_mutex FD: 301 BD: 3 +.+.: timer_keys_mutex ->cpu_hotplug_lock FD: 140 BD: 1 +.+.: &type->s_umount_key#14/1 ->fs_reclaim ->pool_lock#2 ->pcpu_alloc_mutex ->shrinker_rwsem ->list_lrus_mutex 
->sb_lock ->mmu_notifier_invalidate_range_start ->&sb->s_type->i_lock_key#13 ->&s->s_inode_list_lock ->tk_core.seq.seqcount ->&dentry->d_lock FD: 41 BD: 2 +.+.: &sb->s_type->i_lock_key#13 ->&dentry->d_lock FD: 140 BD: 1 +.+.: &type->s_umount_key#15/1 ->fs_reclaim ->pool_lock#2 ->pcpu_alloc_mutex ->shrinker_rwsem ->list_lrus_mutex ->sb_lock ->mmu_notifier_invalidate_range_start ->&sb->s_type->i_lock_key#14 ->&s->s_inode_list_lock ->tk_core.seq.seqcount ->&dentry->d_lock FD: 42 BD: 2 +.+.: &sb->s_type->i_lock_key#14 ->&dentry->d_lock ->bit_wait_table + i FD: 140 BD: 1 +.+.: &type->s_umount_key#16/1 ->fs_reclaim ->pool_lock#2 ->pcpu_alloc_mutex ->shrinker_rwsem ->list_lrus_mutex ->sb_lock ->mmu_notifier_invalidate_range_start ->&sb->s_type->i_lock_key#15 ->&s->s_inode_list_lock ->tk_core.seq.seqcount ->&dentry->d_lock FD: 41 BD: 3 +.+.: &sb->s_type->i_lock_key#15 ->&dentry->d_lock FD: 130 BD: 1 +.+.: kclist_lock ->resource_lock ->fs_reclaim ->pool_lock#2 FD: 139 BD: 1 +.+.: &type->s_umount_key#17/1 ->fs_reclaim ->pool_lock#2 ->pcpu_alloc_mutex ->shrinker_rwsem ->list_lrus_mutex ->sb_lock ->&zone->lock ->&____s->seqcount ->&c->lock ->mmu_notifier_invalidate_range_start ->&sb->s_type->i_lock_key#16 ->&s->s_inode_list_lock ->tk_core.seq.seqcount ->&dentry->d_lock FD: 41 BD: 4136 +.+.: &sb->s_type->i_lock_key#16 ->&dentry->d_lock FD: 241 BD: 93 .+.+: tomoyo_ss ->mmu_notifier_invalidate_range_start ->pool_lock#2 ->tomoyo_policy_lock ->(console_sem).lock ->console_owner_lock ->console_owner ->&obj_hash[i].lock ->&dentry->d_lock ->tomoyo_log_lock ->tomoyo_log_wait.lock ->&c->lock ->&zone->lock ->&____s->seqcount ->file_systems_lock ->fs_reclaim ->quarantine_lock ->&mm->mmap_lock ->batched_entropy_u8.lock ->kfence_freelist_lock ->&meta->lock ->&rq->__lock ->&cfs_rq->removed.lock ->&n->list_lock ->remove_cache_srcu ->rcu_node_0 ->&base->lock ->&rcu_state.expedited_wq ->&____s->seqcount#2 ->mount_lock ->pgd_lock ->key ->pcpu_lock ->percpu_counters_lock ->stock_lock ->&fs->lock FD: 140 BD: 1 +.+.: &type->s_umount_key#18/1 ->fs_reclaim ->pool_lock#2 ->pcpu_alloc_mutex ->shrinker_rwsem ->&c->lock ->&zone->lock ->&____s->seqcount ->list_lrus_mutex ->sb_lock ->mmu_notifier_invalidate_range_start ->&sb->s_type->i_lock_key#17 ->&s->s_inode_list_lock ->tk_core.seq.seqcount ->&dentry->d_lock FD: 41 BD: 4 +.+.: &sb->s_type->i_lock_key#17 ->&dentry->d_lock FD: 133 BD: 1 +.+.: &ns->lock ->&dentry->d_lock ->pin_fs_lock ->&sb->s_type->i_mutex_key#6 FD: 131 BD: 2 +.+.: &sb->s_type->i_mutex_key#6 ->&sb->s_type->i_lock_key#17 ->rename_lock.seqcount ->fs_reclaim ->pool_lock#2 ->&dentry->d_lock ->mmu_notifier_invalidate_range_start ->&s->s_inode_list_lock ->tk_core.seq.seqcount FD: 43 BD: 1 +.+.: &type->s_umount_key#19 ->sb_lock ->&dentry->d_lock FD: 129 BD: 1 +.+.: pnp_lock ->fs_reclaim ->pool_lock#2 FD: 1 BD: 1 +.+.: subsys mutex#19 FD: 3 BD: 1 +.+.: subsys mutex#20 ->&k->k_lock FD: 3 BD: 10 +.+.: subsys mutex#21 ->&k->k_lock FD: 3 BD: 1 +.+.: subsys mutex#22 ->&k->k_lock FD: 371 BD: 1 +.+.: tty_mutex ->(console_sem).lock ->console_lock ->fs_reclaim ->pool_lock#2 ->tty_ldiscs_lock ->&obj_hash[i].lock ->&k->list_lock ->&k->k_lock ->&tty->legacy_mutex FD: 4 BD: 1 +.+.: subsys mutex#23 ->&k->list_lock ->&k->k_lock FD: 1 BD: 1 ....: netevent_notif_chain.lock FD: 337 BD: 11 ++++: clients_rwsem ->fs_reclaim ->clients.xa_lock ->&device->client_data_rwsem FD: 2 BD: 12 +.+.: clients.xa_lock ->pool_lock#2 FD: 907 BD: 10 ++++: devices_rwsem ->&rq->__lock ->rcu_node_0 ->&rcu_state.expedited_wq ->fs_reclaim ->&c->lock ->pool_lock#2 
->devices.xa_lock ->&obj_hash[i].lock ->(console_sem).lock ->clients_rwsem ->rdma_nets_rwsem ->&pdata->netdev_lock ->&table->lock#4 ->rdma_nets_rwsem.wait_lock ->&p->pi_lock ->devices_rwsem.wait_lock ->&device->event_handler_rwsem ->&ndev->lock ->&cfs_rq->removed.lock FD: 1 BD: 1 +.+.: (blocking_lsm_notifier_chain).rwsem FD: 221 BD: 75 ++++: (inetaddr_chain).rwsem ->fs_reclaim ->pool_lock#2 ->pcpu_alloc_mutex ->fib_info_lock ->&dir->lock#2 ->&____s->seqcount ->&c->lock ->nl_table_lock ->&obj_hash[i].lock ->nl_table_wait.lock ->&net->sctp.local_addr_lock ->&rq->__lock ->rlock-AF_NETLINK ->&n->list_lock ->batched_entropy_u8.lock ->kfence_freelist_lock ->remove_cache_srcu ->&ipvlan->addrs_lock ->&____s->seqcount#2 ->&tbl->lock ->class ->(&tbl->proxy_timer) ->&base->lock ->krc.lock ->&cfs_rq->removed.lock ->stock_lock ->&dir->lock ->per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) ->&meta->lock FD: 1 BD: 10 ....: inet6addr_chain.lock FD: 1 BD: 1 +.+.: buses_mutex FD: 1 BD: 1 +.+.: offload_lock FD: 1 BD: 1 +...: inetsw_lock FD: 895 BD: 1 +.+.: (wq_completion)events_power_efficient ->(work_completion)(&(&tbl->managed_work)->work) ->(check_lifetime_work).work ->(work_completion)(&(&cache_cleaner)->work) ->(work_completion)(&(&ops->cursor_work)->work) ->(work_completion)(&(&hub->init_work)->work) ->(work_completion)(&(&tbl->gc_work)->work) ->(work_completion)(&(&gc_work->dwork)->work) ->(reg_check_chans).work ->(crda_timeout).work ->(gc_work).work ->(work_completion)(&(&hinfo->gc_work)->work) ->(work_completion)(&barr->work) ->(work_completion)(&(&flowtable->gc_work)->work) ->&rq->__lock FD: 49 BD: 2 +.+.: (work_completion)(&(&tbl->managed_work)->work) ->&tbl->lock ->&rq->__lock FD: 48 BD: 3498 ++-.: &tbl->lock ->&obj_hash[i].lock ->&base->lock ->pool_lock#2 ->batched_entropy_u32.lock ->&n->lock ->&c->lock ->&____s->seqcount ->nl_table_lock ->nl_table_wait.lock ->&dir->lock#2 ->krc.lock ->&n->list_lock ->rlock-AF_NETLINK ->&____s->seqcount#2 ->quarantine_lock ->batched_entropy_u8.lock ->kfence_freelist_lock ->&meta->lock ->init_task.mems_allowed_seq.seqcount FD: 1 BD: 76 +.+.: ptype_lock FD: 32 BD: 2 +.+.: (check_lifetime_work).work ->&obj_hash[i].lock ->&base->lock ->&rq->__lock ->rcu_node_0 FD: 1 BD: 72 +.+.: &net->rules_mod_lock FD: 1 BD: 1 +.+.: tcp_ulp_list_lock FD: 1 BD: 1 +...: xfrm_state_afinfo_lock FD: 1 BD: 1 +.+.: xfrm_policy_afinfo_lock FD: 1 BD: 1 +...: xfrm_input_afinfo_lock FD: 18 BD: 4277 ..-.: krc.lock ->&obj_hash[i].lock ->hrtimer_bases.lock ->&base->lock FD: 133 BD: 1 +.+.: (wq_completion)events_highpri ->(work_completion)(&(&krcp->page_cache_work)->work) ->(work_completion)(flush) ->(work_completion)(&barr->work) FD: 129 BD: 3 +.+.: (work_completion)(&(&krcp->page_cache_work)->work) ->fs_reclaim ->&zone->lock ->&____s->seqcount ->pool_lock#2 ->krc.lock FD: 1 BD: 3 +.+.: &hashinfo->lock FD: 1 BD: 1 +.+.: tcp_cong_list_lock FD: 2 BD: 7 +.+.: cache_list_lock ->&cd->hash_lock FD: 1 BD: 1 +.+.: (rpc_pipefs_notifier_list).rwsem FD: 1 BD: 1 +.+.: svc_xprt_class_lock FD: 7 BD: 1 +.+.: xprt_list_lock ->(console_sem).lock FD: 33 BD: 2 +.+.: (work_completion)(&(&cache_cleaner)->work) ->cache_list_lock ->&obj_hash[i].lock ->&base->lock ->rcu_node_0 ->&rcu_state.expedited_wq ->&rq->__lock FD: 1 BD: 1 ....: pcibios_fwaddrmap_lock FD: 145 BD: 3 .+.+: sb_writers#2 ->mount_lock ->&sb->s_type->i_mutex_key/1 ->&sb->s_type->i_mutex_key FD: 141 BD: 4 +.+.: &sb->s_type->i_mutex_key/1 ->rename_lock.seqcount ->fs_reclaim ->pool_lock#2 ->&dentry->d_lock ->&obj_hash[i].lock ->tomoyo_ss 
->mmu_notifier_invalidate_range_start ->&sb->s_type->i_lock_key#2 ->&s->s_inode_list_lock ->tk_core.seq.seqcount ->&sb->s_type->i_mutex_key ->&c->lock ->&____s->seqcount FD: 1 BD: 2 +.+.: tomoyo_log_lock FD: 1 BD: 2 ....: tomoyo_log_wait.lock FD: 68 BD: 4135 +.+.: &wb->list_lock ->&sb->s_type->i_lock_key#2 ->&sb->s_type->i_lock_key#23 ->&sb->s_type->i_lock_key#22 ->&sb->s_type->i_lock_key ->&sb->s_type->i_lock_key#5 ->&sb->s_type->i_lock_key#8 ->&sb->s_type->i_lock_key#24 ->&sb->s_type->i_lock_key#3 ->&p->sequence ->key#10 ->&sb->s_type->i_lock_key#27 ->&sb->s_type->i_lock_key#16 ->&sb->s_type->i_lock_key#31 FD: 14 BD: 1 +.-.: (&tcp_orphan_timer) ->&obj_hash[i].lock ->&base->lock FD: 192 BD: 3 ++++: umhelper_sem ->usermodehelper_disabled_waitq.lock ->fs_reclaim ->pool_lock#2 ->&x->wait#9 ->&obj_hash[i].lock ->&k->list_lock ->gdp_mutex ->lock ->&root->kernfs_rwsem ->bus_type_sem ->sysfs_symlink_target_lock ->&c->lock ->&zone->lock ->&____s->seqcount ->&dev->power.lock ->dpm_list_mtx ->&k->k_lock ->subsys mutex#79 ->fw_lock ->uevent_sock_mutex ->running_helpers_waitq.lock ->&rq->__lock ->&x->wait#23 ->&base->lock ->&pool->lock ->(&timer.timer) ->dev_pm_qos_sysfs_mtx ->kernfs_idr_lock ->deferred_probe_mutex ->device_links_lock ->mmu_notifier_invalidate_range_start FD: 1 BD: 4 ....: usermodehelper_disabled_waitq.lock FD: 213 BD: 2 +.+.: (work_completion)(&sub_info->work) ->&sighand->siglock ->fs_reclaim ->&c->lock ->&zone->lock ->&____s->seqcount ->pool_lock#2 ->free_vmap_area_lock ->vmap_area_lock ->init_mm.page_table_lock ->batched_entropy_u64.lock ->&obj_hash[i].lock ->init_files.file_lock ->init_fs.lock ->&p->alloc_lock ->lock ->pidmap_lock ->cgroup_threadgroup_rwsem ->input_pool.lock ->&p->pi_lock ->&rq->__lock ->rcu_node_0 ->&sig->wait_chldexit ->tasklist_lock ->&prev->lock ->css_set_lock ->&x->wait#17 ->&cfs_rq->removed.lock ->batched_entropy_u8.lock ->kfence_freelist_lock ->&n->list_lock ->remove_cache_srcu ->&____s->seqcount#2 ->quarantine_lock ->&rcu_state.expedited_wq ->&meta->lock FD: 1 BD: 1 +.+.: &drv->dynids.lock FD: 1 BD: 1 +.+.: umh_sysctl_lock FD: 67 BD: 4091 ++++: &anon_vma->rwsem ->&mm->page_table_lock ->&rq->__lock ->&obj_hash[i].lock ->pool_lock#2 ->&____s->seqcount ->&meta->lock ->kfence_freelist_lock ->&cfs_rq->removed.lock ->&c->lock ->&n->list_lock ->mmu_notifier_invalidate_range_start ->ptlock_ptr(page) ->quarantine_lock ->rcu_node_0 ->&rcu_state.gp_wq ->&sem->wait_lock ->&base->lock ->ptlock_ptr(page)#2 ->pgd_lock ->key ->pcpu_lock ->percpu_counters_lock ->stock_lock ->&rcu_state.expedited_wq ->&____s->seqcount#2 ->batched_entropy_u8.lock ->per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) FD: 1 BD: 4686 -.-.: per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) FD: 10 BD: 4136 +.+.: lock#4 ->&lruvec->lru_lock ->&obj_hash[i].lock ->&pcp->lock ->lock#11 FD: 274 BD: 1 +.+.: &sig->cred_guard_mutex ->fs_reclaim ->pool_lock#2 ->&fs->lock ->&zone->lock ->&____s->seqcount ->&c->lock ->&dentry->d_lock ->&sb->s_type->i_mutex_key ->&obj_hash[i].lock ->delayed_uprobe_lock ->&mm->mmap_lock ->pgd_lock ->key ->pcpu_lock ->percpu_counters_lock ->rcu_node_0 ->&rq->__lock ->&cfs_rq->removed.lock ->&rcu_state.gp_wq ->pool_lock ->batched_entropy_u8.lock ->kfence_freelist_lock ->quarantine_lock ->&dentry->d_lock/1 ->init_fs.lock ->&type->i_mutex_dir_key#3 ->&sb->s_type->i_lock_key#22 ->&sb->s_type->i_mutex_key#8 ->&p->pi_lock ->aa_buffers_lock ->mapping.invalidate_lock ->&folio_wait_table[i] ->tomoyo_ss ->&iint->mutex ->binfmt_lock ->entries_lock ->&ei->xattr_sem ->&tsk->futex_exit_mutex 
->&sig->exec_update_lock ->&p->alloc_lock ->tk_core.seq.seqcount ->&n->list_lock ->key#5 ->&stopper->lock ->&stop_pi_lock ->&lock->wait_lock ->&x->wait#8 ->remove_cache_srcu ->&____s->seqcount#2 ->&rcu_state.expedited_wq FD: 2 BD: 4144 ..-.: &lruvec->lru_lock ->per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) FD: 1 BD: 4129 +.+.: lock#5 FD: 129 BD: 130 ++++: &vma->vm_lock->lock ->fs_reclaim ->&____s->seqcount ->pool_lock#2 ->ptlock_ptr(page)#2 ->mmu_notifier_invalidate_range_start ->&rq->__lock ->&lruvec->lru_lock ->&obj_hash[i].lock ->ptlock_ptr(page) ->per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) ->rcu_node_0 ->&cfs_rq->removed.lock ->&rcu_state.gp_wq ->pgd_lock ->key ->pcpu_lock ->percpu_counters_lock ->&c->lock ->batched_entropy_u8.lock ->kfence_freelist_lock ->&rcu_state.expedited_wq ->remove_cache_srcu ->&p->pi_lock ->stock_lock ->&n->list_lock ->&sem->wait_lock ->&____s->seqcount#2 ->lock#4 ->lock#5 FD: 236 BD: 2 +.+.: &tsk->futex_exit_mutex ->&p->pi_lock ->&rq->__lock ->&cfs_rq->removed.lock ->&obj_hash[i].lock ->pool_lock#2 ->&mm->mmap_lock ->rcu_node_0 ->&rcu_state.expedited_wq FD: 31 BD: 1 +.+.: &child->perf_event_mutex ->&rq->__lock ->&cfs_rq->removed.lock ->&obj_hash[i].lock ->pool_lock#2 ->rcu_node_0 ->&rcu_state.expedited_wq FD: 1 BD: 141 ....: &pid->wait_pidfd FD: 29 BD: 148 ....: &sig->wait_chldexit ->&p->pi_lock FD: 15 BD: 148 ....: &(&sig->stats_lock)->lock ->&____s->seqcount#5 FD: 14 BD: 149 ....: &____s->seqcount#5 ->pidmap_lock ->&obj_hash[i].lock ->pool_lock#2 FD: 7 BD: 1 +.+.: low_water_lock ->(console_sem).lock FD: 304 BD: 1 +.+.: vendor_module_lock ->slab_mutex ->pcpu_alloc_mutex ->&obj_hash[i].lock ->percpu_counters_lock ->fs_reclaim ->pool_lock#2 ->shrinker_rwsem ->&____s->seqcount ->&zone->lock ->cpu_hotplug_lock ->timekeeper_lock FD: 31 BD: 1 ..-.: &(&cache_cleaner)->timer FD: 1 BD: 156 +.+.: text_mutex.wait_lock FD: 1 BD: 4732 -.-.: pvclock_gtod_data FD: 147 BD: 2 ++++: &type->i_mutex_dir_key#2 ->fs_reclaim ->pool_lock#2 ->&dentry->d_lock ->rename_lock.seqcount ->&c->lock ->&zone->lock ->&____s->seqcount ->namespace_sem ->tk_core.seq.seqcount ->&sb->s_type->i_lock_key#5 ->&sem->wait_lock ->&rq->__lock ->&n->list_lock ->&xa->xa_lock#12 ->&obj_hash[i].lock ->stock_lock ->tomoyo_ss ->&sbinfo->stat_lock ->mmu_notifier_invalidate_range_start ->&s->s_inode_list_lock ->batched_entropy_u32.lock FD: 3 BD: 138 +.+.: subsys mutex#24 ->&k->k_lock FD: 3 BD: 138 +.+.: subsys mutex#25 ->&k->k_lock FD: 1 BD: 1 +.+.: subsys mutex#26 FD: 199 BD: 1 +.+.: subsys mutex#27 ->&k->list_lock ->&k->k_lock ->fs_reclaim ->pool_lock#2 ->&x->wait#9 ->&obj_hash[i].lock ->platform_devid_ida.xa_lock ->lock ->&root->kernfs_rwsem ->bus_type_sem ->sysfs_symlink_target_lock ->&zone->lock ->&____s->seqcount ->&dev->power.lock ->dpm_list_mtx ->&(&priv->bus_notifier)->rwsem ->&c->lock ->uevent_sock_mutex ->running_helpers_waitq.lock ->&rq->__lock ->subsys mutex#3 ->wakeup_ida.xa_lock ->gdp_mutex ->subsys mutex#15 ->events_lock ->rtcdev_lock FD: 1 BD: 1 +.+.: subsys mutex#28 FD: 1 BD: 218 +.+.: pcpu_alloc_mutex.wait_lock FD: 38 BD: 2 +.+.: (work_completion)(&p->wq) ->vmap_area_lock ->&obj_hash[i].lock ->purge_vmap_area_lock ->pool_lock#2 ->quarantine_lock ->&rq->__lock ->rcu_node_0 ->&cfs_rq->removed.lock ->&base->lock ->&rcu_state.expedited_wq ->&meta->lock ->kfence_freelist_lock FD: 31 BD: 1 ..-.: &(({ do { const void *__vpp_verify = (typeof((&vmstat_work) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((&vmstat_work))) *)((&vmstat_work)))); 
(typeof((typeof(*((&vmstat_work))) *)((&vmstat_work)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); }))->timer FD: 33 BD: 1 ..-.: mm/memcontrol.c:589 FD: 29 BD: 2 +.+.: (stats_flush_dwork).work ->cgroup_rstat_lock ->&obj_hash[i].lock ->&base->lock ->&rq->__lock FD: 2 BD: 19 ....: cgroup_rstat_lock ->per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) FD: 31 BD: 1 ..-.: &(&group->avgs_work)->timer FD: 29 BD: 2 +.+.: (work_completion)(&(&group->avgs_work)->work) ->&group->avgs_lock FD: 28 BD: 3 +.+.: &group->avgs_lock ->&per_cpu_ptr(group->pcpu, cpu)->seq ->&obj_hash[i].lock ->&base->lock ->&rq->__lock FD: 31 BD: 1 ..-.: &(&krcp->monitor_work)->timer FD: 31 BD: 1 ..-.: &(&tbl->managed_work)->timer FD: 35 BD: 2 +.+.: (work_completion)(&(&krcp->monitor_work)->work) ->krc.lock ->&obj_hash[i].lock ->&base->lock ->&rq->__lock ->rcu_node_0 ->&rcu_state.expedited_wq FD: 1 BD: 139 +.+.: subsys mutex#29 FD: 1 BD: 4 +.+.: key_user_lock FD: 1 BD: 4 +.+.: key_serial_lock FD: 5 BD: 5 +.+.: key_construction_mutex ->&obj_hash[i].lock ->pool_lock#2 ->keyring_name_lock FD: 136 BD: 3 +.+.: &type->lock_class ->keyring_serialise_link_lock ->fs_reclaim ->pool_lock#2 ->&c->lock ->&zone->lock ->&____s->seqcount ->key_user_lock ->crngs.lock ->key_serial_lock ->key_construction_mutex ->ima_keys_lock FD: 132 BD: 4 +.+.: keyring_serialise_link_lock ->fs_reclaim ->pool_lock#2 ->&obj_hash[i].lock ->root_key_user.lock ->key_construction_mutex FD: 29 BD: 140 ..-.: &pgdat->kswapd_wait ->&p->pi_lock FD: 1 BD: 1 +.+.: drivers_lock FD: 143 BD: 1 +.+.: damon_dbgfs_lock ->fs_reclaim ->pool_lock#2 ->tk_core.seq.seqcount ->damon_ops_lock ->pin_fs_lock ->&sb->s_type->i_mutex_key#3 FD: 140 BD: 1 +.+.: &type->s_umount_key#20/1 ->fs_reclaim ->pool_lock#2 ->pcpu_alloc_mutex ->shrinker_rwsem ->list_lrus_mutex ->sb_lock ->mmu_notifier_invalidate_range_start ->&sb->s_type->i_lock_key#18 ->&s->s_inode_list_lock ->tk_core.seq.seqcount ->&dentry->d_lock FD: 41 BD: 2 +.+.: &sb->s_type->i_lock_key#18 ->&dentry->d_lock FD: 1 BD: 1 +.+.: dq_list_lock FD: 140 BD: 1 +.+.: &type->s_umount_key#21/1 ->fs_reclaim ->pool_lock#2 ->pcpu_alloc_mutex ->shrinker_rwsem ->list_lrus_mutex ->sb_lock ->mmu_notifier_invalidate_range_start ->&sb->s_type->i_lock_key#19 ->&s->s_inode_list_lock ->tk_core.seq.seqcount ->&dentry->d_lock FD: 41 BD: 7 +.+.: &sb->s_type->i_lock_key#19 ->&dentry->d_lock FD: 1 BD: 1 +.+.: configfs_subsystem_mutex FD: 138 BD: 1 +.+.: &sb->s_type->i_mutex_key#7/1 ->fs_reclaim ->pool_lock#2 ->&dentry->d_lock ->&____s->seqcount ->&c->lock ->configfs_dirent_lock ->mmu_notifier_invalidate_range_start ->&sb->s_type->i_lock_key#19 ->&s->s_inode_list_lock ->tk_core.seq.seqcount ->&default_group_class[depth - 1]/2 ->&default_group_class[depth - 1]#2 FD: 1 BD: 8 +.+.: configfs_dirent_lock FD: 136 BD: 2 +.+.: &default_group_class[depth - 1]/2 ->fs_reclaim ->pool_lock#2 ->&dentry->d_lock ->configfs_dirent_lock ->mmu_notifier_invalidate_range_start ->&sb->s_type->i_lock_key#19 ->&s->s_inode_list_lock ->tk_core.seq.seqcount ->&default_group_class[depth - 1]#3/2 FD: 129 BD: 1 +.+.: ecryptfs_daemon_hash_mux ->fs_reclaim ->pool_lock#2 FD: 1 BD: 1 ....: &ecryptfs_kthread_ctl.wait FD: 2 BD: 1 +.+.: ecryptfs_msg_ctx_lists_mux ->&ecryptfs_msg_ctx_arr[i].mux FD: 1 BD: 2 +.+.: &ecryptfs_msg_ctx_arr[i].mux FD: 1 BD: 1 +.+.: nfs_version_lock FD: 151 BD: 1 ++++: key_types_sem ->(console_sem).lock ->asymmetric_key_parsers_sem ->&type->lock_class ->&obj_hash[i].lock ->pool_lock#2 FD: 1 BD: 1 +.+.: pnfs_spinlock FD: 1 BD: 5 +.+.: &sn->pipefs_sb_lock FD: 1 BD: 1 +.+.: nls_lock 
FD: 1 BD: 1 +.+.: jffs2_compressor_list_lock FD: 1 BD: 1 +.+.: next_tag_value_lock FD: 1 BD: 1 ....: log_redrive_lock FD: 2 BD: 1 ....: &TxAnchor.LazyLock ->jfs_commit_thread_wait.lock FD: 1 BD: 2 ....: jfs_commit_thread_wait.lock FD: 1 BD: 1 +.+.: jfsTxnLock FD: 7 BD: 1 +.+.: ocfs2_stack_lock ->(console_sem).lock FD: 1 BD: 1 +.+.: o2hb_callback_sem FD: 1 BD: 1 +.+.: o2net_handler_lock FD: 140 BD: 1 +.+.: &type->s_umount_key#22/1 ->fs_reclaim ->pool_lock#2 ->pcpu_alloc_mutex ->shrinker_rwsem ->list_lrus_mutex ->sb_lock ->&____s->seqcount ->&c->lock ->mmu_notifier_invalidate_range_start ->&sb->s_type->i_lock_key#20 ->&s->s_inode_list_lock ->tk_core.seq.seqcount ->&dentry->d_lock ->&n->list_lock ->&rq->__lock ->&xa->xa_lock#12 ->&obj_hash[i].lock ->stock_lock ->pcpu_alloc_mutex.wait_lock ->&p->pi_lock ->remove_cache_srcu ->&____s->seqcount#2 FD: 41 BD: 5 +.+.: &sb->s_type->i_lock_key#20 ->&dentry->d_lock FD: 302 BD: 88 +.+.: nf_hook_mutex ->fs_reclaim ->&____s->seqcount ->&zone->lock ->pool_lock#2 ->&c->lock ->stock_lock ->&____s->seqcount#2 ->&rq->__lock ->nf_hook_mutex.wait_lock ->rcu_node_0 ->remove_cache_srcu ->&n->list_lock ->cpu_hotplug_lock ->&obj_hash[i].lock ->per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) ->batched_entropy_u8.lock ->kfence_freelist_lock FD: 129 BD: 1 ++++: alg_types_sem ->fs_reclaim ->pool_lock#2 ->&rq->__lock FD: 1 BD: 1 +.+.: dma_list_mutex FD: 142 BD: 2 ++++: asymmetric_key_parsers_sem ->(console_sem).lock ->fs_reclaim ->pool_lock#2 ->crypto_alg_sem ->&obj_hash[i].lock ->(crypto_chain).rwsem ->&x->wait#21 ->&base->lock ->&rq->__lock ->(&timer.timer) ->&____s->seqcount ->&zone->lock ->&c->lock FD: 881 BD: 1 +.+.: blkcg_pol_register_mutex ->blkcg_pol_mutex ->cgroup_mutex FD: 1 BD: 4 +.+.: elv_list_lock FD: 134 BD: 3 +.+.: crc_t10dif_mutex ->crypto_alg_sem ->fs_reclaim ->pool_lock#2 FD: 134 BD: 1 +.+.: crc64_rocksoft_mutex ->crypto_alg_sem ->fs_reclaim ->pool_lock#2 FD: 1 BD: 1 +.+.: ts_mod_lock FD: 3 BD: 7 +.+.: subsys mutex#30 ->&k->k_lock FD: 37 BD: 10 +.+.: &dev->mutex#2 ->&obj_hash[i].lock ->&rnp->exp_wq[3] ->&rq->__lock ->&rnp->exp_lock ->&lock->wait_lock ->&rnp->exp_wq[2] FD: 32 BD: 5 ....: wakeup_srcu_srcu_usage.lock ->&obj_hash[i].lock ->&ACCESS_PRIVATE(sdp, lock) FD: 1 BD: 3 ....: wakeup_srcu FD: 31 BD: 1 ..-.: &(&ssp->srcu_sup->work)->timer FD: 1 BD: 3 ....: (&ws->timer) FD: 1 BD: 280 +.+.: klist_remove_lock FD: 5 BD: 3796 ....: &ws->lock ->tk_core.seq.seqcount ->&obj_hash[i].lock FD: 1 BD: 3 ....: deleted_ws.lock FD: 167 BD: 1 +.+.: register_count_mutex ->&k->list_lock ->fs_reclaim ->pool_lock#2 ->lock ->&root->kernfs_rwsem ->&k->k_lock ->uevent_sock_mutex ->&obj_hash[i].lock ->running_helpers_waitq.lock ->&rq->__lock FD: 1 BD: 1 +.+.: (cpufreq_policy_notifier_list).rwsem FD: 1 BD: 1 +.+.: cpuidle_driver_lock FD: 1 BD: 1 ....: thermal_cdev_ida.xa_lock FD: 1 BD: 4 ....: cpufreq_driver_lock FD: 3 BD: 1 +.+.: subsys mutex#31 ->&k->k_lock FD: 1 BD: 1 +.+.: (x86_mce_decoder_chain).rwsem FD: 1 BD: 1 ....: virtio_index_ida.xa_lock FD: 1 BD: 1 +.+.: subsys mutex#32 FD: 178 BD: 136 +.+.: &md->mutex ->fs_reclaim ->pool_lock#2 ->irq_domain_mutex ->pci_config_lock ->&xa->xa_lock#3 ->&domain->mutex ->&irq_desc_lock_class ->vector_lock ->&root->kernfs_rwsem ->lock ->&c->lock ->&zone->lock ->&____s->seqcount FD: 2 BD: 137 +.+.: &xa->xa_lock#3 ->pool_lock#2 FD: 1 BD: 1 +.+.: &dev->vqs_list_lock FD: 1 BD: 1 ....: &vp_dev->lock FD: 1 BD: 1 +.+.: (oom_notify_list).rwsem FD: 1 BD: 1 ....: &dev->config_lock FD: 1 BD: 1 +.+.: vdpa_dev_lock FD: 3 BD: 1 +.+.: subsys mutex#33 
->&k->k_lock FD: 30 BD: 4196 +.+.: &dentry->d_lock/1 ->&lru->node[i].lock FD: 264 BD: 1 +.+.: serial_mutex ->gpio_lookup_lock ->port_mutex FD: 1 BD: 2 +.+.: gpio_lookup_lock FD: 262 BD: 2 +.+.: port_mutex ->&port->mutex FD: 261 BD: 9 +.+.: &port->mutex ->fs_reclaim ->pool_lock#2 ->console_mutex ->resource_lock ->&port_lock_key ->(console_sem).lock ->&rq->__lock ->batched_entropy_u8.lock ->kfence_freelist_lock ->ctrl_ida.xa_lock ->&x->wait#9 ->&obj_hash[i].lock ->&dev->power.lock ->&k->list_lock ->lock ->&root->kernfs_rwsem ->bus_type_sem ->sysfs_symlink_target_lock ->&k->k_lock ->dpm_list_mtx ->&c->lock ->&zone->lock ->&____s->seqcount ->uevent_sock_mutex ->running_helpers_waitq.lock ->subsys mutex#34 ->semaphore->lock ->*(&acpi_gbl_reference_count_lock) ->dev_pm_qos_sysfs_mtx ->kernfs_idr_lock ->deferred_probe_mutex ->device_links_lock ->mmu_notifier_invalidate_range_start ->gdp_mutex ->req_lock ->&p->pi_lock ->&x->wait#11 ->subsys mutex#21 ->chrdevs_lock ->&cfs_rq->removed.lock ->hash_mutex ->&i->lock ->&desc->request_mutex ->register_lock ->&irq_desc_lock_class ->proc_subdir_lock ->proc_inum_ida.xa_lock FD: 1 BD: 10 ....: ctrl_ida.xa_lock FD: 1 BD: 10 +.+.: subsys mutex#34 FD: 1 BD: 1 ....: rng_index_ida.xa_lock FD: 132 BD: 1 +.+.: rng_mutex ->&x->wait#13 ->fs_reclaim ->pool_lock#2 ->kthread_create_lock ->&p->pi_lock ->&rq->__lock ->&x->wait ->&obj_hash[i].lock FD: 29 BD: 2 -.-.: &x->wait#12 ->&p->pi_lock FD: 1 BD: 2 ....: &x->wait#13 FD: 31 BD: 1 +.+.: reading_mutex ->reading_mutex.wait_lock ->&rq->__lock ->&x->wait#12 FD: 1 BD: 2 +.+.: reading_mutex.wait_lock FD: 1 BD: 1 ....: &dev->managed.lock FD: 140 BD: 1 +.+.: &type->s_umount_key#23/1 ->fs_reclaim ->pool_lock#2 ->pcpu_alloc_mutex ->shrinker_rwsem ->list_lrus_mutex ->sb_lock ->&c->lock ->&zone->lock ->&____s->seqcount ->mmu_notifier_invalidate_range_start ->&sb->s_type->i_lock_key#21 ->&s->s_inode_list_lock ->tk_core.seq.seqcount ->&dentry->d_lock FD: 41 BD: 2 +.+.: &sb->s_type->i_lock_key#21 ->&dentry->d_lock FD: 2 BD: 238 ....: drm_minor_lock ->pool_lock#2 FD: 1 BD: 1 +.+.: &dev->debugfs_mutex FD: 3 BD: 3 +.+.: subsys mutex#35 ->&k->k_lock FD: 1 BD: 1 ....: (worker)->lock FD: 129 BD: 23 +.+.: &dev->mode_config.idr_mutex ->fs_reclaim ->pool_lock#2 FD: 152 BD: 19 +.+.: crtc_ww_class_acquire ->crtc_ww_class_mutex ->fs_reclaim ->pool_lock#2 ->&c->lock ->&____s->seqcount FD: 151 BD: 20 +.+.: crtc_ww_class_mutex ->reservation_ww_class_acquire ->fs_reclaim ->pool_lock#2 ->&c->lock ->&____s->seqcount ->&obj_hash[i].lock ->&dev->mode_config.idr_mutex ->&dev->mode_config.blob_lock ->&crtc->commit_lock ->&zone->lock ->reservation_ww_class_mutex ->tk_core.seq.seqcount ->&vkms_out->lock ->&dev->vbl_lock ->&x->wait#14 ->(work_completion)(&vkms_state->composer_work) ->&base->lock ->&rq->__lock ->(&timer.timer) ->(work_completion)(&vkms_state->composer_work)#2 FD: 1 BD: 21 +.+.: &dev->mode_config.blob_lock FD: 1 BD: 1 ....: &xa->xa_lock#4 FD: 1 BD: 1 ....: &xa->xa_lock#5 FD: 1 BD: 22 ....: &dev->mode_config.connector_list_lock FD: 20 BD: 24 ..-.: &dev->vbl_lock ->&dev->vblank_time_lock FD: 191 BD: 1 .+.+: drm_connector_list_iter ->&dev->mode_config.connector_list_lock ->fs_reclaim ->pool_lock#2 ->&c->lock ->&____s->seqcount ->&connector->mutex FD: 189 BD: 2 +.+.: &connector->mutex ->fs_reclaim ->pool_lock#2 ->&x->wait#9 ->&obj_hash[i].lock ->&k->list_lock ->&c->lock ->&____s->seqcount ->lock ->&root->kernfs_rwsem ->bus_type_sem ->sysfs_symlink_target_lock ->&zone->lock ->&dev->power.lock ->dpm_list_mtx ->uevent_sock_mutex 
->running_helpers_waitq.lock ->&rq->__lock ->&cfs_rq->removed.lock ->&k->k_lock ->subsys mutex#35 ->pin_fs_lock ->&sb->s_type->i_mutex_key#3 ->&dev->mode_config.idr_mutex ->connector_list_lock FD: 1 BD: 3 +.+.: connector_list_lock FD: 1 BD: 1 +.+.: &dev->filelist_mutex FD: 183 BD: 15 +.+.: &helper->lock ->fs_reclaim ->pool_lock#2 ->&client->modeset_mutex ->&obj_hash[i].lock ->&sbinfo->stat_lock ->mmu_notifier_invalidate_range_start ->&sb->s_type->i_lock_key ->&s->s_inode_list_lock ->tk_core.seq.seqcount ->batched_entropy_u32.lock ->&c->lock ->&____s->seqcount ->&mgr->vm_lock ->&dev->object_name_lock ->&node->vm_lock ->&file_private->table_lock ->&dev->mode_config.idr_mutex ->&dev->mode_config.fb_lock ->&file->fbs_lock ->&prime_fpriv->lock ->free_vmap_area_lock ->vmap_area_lock ->&zone->lock ->init_mm.page_table_lock ->&rq->__lock ->&dev->master_mutex ->&lock->wait_lock ->&pool->lock ->reservation_ww_class_mutex FD: 154 BD: 17 +.+.: &client->modeset_mutex ->&dev->mode_config.mutex ->fs_reclaim ->pool_lock#2 ->crtc_ww_class_acquire FD: 153 BD: 18 +.+.: &dev->mode_config.mutex ->crtc_ww_class_acquire ->fs_reclaim ->pool_lock#2 ->&obj_hash[i].lock FD: 2 BD: 16 +.+.: &mgr->vm_lock ->pool_lock#2 FD: 53 BD: 16 +.+.: &dev->object_name_lock ->lock FD: 10 BD: 238 +.+.: &file_private->table_lock ->&c->lock ->&____s->seqcount ->pool_lock#2 ->&obj_hash[i].lock FD: 4 BD: 16 +.+.: &node->vm_lock ->&obj_hash[i].lock ->pool_lock#2 FD: 1 BD: 16 +.+.: &dev->mode_config.fb_lock FD: 1 BD: 16 +.+.: &file->fbs_lock FD: 1 BD: 16 +.+.: &prime_fpriv->lock FD: 232 BD: 1 +.+.: registration_lock ->fs_reclaim ->pool_lock#2 ->&x->wait#9 ->&obj_hash[i].lock ->&k->list_lock ->gdp_mutex ->lock ->&root->kernfs_rwsem ->bus_type_sem ->&c->lock ->&____s->seqcount ->sysfs_symlink_target_lock ->&dev->power.lock ->dpm_list_mtx ->req_lock ->&p->pi_lock ->&rq->__lock ->&x->wait#11 ->uevent_sock_mutex ->running_helpers_waitq.lock ->&k->k_lock ->subsys mutex#11 ->vt_switch_mutex ->(console_sem).lock ->console_lock FD: 129 BD: 2 +.+.: vt_switch_mutex ->fs_reclaim ->pool_lock#2 FD: 1 BD: 13 +.+.: &fb_info->lock FD: 155 BD: 16 +.+.: &dev->master_mutex ->&client->modeset_mutex FD: 1 BD: 21 +.+.: &crtc->commit_lock FD: 133 BD: 144 +.+.: &shmem->vmap_lock ->&shmem->pages_lock ->fs_reclaim ->pool_lock#2 ->free_vmap_area_lock ->vmap_area_lock ->&zone->lock ->&____s->seqcount ->init_mm.page_table_lock FD: 129 BD: 145 +.+.: &shmem->pages_lock ->fs_reclaim ->&c->lock ->&____s->seqcount ->pool_lock#2 ->&zone->lock ->&xa->xa_lock#6 ->lock#4 ->&info->lock ->&rq->__lock FD: 42 BD: 4156 ..-.: &xa->xa_lock#6 ->pool_lock#2 ->&c->lock ->&____s->seqcount ->&obj_hash[i].lock ->&zone->lock ->batched_entropy_u8.lock ->kfence_freelist_lock ->key#10 ->&s->s_inode_wblist_lock ->&base->lock ->key#12 ->&wb->work_lock ->&n->list_lock ->&pl->lock ->key#13 ->stock_lock ->&xa->xa_lock#12 ->&____s->seqcount#2 ->key#29 FD: 2 BD: 4096 ....: &info->lock ->key#9 FD: 36 BD: 21 -.-.: &vkms_out->lock ->&dev->event_lock FD: 35 BD: 22 -.-.: &dev->event_lock ->&dev->vbl_lock ->&____s->seqcount#6 ->&x->wait#14 ->&obj_hash[i].lock ->pool_lock#2 ->&dev->vblank_time_lock ->&vblank->queue ->&base->lock FD: 1 BD: 27 ----: &____s->seqcount#6 FD: 29 BD: 23 -...: &x->wait#14 ->&p->pi_lock FD: 19 BD: 25 -.-.: &dev->vblank_time_lock ->tk_core.seq.seqcount ->&(&vblank->seqlock)->lock ->&obj_hash[i].lock ->hrtimer_bases.lock FD: 2 BD: 26 -.-.: &(&vblank->seqlock)->lock ->&____s->seqcount#6 FD: 1 BD: 21 +.+.: (work_completion)(&vkms_state->composer_work) FD: 1 BD: 17 ....: 
&helper->damage_lock FD: 185 BD: 2 +.+.: (work_completion)(&helper->damage_work) ->&helper->damage_lock ->&helper->lock FD: 1 BD: 3592 +.+.: &lock->wait_lock FD: 1 BD: 23 -.-.: &vblank->queue FD: 1 BD: 21 +.+.: (work_completion)(&vkms_state->composer_work)#2 FD: 1 BD: 13 ....: vt_event_lock FD: 31 BD: 1 -.-.: &vb->stop_update_lock FD: 302 BD: 1 +.+.: (wq_completion)events_freezable ->(work_completion)(&vb->update_balloon_stats_work) FD: 301 BD: 2 +.+.: (work_completion)(&vb->update_balloon_stats_work) ->cpu_hotplug_lock ->&s->s_inode_list_lock ->&rq->__lock FD: 1 BD: 1 +.+.: kernel_fb_helper_lock FD: 1 BD: 1 +.+.: &dev->clientlist_mutex FD: 1 BD: 1 +...: &dev->queue_lock FD: 1 BD: 8 ....: blk_queue_ida.xa_lock FD: 314 BD: 8 +.+.: &q->sysfs_lock ->&q->unused_hctx_lock ->mmu_notifier_invalidate_range_start ->pool_lock#2 ->&obj_hash[i].lock ->&c->lock ->&n->list_lock ->&zone->lock ->&____s->seqcount ->cpu_hotplug_lock ->fs_reclaim ->&xa->xa_lock#7 ->&q->debugfs_mutex ->pcpu_alloc_mutex ->&q->mq_freeze_lock ->percpu_ref_switch_lock ->&q->queue_lock ->&stats->lock ->lock ->&root->kernfs_rwsem ->set->srcu ->&rq->__lock FD: 1 BD: 9 +.+.: &q->unused_hctx_lock FD: 2 BD: 11 +.+.: &xa->xa_lock#7 ->pool_lock#2 FD: 32 BD: 5 +.+.: &set->tag_list_lock ->&q->mq_freeze_lock ->percpu_ref_switch_lock FD: 2 BD: 6 +.+.: &xa->xa_lock#8 ->pool_lock#2 FD: 38 BD: 297 ....: &q->queue_lock ->&blkcg->lock ->pool_lock#2 ->pcpu_lock ->&obj_hash[i].lock ->percpu_counters_lock ->&c->lock ->&zone->lock ->&____s->seqcount FD: 21 BD: 298 ....: &blkcg->lock ->pool_lock#2 ->percpu_ref_switch_lock ->(&sq->pending_timer) ->&obj_hash[i].lock ->&base->lock ->percpu_counters_lock ->pcpu_lock ->pool_lock FD: 30 BD: 10 +.+.: &q->mq_freeze_lock ->percpu_ref_switch_lock ->&q->mq_freeze_wq ->&rq->__lock FD: 5 BD: 303 ..-.: percpu_ref_switch_lock ->&obj_hash[i].lock ->pool_lock#2 FD: 1 BD: 304 ....: &q->mq_freeze_wq FD: 1 BD: 13 +.+.: &bdev->bd_size_lock FD: 3 BD: 6 +.+.: subsys mutex#36 ->&k->k_lock FD: 315 BD: 6 +.+.: &q->sysfs_dir_lock ->fs_reclaim ->pool_lock#2 ->lock ->&root->kernfs_rwsem ->&c->lock ->&____s->seqcount ->&q->sysfs_lock ->&zone->lock ->&obj_hash[i].lock FD: 142 BD: 9 +.+.: &q->debugfs_mutex ->pin_fs_lock ->&sb->s_type->i_mutex_key#3 FD: 3 BD: 5 +.+.: subsys mutex#37 ->&k->k_lock FD: 1 BD: 5 ....: cgwb_lock FD: 1 BD: 5 +...: bdi_lock FD: 62 BD: 4124 +.+.: inode_hash_lock ->&sb->s_type->i_lock_key#3 ->&sb->s_type->i_lock_key#22 ->&s->s_inode_list_lock ->&sb->s_type->i_lock_key#24 ->&sb->s_type->i_lock_key#30 ->&sb->s_type->i_lock_key#31 FD: 1 BD: 4 +.+.: bdev_lock FD: 335 BD: 3 +.+.: &disk->open_mutex ->fs_reclaim ->pool_lock#2 ->free_vmap_area_lock ->vmap_area_lock ->&zone->lock ->&____s->seqcount ->init_mm.page_table_lock ->&xa->xa_lock#6 ->lock#4 ->mmu_notifier_invalidate_range_start ->&c->lock ->&mapping->private_lock ->tk_core.seq.seqcount ->&ret->b_uptodate_lock ->&obj_hash[i].lock ->purge_vmap_area_lock ->&sb->s_type->i_lock_key#3 ->lock#5 ->&lruvec->lru_lock ->&base->lock ->&hctx->lock ->&x->wait#16 ->&rq->__lock ->(&timer.timer) ->&q->sysfs_dir_lock ->&bdev->bd_size_lock ->&dd->lock ->&folio_wait_table[i] ->(console_sem).lock ->console_owner_lock ->console_owner ->&s->s_inode_list_lock ->pcpu_alloc_mutex ->&x->wait#9 ->&k->list_lock ->lock ->&root->kernfs_rwsem ->bus_type_sem ->sysfs_symlink_target_lock ->&dev->power.lock ->dpm_list_mtx ->req_lock ->&p->pi_lock ->&x->wait#11 ->&k->k_lock ->subsys mutex#36 ->&xa->xa_lock#8 ->inode_hash_lock ->bdev_lock ->&lo->lo_mutex ->nbd_index_mutex ->&nbd->config_lock 
->&new->lock ->rcu_node_0 ->&rcu_state.expedited_wq ->&lock->wait_lock ->&cfs_rq->removed.lock FD: 44 BD: 4129 +.+.: &mapping->private_lock ->per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) ->&xa->xa_lock#6 FD: 30 BD: 6 ..-.: &ret->b_uptodate_lock ->bit_wait_table + i FD: 14 BD: 5 ....: floppy_lock ->&obj_hash[i].lock ->&base->lock FD: 1 BD: 1 ..-.: percpu_ref_switch_waitq.lock FD: 29 BD: 4 ....: command_done.lock ->&p->pi_lock FD: 17 BD: 2 +.+.: floppy_work ->dma_spin_lock ->floppy_lock ->&obj_hash[i].lock ->fdc_wait.lock FD: 1 BD: 3 ....: dma_spin_lock FD: 129 BD: 1 +.+.: loop_ctl_mutex ->fs_reclaim ->pool_lock#2 FD: 31 BD: 1 ..-.: &(&ops->cursor_work)->timer FD: 33 BD: 2 +.+.: (work_completion)(&(&ops->cursor_work)->work) ->(console_sem).lock ->&obj_hash[i].lock ->&base->lock FD: 1 BD: 9 ....: &stats->lock FD: 149 BD: 9 +.+.: nbd_index_mutex ->fs_reclaim ->pool_lock#2 ->&nbd->config_lock FD: 1 BD: 16 .+.+: set->srcu FD: 35 BD: 6 +.+.: (work_completion)(&(&q->requeue_work)->work) ->&q->requeue_lock ->&hctx->lock ->rcu_node_0 ->&rq->__lock ->&rcu_state.expedited_wq FD: 19 BD: 6 +.+.: (work_completion)(&(&hctx->run_work)->work) FD: 327 BD: 1 +.+.: zram_index_mutex ->fs_reclaim ->pool_lock#2 ->blk_queue_ida.xa_lock ->&obj_hash[i].lock ->pcpu_alloc_mutex ->bio_slab_lock ->&____s->seqcount ->&zone->lock ->percpu_counters_lock ->&c->lock ->mmu_notifier_invalidate_range_start ->&sb->s_type->i_lock_key#3 ->&s->s_inode_list_lock ->&xa->xa_lock#8 ->&n->list_lock ->lock ->&q->queue_lock ->&x->wait#9 ->&bdev->bd_size_lock ->&k->list_lock ->gdp_mutex ->&root->kernfs_rwsem ->bus_type_sem ->sysfs_symlink_target_lock ->&dev->power.lock ->dpm_list_mtx ->req_lock ->&p->pi_lock ->&rq->__lock ->&cfs_rq->removed.lock ->&x->wait#11 ->subsys mutex#36 ->dev_hotplug_mutex ->&q->sysfs_dir_lock ->percpu_ref_switch_lock ->uevent_sock_mutex ->running_helpers_waitq.lock ->subsys mutex#37 ->cgwb_lock ->pin_fs_lock ->&sb->s_type->i_mutex_key#3 ->bdi_lock ->inode_hash_lock ->(console_sem).lock FD: 3 BD: 1 +.+.: subsys mutex#38 ->&k->k_lock FD: 130 BD: 2 +.+.: &default_group_class[depth - 1]#2 ->fs_reclaim ->pool_lock#2 ->configfs_dirent_lock FD: 2 BD: 1 +.+.: &lock ->nullb_indexes.xa_lock FD: 1 BD: 2 ....: nullb_indexes.xa_lock FD: 1 BD: 1 +.+.: ctx_list.lock FD: 1 BD: 1 ....: nfc_index_ida.xa_lock FD: 183 BD: 7 +.+.: nfc_devlist_mutex ->fs_reclaim ->pool_lock#2 ->&k->list_lock ->gdp_mutex ->lock ->&root->kernfs_rwsem ->bus_type_sem ->sysfs_symlink_target_lock ->&dev->power.lock ->dpm_list_mtx ->uevent_sock_mutex ->&obj_hash[i].lock ->running_helpers_waitq.lock ->subsys mutex#39 ->&c->lock ->&____s->seqcount ->&zone->lock ->&k->k_lock ->&genl_data->genl_data_mutex ->&rq->__lock FD: 3 BD: 8 +.+.: subsys mutex#39 ->&k->k_lock FD: 1 BD: 79 ....: &rfkill->lock FD: 30 BD: 15 +.+.: subsys mutex#40 ->&k->k_lock ->&rq->__lock FD: 188 BD: 6 +.+.: (work_completion)(&rfkill->sync_work) ->rfkill_global_mutex ->rfkill_global_mutex.wait_lock ->&p->pi_lock FD: 1 BD: 15 +.+.: rfkill_global_mutex.wait_lock FD: 1 BD: 1 +.+.: dma_heap_minors.xa_lock FD: 3 BD: 1 +.+.: subsys mutex#41 ->&k->k_lock FD: 1 BD: 1 +.+.: heap_list_lock FD: 1 BD: 1 ....: host_index_ida.xa_lock FD: 161 BD: 1 +.+.: scsi_sense_cache_mutex ->slab_mutex FD: 28 BD: 4 +.+.: subsys mutex#42 ->&rq->__lock FD: 3 BD: 1 +.+.: subsys mutex#43 ->&k->k_lock FD: 1 BD: 217 ....: &dev->power.wait_queue FD: 1 BD: 155 -.-.: &virtscsi_vq->vq_lock FD: 349 BD: 3 +.+.: &shost->scan_mutex ->fs_reclaim ->pool_lock#2 ->shost->host_lock ->&dev->power.lock ->&x->wait#9 ->&obj_hash[i].lock 
->attribute_container_mutex ->&c->lock ->&zone->lock ->&____s->seqcount ->blk_queue_ida.xa_lock ->pcpu_alloc_mutex ->&q->sysfs_lock ->&set->tag_list_lock ->batched_entropy_u32.lock ->tk_core.seq.seqcount ->mmu_notifier_invalidate_range_start ->&hctx->lock ->&base->lock ->&x->wait#16 ->&rq->__lock ->(&timer.timer) ->&sdev->state_mutex ->&q->mq_freeze_lock ->&q->mq_freeze_wq ->percpu_ref_switch_lock ->(&q->timeout) ->(work_completion)(&q->timeout_work) ->(work_completion)(&(&q->requeue_work)->work) ->(work_completion)(&(&hctx->run_work)->work) ->cpu_hotplug_lock ->&xa->xa_lock#7 ->&q->unused_hctx_lock ->(work_completion)(&sdev->requeue_work) ->(work_completion)(&sdev->event_work) ->pcpu_lock ->&sdev->inquiry_mutex ->(console_sem).lock ->console_owner_lock ->console_owner ->&cfs_rq->removed.lock ->pcpu_alloc_mutex.wait_lock ->&p->pi_lock ->quarantine_lock ->batched_entropy_u8.lock ->kfence_freelist_lock ->&meta->lock ->&x->wait#15 ->&k->list_lock ->lock ->&root->kernfs_rwsem ->bus_type_sem ->sysfs_symlink_target_lock ->&k->k_lock ->dpm_list_mtx ->uevent_sock_mutex ->running_helpers_waitq.lock ->subsys mutex#42 ->device_links_srcu ->async_lock ->gdp_mutex ->subsys mutex#44 ->bsg_minor_ida.xa_lock ->chrdevs_lock ->req_lock ->&x->wait#11 ->subsys mutex#53 FD: 1 BD: 4 ....: shost->host_lock FD: 2 BD: 3 +.+.: async_scan_lock ->&x->wait#15 FD: 1 BD: 5 ....: &x->wait#15 FD: 1 BD: 142 +.+.: &hctx->lock FD: 29 BD: 5 ..-.: &x->wait#16 ->&p->pi_lock FD: 1 BD: 4 +.+.: &sdev->state_mutex FD: 31 BD: 4 +.-.: (&q->timeout) FD: 36 BD: 5 +.+.: (work_completion)(&q->timeout_work) ->&tags->lock ->&obj_hash[i].lock ->&base->lock ->&fq->mq_flush_lock FD: 1 BD: 4 +.+.: (work_completion)(&sdev->requeue_work) FD: 1 BD: 4 +.+.: (work_completion)(&sdev->event_work) FD: 1 BD: 4 +.+.: &sdev->inquiry_mutex FD: 186 BD: 4 +.+.: subsys mutex#44 ->&k->list_lock ->&k->k_lock ->fs_reclaim ->pool_lock#2 ->lock ->chrdevs_lock ->&x->wait#9 ->&obj_hash[i].lock ->gdp_mutex ->&root->kernfs_rwsem ->bus_type_sem ->sysfs_symlink_target_lock ->&c->lock ->&zone->lock ->&____s->seqcount ->&dev->power.lock ->dpm_list_mtx ->req_lock ->&p->pi_lock ->&x->wait#11 ->&rq->__lock ->uevent_sock_mutex ->running_helpers_waitq.lock ->subsys mutex#52 ->(console_sem).lock ->console_owner_lock ->console_owner ->&cfs_rq->removed.lock FD: 1 BD: 1 +.+.: nvmf_hosts_mutex FD: 3 BD: 1 +.+.: subsys mutex#45 ->&k->k_lock FD: 1 BD: 1 +.+.: nvmf_transports_rwsem FD: 3 BD: 1 +.+.: subsys mutex#46 ->&k->k_lock FD: 135 BD: 3 +.+.: &default_group_class[depth - 1]#3/2 ->fs_reclaim ->pool_lock#2 ->&dentry->d_lock ->configfs_dirent_lock ->mmu_notifier_invalidate_range_start ->&sb->s_type->i_lock_key#19 ->&s->s_inode_list_lock ->tk_core.seq.seqcount ->&default_group_class[depth - 1]#4/2 FD: 1 BD: 1 +.+.: nvmet_config_sem FD: 3 BD: 1 +.+.: subsys mutex#47 ->&k->k_lock FD: 134 BD: 4 +.+.: &default_group_class[depth - 1]#4/2 ->fs_reclaim ->pool_lock#2 ->&dentry->d_lock ->configfs_dirent_lock ->mmu_notifier_invalidate_range_start ->&sb->s_type->i_lock_key#19 ->&s->s_inode_list_lock ->tk_core.seq.seqcount ->&default_group_class[depth - 1]#5/2 FD: 133 BD: 5 +.+.: &default_group_class[depth - 1]#5/2 ->fs_reclaim ->pool_lock#2 ->&dentry->d_lock ->configfs_dirent_lock ->mmu_notifier_invalidate_range_start ->&sb->s_type->i_lock_key#19 ->&s->s_inode_list_lock ->tk_core.seq.seqcount ->&default_group_class[depth - 1]#6 ->&default_group_class[depth - 1]#6/2 FD: 130 BD: 6 +.+.: &default_group_class[depth - 1]#6 ->fs_reclaim ->pool_lock#2 ->configfs_dirent_lock FD: 1 BD: 6 +.+.: 
&default_group_class[depth - 1]#6/2 FD: 1 BD: 1 +.+.: backend_mutex FD: 1 BD: 1 +.+.: scsi_mib_index_lock FD: 1 BD: 1 +.+.: hba_lock FD: 129 BD: 1 +.+.: device_mutex ->fs_reclaim ->pool_lock#2 FD: 1 BD: 1 +.+.: &hba->device_lock FD: 341 BD: 1 +.+.: mtd_table_mutex ->fs_reclaim ->pool_lock#2 ->&x->wait#9 ->&obj_hash[i].lock ->&k->list_lock ->gdp_mutex ->lock ->&root->kernfs_rwsem ->bus_type_sem ->sysfs_symlink_target_lock ->&c->lock ->&____s->seqcount ->&dev->power.lock ->dpm_list_mtx ->req_lock ->&p->pi_lock ->&x->wait#11 ->&rq->__lock ->uevent_sock_mutex ->running_helpers_waitq.lock ->subsys mutex#48 ->devtree_lock ->nvmem_ida.xa_lock ->nvmem_cell_mutex ->&k->k_lock ->subsys mutex#49 ->pin_fs_lock ->&sb->s_type->i_mutex_key#3 ->(console_sem).lock ->pcpu_alloc_mutex ->cpu_hotplug_lock ->&n->list_lock ->&zone->lock ->batched_entropy_u32.lock ->mmu_notifier_invalidate_range_start ->blk_queue_ida.xa_lock ->&q->sysfs_lock ->&set->tag_list_lock ->bio_slab_lock ->percpu_counters_lock ->&sb->s_type->i_lock_key#3 ->&s->s_inode_list_lock ->&xa->xa_lock#8 ->&q->mq_freeze_lock ->set->srcu ->percpu_ref_switch_lock ->&q->queue_lock ->&bdev->bd_size_lock ->elv_list_lock ->(work_completion)(&(&q->requeue_work)->work) ->(work_completion)(&(&hctx->run_work)->work) ->&q->debugfs_mutex ->&cfs_rq->removed.lock ->subsys mutex#36 ->dev_hotplug_mutex ->&q->sysfs_dir_lock ->subsys mutex#37 ->cgwb_lock ->bdi_lock ->inode_hash_lock FD: 1 BD: 1 +.+.: part_parser_lock FD: 1 BD: 86 ....: (kmod_concurrent_max).lock FD: 29 BD: 87 ....: &x->wait#17 ->&p->pi_lock FD: 1 BD: 148 ....: &prev->lock FD: 3 BD: 2 +.+.: subsys mutex#48 ->&k->k_lock FD: 1 BD: 2 ....: nvmem_ida.xa_lock FD: 1 BD: 2 +.+.: nvmem_cell_mutex FD: 1 BD: 2 +.+.: subsys mutex#49 FD: 910 BD: 1 +.+.: (wq_completion)gid-cache-wq ->(work_completion)(&ndev_work->work) ->(work_completion)(&work->work) ->&rq->__lock FD: 908 BD: 2 +.+.: (work_completion)(&ndev_work->work) ->devices_rwsem ->&obj_hash[i].lock ->pool_lock#2 ->&meta->lock ->kfence_freelist_lock ->&rq->__lock ->&base->lock ->quarantine_lock FD: 1 BD: 3417 +.+.: &bond->stats_lock FD: 34 BD: 89 ....: lweventlist_lock ->pool_lock#2 ->&dir->lock#2 ->&c->lock ->&____s->seqcount ->&obj_hash[i].lock ->&base->lock ->batched_entropy_u8.lock ->kfence_freelist_lock ->&n->list_lock ->&____s->seqcount#2 FD: 872 BD: 2 +.+.: (linkwatch_work).work ->rtnl_mutex ->rtnl_mutex.wait_lock ->&p->pi_lock ->&rq->__lock ->&cfs_rq->removed.lock ->&obj_hash[i].lock ->pool_lock#2 ->stock_lock FD: 1 BD: 3517 +.+.: rtnl_mutex.wait_lock FD: 3 BD: 3426 ..-.: once_lock ->crngs.lock FD: 301 BD: 2 +.+.: (work_completion)(&w->work) ->cpu_hotplug_lock ->&obj_hash[i].lock FD: 28 BD: 72 ++++: (inet6addr_validator_chain).rwsem ->&rq->__lock FD: 28 BD: 72 ++++: (inetaddr_validator_chain).rwsem ->&rq->__lock FD: 3 BD: 1 +.+.: subsys mutex#50 ->&k->k_lock FD: 1 BD: 1 +.+.: mdio_board_lock FD: 1 BD: 1 +.+.: mode_list_lock FD: 1 BD: 73 +.+.: napi_hash_lock FD: 133 BD: 136 +.+.: xps_map_mutex ->fs_reclaim ->pool_lock#2 ->jump_label_mutex ->&obj_hash[i].lock ->krc.lock ->&rq->__lock FD: 1 BD: 2 +.+.: (work_completion)(&vi->config_work) FD: 1 BD: 1 +.+.: l3mdev_lock FD: 1 BD: 3 ....: sd_index_ida.xa_lock FD: 3 BD: 3 +.+.: subsys mutex#51 ->&k->k_lock FD: 2 BD: 238 ....: sg_index_lock ->pool_lock#2 FD: 3 BD: 5 +.+.: subsys mutex#52 ->&k->k_lock FD: 1 BD: 4 ....: bsg_minor_ida.xa_lock FD: 3 BD: 4 +.+.: subsys mutex#53 ->&k->k_lock FD: 3 BD: 1 +.+.: subsys mutex#54 ->&k->k_lock FD: 2 BD: 1 +.+.: compressor_list_lock ->pool_lock#2 FD: 1 BD: 161 +.+.: 
&dd->lock FD: 29 BD: 4123 ..-.: &folio_wait_table[i] ->&p->pi_lock FD: 44 BD: 1 +.+.: (wq_completion)kblockd ->(work_completion)(&(&hctx->run_work)->work) ->(work_completion)(&q->timeout_work) ->(work_completion)(&(&q->requeue_work)->work) FD: 1 BD: 5 ....: hwsim_netgroup_ida.xa_lock FD: 34 BD: 3454 +.-.: hwsim_radio_lock ->pool_lock#2 ->&list->lock#19 ->&c->lock ->&n->list_lock ->&____s->seqcount#2 ->&____s->seqcount ->&zone->lock ->init_task.mems_allowed_seq.seqcount ->batched_entropy_u8.lock ->kfence_freelist_lock FD: 3 BD: 10 +.+.: subsys mutex#55 ->&k->k_lock FD: 597 BD: 74 +.+.: &rdev->wiphy.mtx ->fs_reclaim ->&c->lock ->&____s->seqcount ->pool_lock#2 ->&k->list_lock ->gdp_mutex ->&zone->lock ->lock ->&root->kernfs_rwsem ->bus_type_sem ->sysfs_symlink_target_lock ->&dev->power.lock ->dpm_list_mtx ->uevent_sock_mutex ->&obj_hash[i].lock ->running_helpers_waitq.lock ->&k->k_lock ->subsys mutex#56 ->pin_fs_lock ->&sb->s_type->i_mutex_key#3 ->nl_table_lock ->nl_table_wait.lock ->reg_requests_lock ->stack_depot_init_mutex ->pcpu_alloc_mutex ->&local->iflist_mtx ->net_rwsem ->&x->wait#9 ->subsys mutex#17 ->&dir->lock#2 ->dev_hotplug_mutex ->dev_base_lock ->input_pool.lock ->batched_entropy_u32.lock ->&tbl->lock ->sysctl_lock ->&wdev->mtx ->&fq->lock ->&rq->__lock ->&n->list_lock ->&cfs_rq->removed.lock ->rlock-AF_NETLINK ->lweventlist_lock ->&pool->lock ->rcu_node_0 ->&data->mutex ->&base->lock ->&tn->lock ->failover_lock ->proc_subdir_lock ->proc_inum_ida.xa_lock ->&idev->mc_lock ->&pnettable->lock ->smc_ib_devices.mutex ->&ndev->lock ->&wdev->event_lock ->&rdev->mgmt_registrations_lock ->(work_completion)(&(&sdata->dec_tailroom_needed_wk)->work) ->&local->key_mtx ->&dentry->d_lock ->&fsnotify_mark_srcu ->&sb->s_type->i_lock_key#7 ->&s->s_inode_list_lock ->&xa->xa_lock#6 ->mount_lock ->&rdev->wiphy_work_lock ->(&dwork->timer) ->(work_completion)(&(&link->color_collision_detect_work)->work) ->&local->chanctx_mtx ->rtnl_mutex.wait_lock ->&p->pi_lock ->&lock->wait_lock ->&list->lock#18 ->lock#6 ->&____s->seqcount#2 ->remove_cache_srcu ->&local->mtx ->&local->sta_mtx ->_xmit_ETHER ->(&local->dynamic_ps_timer) ->(work_completion)(&local->dynamic_ps_enable_work) ->(work_completion)(&sdata->recalc_smps) ->(work_completion)(&link->csa_finalize_work) ->(work_completion)(&link->color_change_finalize_work) ->(work_completion)(&(&link->dfs_cac_timer_work)->work) ->&local->queue_stop_reason_lock ->&list->lock#19 ->(work_completion)(&local->reconfig_filter) ->&wq->mutex ->&list->lock#26 ->rcu_state.exp_mutex.wait_lock ->quarantine_lock ->&rnp->exp_wq[1] ->cpu_hotplug_lock ->bpf_devs_lock ->&in_dev->mc_tomb_lock ->class ->(&tbl->proxy_timer) ->&ul->lock ->&net->xdp.lock ->krc.lock ->mirred_list_lock ->&nft_net->commit_mutex ->&ent->pde_unload_lock ->&net->ipv6.addrconf_hash_lock ->&idev->mc_query_lock ->(work_completion)(&(&idev->mc_report_work)->work) ->&idev->mc_report_lock ->&pnn->pndevs.lock ->&pnn->routes.lock ->target_list_lock ->kernfs_idr_lock ->dev_pm_qos_sysfs_mtx ->deferred_probe_mutex ->device_links_lock ->mmu_notifier_invalidate_range_start ->batched_entropy_u8.lock ->kfence_freelist_lock ->(wq_completion)phy15 ->(wq_completion)phy16 ->stock_lock ->&sem->wait_lock ->per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) ->&local->filter_lock ->&data->lock ->&ul->lock#2 ->&rnp->exp_lock ->rcu_state.exp_mutex ->&meta->lock ->&rcu_state.expedited_wq ->&app->lock ->(&app->join_timer) ->(&app->periodic_timer) ->&list->lock#14 ->(&app->join_timer)#2 ->&app->lock#2 ->&list->lock#15 
->&vlan_netdev_addr_lock_key/1 ->(inetaddr_chain).rwsem ->(wq_completion)phy24 ->(wq_completion)phy25 ->(wq_completion)phy26 ->(wq_completion)phy21 ->(wq_completion)phy22 ->(wq_completion)phy27 ->(wq_completion)phy23 FD: 3 BD: 75 +.+.: subsys mutex#56 ->&k->k_lock FD: 1 BD: 75 +.+.: reg_requests_lock FD: 1 BD: 75 +.+.: &local->iflist_mtx FD: 184 BD: 78 +.+.: &wdev->mtx ->&rdev->bss_lock ->&local->chanctx_mtx ->&rdev->wiphy_work_lock ->&ifibss->incomplete_lock ->(console_sem).lock ->console_owner_lock ->console_owner ->&rq->__lock ->&local->mtx ->fs_reclaim ->pool_lock#2 ->tk_core.seq.seqcount ->hrtimer_bases.lock ->&obj_hash[i].lock ->&base->lock ->&wdev->event_lock ->nl_table_lock ->nl_table_wait.lock ->&____s->seqcount ->&list->lock#2 ->&c->lock ->&sta->lock ->&local->sta_mtx ->lweventlist_lock ->&____s->seqcount#2 ->&n->list_lock ->rcu_node_0 ->&rcu_state.expedited_wq ->quarantine_lock ->remove_cache_srcu ->&wdev->pmsr_lock ->krc.lock ->&list->lock#18 ->(&ifibss->timer) ->&cfs_rq->removed.lock ->batched_entropy_u8.lock ->kfence_freelist_lock ->&data->lock ->&rnp->exp_lock ->rcu_state.exp_mutex FD: 8 BD: 3453 +.-.: &fq->lock ->tk_core.seq.seqcount ->pool_lock#2 ->&obj_hash[i].lock ->&zone->lock FD: 3 BD: 72 +.+.: subsys mutex#57 ->&k->k_lock FD: 29 BD: 73 +.+.: &sdata->sec_mtx ->&sec->lock ->&rq->__lock FD: 1 BD: 74 ++..: &sec->lock FD: 1 BD: 72 +.+.: &local->iflist_mtx#2 FD: 129 BD: 1 +.+.: hwsim_phys_lock ->fs_reclaim ->pool_lock#2 FD: 129 BD: 1 +.+.: xdomain_lock ->fs_reclaim ->pool_lock#2 FD: 1 BD: 1 +.+.: ioctl_mutex FD: 1 BD: 1 +.+.: address_handler_list_lock FD: 1 BD: 1 +.+.: card_mutex FD: 3 BD: 1 +.+.: subsys mutex#58 ->&k->k_lock FD: 29 BD: 1 ....: &x->wait#18 ->&p->pi_lock FD: 31 BD: 2 ..-.: &txlock ->&list->lock#3 ->&txwq FD: 1 BD: 3 ..-.: &list->lock#3 FD: 29 BD: 3 ..-.: &txwq ->&p->pi_lock FD: 2 BD: 1 ....: &iocq[i].lock ->&ktiowq[i] FD: 1 BD: 2 ....: &ktiowq[i] FD: 1 BD: 1 ....: rcu_read_lock_bh FD: 3 BD: 3497 +.-.: noop_qdisc.q.lock ->crngs.lock FD: 3 BD: 3 +.+.: subsys mutex#59 ->&k->k_lock FD: 228 BD: 1 +.+.: usb_bus_idr_lock ->(usb_notifier_list).rwsem ->fs_reclaim ->pool_lock#2 ->mmu_notifier_invalidate_range_start ->&c->lock ->&zone->lock ->&____s->seqcount ->hcd_root_hub_lock ->&obj_hash[i].lock ->&rq->__lock ->&x->wait#19 ->&dev->power.lock ->device_links_srcu ->(console_sem).lock ->console_owner_lock ->console_owner ->input_pool.lock ->batched_entropy_u8.lock ->kfence_freelist_lock ->&k->list_lock ->lock ->&root->kernfs_rwsem ->bus_type_sem ->sysfs_symlink_target_lock ->&k->k_lock ->dpm_list_mtx ->req_lock ->&p->pi_lock ->&x->wait#11 ->&(&priv->bus_notifier)->rwsem ->uevent_sock_mutex ->running_helpers_waitq.lock ->device_state_lock ->&dum_hcd->dum->lock ->subsys mutex#60 ->&x->wait#9 ->&lock->wait_lock ->&hub->irq_urb_lock ->(&hub->irq_urb_retry) ->&base->lock ->hcd_urb_unlink_lock ->&cfs_rq->removed.lock ->(work_completion)(&hub->tt.clear_work) ->hcd_urb_list_lock ->rcu_node_0 ->&rcu_state.expedited_wq ->&vhci_hcd->vhci->lock FD: 172 BD: 1 +.+.: table_lock ->&k->list_lock ->fs_reclaim ->pool_lock#2 ->&c->lock ->&zone->lock ->&____s->seqcount ->lock ->&root->kernfs_rwsem ->&k->k_lock ->uevent_sock_mutex ->&obj_hash[i].lock ->running_helpers_waitq.lock ->&rq->__lock ->(console_sem).lock ->batched_entropy_u8.lock ->kfence_freelist_lock FD: 31 BD: 1 ..-.: net/core/link_watch.c:31 FD: 1 BD: 3 +.+.: mon_lock FD: 173 BD: 2 +.+.: usb_port_peer_mutex ->fs_reclaim ->pool_lock#2 ->&x->wait#9 ->&obj_hash[i].lock ->&k->list_lock ->lock ->&root->kernfs_rwsem ->bus_type_sem 
->&c->lock ->&zone->lock ->&____s->seqcount ->&dev->power.lock ->dpm_list_mtx ->&k->k_lock ->dev_pm_qos_mtx ->component_mutex ->device_links_srcu ->dev_pm_qos_sysfs_mtx ->&rq->__lock ->sysfs_symlink_target_lock ->batched_entropy_u8.lock ->kfence_freelist_lock FD: 1 BD: 4 ....: device_state_lock FD: 31 BD: 8 ....: hcd_root_hub_lock ->hcd_urb_list_lock ->&bh->lock ->&p->pi_lock FD: 1 BD: 9 ....: hcd_urb_list_lock FD: 1 BD: 9 ..-.: &bh->lock FD: 12 BD: 93 ..-.: lock#6 ->kcov_remote_lock ->&kcov->lock FD: 10 BD: 149 ..-.: kcov_remote_lock ->pool_lock#2 ->&obj_hash[i].lock ->&c->lock ->&n->list_lock FD: 29 BD: 8 ..-.: &x->wait#19 ->&p->pi_lock FD: 1 BD: 2 +.+.: set_config_lock FD: 138 BD: 2 +.+.: hcd->bandwidth_mutex ->devtree_lock ->&obj_hash[i].lock ->&x->wait#9 ->&dev->power.lock ->fs_reclaim ->pool_lock#2 ->mmu_notifier_invalidate_range_start ->hcd_root_hub_lock ->&rq->__lock ->&x->wait#19 ->&c->lock ->&zone->lock ->&____s->seqcount FD: 1 BD: 2 +.+.: &new_driver->dynids.lock FD: 1 BD: 5 ....: &dum_hcd->dum->lock FD: 136 BD: 4 +.+.: &hub->status_mutex ->mmu_notifier_invalidate_range_start ->pool_lock#2 ->hcd_root_hub_lock ->fs_reclaim ->&dum_hcd->dum->lock ->&obj_hash[i].lock ->&rq->__lock ->&x->wait#19 ->&base->lock ->&pool->lock ->(&timer.timer) ->&____s->seqcount ->&zone->lock ->&vhci_hcd->vhci->lock ->&c->lock FD: 1 BD: 3 +.+.: component_mutex FD: 145 BD: 1 +.+.: (wq_completion)pm ->(work_completion)(&dev->power.work) FD: 144 BD: 2 +.+.: (work_completion)(&dev->power.work) ->&dev->power.lock ->&hub->irq_urb_lock ->(&hub->irq_urb_retry) ->&obj_hash[i].lock ->&base->lock ->hcd_urb_unlink_lock ->hcd_root_hub_lock ->usb_kill_urb_queue.lock ->&pool->lock ->&rq->__lock ->(work_completion)(&hub->tt.clear_work) ->&vhci_hcd->vhci->lock ->device_state_lock ->hcd_urb_list_lock ->mmu_notifier_invalidate_range_start ->&c->lock ->&zone->lock ->&____s->seqcount ->pool_lock#2 ->fs_reclaim ->&x->wait#19 ->(&timer.timer) FD: 138 BD: 2 +.+.: (work_completion)(&(&hub->init_work)->work) FD: 1 BD: 2 +.+.: subsys mutex#60 FD: 38 BD: 1 +.+.: (wq_completion)usb_hub_wq ->(work_completion)(&hub->events) FD: 37 BD: 2 +.+.: (work_completion)(&hub->events) ->lock#6 ->&dev->power.lock FD: 1 BD: 4 ....: &hub->irq_urb_lock FD: 1 BD: 4 ....: (&hub->irq_urb_retry) FD: 1 BD: 4 ....: hcd_urb_unlink_lock FD: 29 BD: 3 ..-.: usb_kill_urb_queue.lock ->&p->pi_lock FD: 1 BD: 4 +.+.: (work_completion)(&hub->tt.clear_work) FD: 33 BD: 1 ..-.: drivers/block/floppy.c:640 FD: 38 BD: 1 +.+.: (fd_timeout).work ->&obj_hash[i].lock ->floppy_work ->dma_spin_lock ->floppy_lock ->command_done.lock FD: 1 BD: 3 ....: fdc_wait.lock FD: 1 BD: 90 +.+.: &ent->pde_unload_lock FD: 1 BD: 3 ....: (&motor_off_timer[drive]) FD: 1 BD: 299 ....: (&sq->pending_timer) FD: 1 BD: 3 +.+.: (work_completion)(&td->dispatch_work) FD: 39 BD: 5 +.+.: &q->blkcg_mutex ->&q->queue_lock ->&obj_hash[i].lock ->pool_lock#2 FD: 134 BD: 4118 .+.+: &fsnotify_mark_srcu ->&conn->lock ->fs_reclaim ->pool_lock#2 ->&group->notification_lock ->&group->notification_waitq ->&____s->seqcount ->&c->lock ->&obj_hash[i].lock ->&rq->__lock ->&cfs_rq->removed.lock ->remove_cache_srcu ->batched_entropy_u8.lock ->kfence_freelist_lock ->&____s->seqcount#2 ->&n->list_lock ->rcu_node_0 ->pgd_lock ->stock_lock ->key ->pcpu_lock ->percpu_counters_lock ->&rcu_state.expedited_wq FD: 42 BD: 2 +.+.: (work_completion)(&blkg->free_work) ->&q->blkcg_mutex ->&obj_hash[i].lock ->pool_lock#2 ->&xa->xa_lock#7 ->pcpu_lock ->blk_queue_ida.xa_lock ->percpu_ref_switch_lock FD: 1 BD: 8 +.+.: udc_lock FD: 3 BD: 1 
+.+.: subsys mutex#61 ->&k->k_lock FD: 1 BD: 1 ....: gadget_id_numbers.xa_lock FD: 136 BD: 2 +.+.: (work_completion)(&gadget->work) ->&root->kernfs_rwsem ->kernfs_notify_lock FD: 31 BD: 161 ....: kernfs_notify_lock FD: 65 BD: 2 +.+.: kernfs_notify_work ->kernfs_notify_lock ->&root->kernfs_supers_rwsem FD: 63 BD: 7 ++++: &root->kernfs_supers_rwsem ->inode_hash_lock FD: 1 BD: 1 +.+.: subsys mutex#62 FD: 1 BD: 1 +.+.: func_lock FD: 1 BD: 1 +.+.: g_tf_lock FD: 1 BD: 7 ....: &vhci_hcd->vhci->lock FD: 21 BD: 1 +.-.: (&vblank->disable_timer) ->&dev->vbl_lock FD: 36 BD: 7 -...: i8042_lock ->(console_sem).lock ->&x->wait#20 FD: 29 BD: 8 -...: &x->wait#20 ->&p->pi_lock FD: 31 BD: 4 ....: serio_event_lock ->pool_lock#2 FD: 253 BD: 1 +.+.: (wq_completion)events_long ->serio_event_work ->(work_completion)(&(&ipvs->defense_work)->work) ->(work_completion)(&(&br->gc_work)->work) ->(work_completion)(&br->mcast_gc_work) ->(work_completion)(&(&ipvs->est_reload_work)->work) ->&rq->__lock ->rcu_node_0 ->&rcu_state.expedited_wq FD: 225 BD: 2 +.+.: serio_event_work ->serio_mutex FD: 224 BD: 3 +.+.: serio_mutex ->serio_event_lock ->i8042_lock ->fs_reclaim ->pool_lock#2 ->&k->list_lock ->lock ->&root->kernfs_rwsem ->&c->lock ->&zone->lock ->&____s->seqcount ->&device->physical_node_lock ->semaphore->lock ->&obj_hash[i].lock ->sysfs_symlink_target_lock ->&k->k_lock ->&dev->power.lock ->dpm_list_mtx ->uevent_sock_mutex ->running_helpers_waitq.lock ->subsys mutex#63 ->bus_type_sem FD: 1 BD: 4 +.+.: subsys mutex#63 FD: 2 BD: 7 ....: input_ida.xa_lock ->pool_lock#2 FD: 39 BD: 7 +.+.: &mousedev->mutex/1 ->&mousedev->mutex#2 FD: 211 BD: 4 +.+.: &serio->drv_mutex ->fs_reclaim ->pool_lock#2 ->&x->wait#9 ->&obj_hash[i].lock ->&c->lock ->&zone->lock ->&____s->seqcount ->&serio->lock ->i8042_mutex ->&k->list_lock ->gdp_mutex ->lock ->&root->kernfs_rwsem ->bus_type_sem ->sysfs_symlink_target_lock ->&dev->power.lock ->dpm_list_mtx ->uevent_sock_mutex ->running_helpers_waitq.lock ->&k->k_lock ->subsys mutex#30 ->(console_sem).lock ->&rq->__lock ->input_mutex ->i8042_lock ->psmouse_mutex FD: 36 BD: 7 -...: &serio->lock ->&ps2dev->wait ->&dev->power.lock ->&dev->event_lock#2 FD: 46 BD: 6 +.+.: i8042_mutex ->&serio->lock ->i8042_lock ->&ps2dev->wait ->&obj_hash[i].lock ->&base->lock ->&pool->lock ->&rq->__lock ->(&timer.timer) FD: 1 BD: 8 -...: &ps2dev->wait FD: 1 BD: 1 ....: rtc_ida.xa_lock FD: 2 BD: 1 +.+.: &rtc->ops_lock ->rtc_lock FD: 1 BD: 2 ....: platform_devid_ida.xa_lock FD: 1 BD: 2 ....: rtcdev_lock FD: 185 BD: 7 +.+.: &led_cdev->led_access ->fs_reclaim ->pool_lock#2 ->&x->wait#9 ->&obj_hash[i].lock ->&k->list_lock ->lock ->&root->kernfs_rwsem ->bus_type_sem ->sysfs_symlink_target_lock ->&c->lock ->&zone->lock ->&____s->seqcount ->&dev->power.lock ->dpm_list_mtx ->uevent_sock_mutex ->running_helpers_waitq.lock ->&k->k_lock ->subsys mutex#64 ->leds_list_lock ->triggers_list_lock FD: 3 BD: 8 +.+.: subsys mutex#64 ->&k->k_lock FD: 142 BD: 24 +.+.: &led_cdev->trigger_lock ->fs_reclaim ->&c->lock ->&zone->lock ->&____s->seqcount ->pool_lock#2 ->&trig->leddev_list_lock ->uevent_sock_mutex ->&obj_hash[i].lock ->running_helpers_waitq.lock FD: 1 BD: 25 +.+.: &trig->leddev_list_lock FD: 1 BD: 27 -...: &dev->event_lock#2 FD: 210 BD: 5 +.+.: psmouse_mutex ->fs_reclaim ->pool_lock#2 ->&x->wait#9 ->&obj_hash[i].lock ->&serio->lock ->i8042_mutex ->&c->lock ->&zone->lock ->&____s->seqcount ->&k->list_lock ->gdp_mutex ->lock ->&root->kernfs_rwsem ->bus_type_sem ->sysfs_symlink_target_lock ->&dev->power.lock ->dpm_list_mtx ->&rq->__lock 
->uevent_sock_mutex ->running_helpers_waitq.lock ->&k->k_lock ->subsys mutex#30 ->(console_sem).lock ->console_owner_lock ->console_owner ->&cfs_rq->removed.lock ->input_mutex FD: 129 BD: 1 +.+.: g_smscore_deviceslock ->fs_reclaim ->pool_lock#2 FD: 1 BD: 1 +.+.: cx231xx_devlist_mutex FD: 1 BD: 1 +.+.: em28xx_devlist_mutex FD: 1 BD: 1 ....: pvr2_context_sync_data.lock FD: 1 BD: 15 +.+.: i2c_dev_list_lock FD: 3 BD: 8 +.+.: subsys mutex#65 ->&k->k_lock FD: 1 BD: 1 +.+.: subsys mutex#66 FD: 188 BD: 2 +.+.: dvbdev_register_lock ->(console_sem).lock ->fs_reclaim ->&c->lock ->&zone->lock ->&____s->seqcount ->pool_lock#2 ->minor_rwsem ->&xa->xa_lock#9 ->&mdev->graph_mutex ->&x->wait#9 ->&obj_hash[i].lock ->&k->list_lock ->gdp_mutex ->lock ->&root->kernfs_rwsem ->bus_type_sem ->sysfs_symlink_target_lock ->&dev->power.lock ->dpm_list_mtx ->req_lock ->&p->pi_lock ->&rq->__lock ->&x->wait#11 ->uevent_sock_mutex ->running_helpers_waitq.lock ->&k->k_lock ->subsys mutex#67 FD: 189 BD: 1 +.+.: frontend_mutex ->fs_reclaim ->pool_lock#2 ->(console_sem).lock ->&rq->__lock ->&cfs_rq->removed.lock ->&obj_hash[i].lock ->dvbdev_register_lock FD: 1 BD: 3 +.+.: minor_rwsem FD: 2 BD: 3 ....: &xa->xa_lock#9 ->pool_lock#2 FD: 129 BD: 4 +.+.: &mdev->graph_mutex ->fs_reclaim ->pool_lock#2 ->&obj_hash[i].lock ->batched_entropy_u8.lock ->kfence_freelist_lock FD: 3 BD: 3 +.+.: subsys mutex#67 ->&k->k_lock FD: 1 BD: 1 ....: &dmxdev->lock FD: 1 BD: 1 +.+.: &dvbdemux->mutex FD: 1 BD: 1 +.+.: media_devnode_lock FD: 1 BD: 1 +.+.: subsys mutex#68 FD: 1 BD: 1 +.+.: videodev_lock FD: 3 BD: 1 +.+.: subsys mutex#69 ->&k->k_lock FD: 1 BD: 1 +.+.: vimc_sensor:393:(&vsensor->hdl)->_lock FD: 1 BD: 1 +.+.: &v4l2_dev->lock FD: 1 BD: 1 +.+.: vimc_debayer:578:(&vdebayer->hdl)->_lock FD: 1 BD: 1 +.+.: vimc_lens:61:(&vlens->hdl)->_lock FD: 139 BD: 1 +.+.: vivid_ctrls:1606:(hdl_user_gen)->_lock ->vivid_ctrls:1620:(hdl_vid_cap)->_lock ->fs_reclaim ->pool_lock#2 ->&c->lock ->&____s->seqcount ->vivid_ctrls:1622:(hdl_vid_out)->_lock ->vivid_ctrls:1625:(hdl_vbi_cap)->_lock ->vivid_ctrls:1627:(hdl_vbi_out)->_lock ->vivid_ctrls:1630:(hdl_radio_rx)->_lock ->vivid_ctrls:1632:(hdl_radio_tx)->_lock ->vivid_ctrls:1634:(hdl_sdr_cap)->_lock ->vivid_ctrls:1636:(hdl_meta_cap)->_lock ->vivid_ctrls:1638:(hdl_meta_out)->_lock ->vivid_ctrls:1640:(hdl_tch_cap)->_lock ->&zone->lock ->&obj_hash[i].lock ->&rq->__lock FD: 130 BD: 1 +.+.: vivid_ctrls:1608:(hdl_user_vid)->_lock ->vivid_ctrls:1620:(hdl_vid_cap)->_lock ->fs_reclaim ->pool_lock#2 ->&c->lock ->&____s->seqcount ->&zone->lock FD: 133 BD: 1 +.+.: vivid_ctrls:1610:(hdl_user_aud)->_lock ->vivid_ctrls:1620:(hdl_vid_cap)->_lock ->fs_reclaim ->pool_lock#2 ->vivid_ctrls:1622:(hdl_vid_out)->_lock ->vivid_ctrls:1630:(hdl_radio_rx)->_lock ->vivid_ctrls:1632:(hdl_radio_tx)->_lock ->&c->lock ->&____s->seqcount FD: 137 BD: 1 +.+.: vivid_ctrls:1612:(hdl_streaming)->_lock ->vivid_ctrls:1620:(hdl_vid_cap)->_lock ->fs_reclaim ->pool_lock#2 ->&c->lock ->&____s->seqcount ->vivid_ctrls:1622:(hdl_vid_out)->_lock ->vivid_ctrls:1625:(hdl_vbi_cap)->_lock ->vivid_ctrls:1627:(hdl_vbi_out)->_lock ->vivid_ctrls:1634:(hdl_sdr_cap)->_lock ->vivid_ctrls:1636:(hdl_meta_cap)->_lock ->vivid_ctrls:1638:(hdl_meta_out)->_lock ->vivid_ctrls:1640:(hdl_tch_cap)->_lock ->&zone->lock FD: 131 BD: 1 +.+.: vivid_ctrls:1614:(hdl_sdtv_cap)->_lock ->vivid_ctrls:1620:(hdl_vid_cap)->_lock ->fs_reclaim ->pool_lock#2 ->vivid_ctrls:1625:(hdl_vbi_cap)->_lock ->&c->lock ->&zone->lock ->&____s->seqcount FD: 131 BD: 1 +.+.: vivid_ctrls:1616:(hdl_loop_cap)->_lock 
->vivid_ctrls:1620:(hdl_vid_cap)->_lock ->fs_reclaim ->pool_lock#2 ->vivid_ctrls:1625:(hdl_vbi_cap)->_lock ->&c->lock ->&zone->lock ->&____s->seqcount FD: 1 BD: 1 +.+.: vivid_ctrls:1618:(hdl_fb)->_lock FD: 1 BD: 7 +.+.: vivid_ctrls:1620:(hdl_vid_cap)->_lock FD: 1 BD: 4 +.+.: vivid_ctrls:1622:(hdl_vid_out)->_lock FD: 1 BD: 5 +.+.: vivid_ctrls:1625:(hdl_vbi_cap)->_lock FD: 1 BD: 3 +.+.: vivid_ctrls:1627:(hdl_vbi_out)->_lock FD: 1 BD: 3 +.+.: vivid_ctrls:1630:(hdl_radio_rx)->_lock FD: 1 BD: 3 +.+.: vivid_ctrls:1632:(hdl_radio_tx)->_lock FD: 1 BD: 3 +.+.: vivid_ctrls:1634:(hdl_sdr_cap)->_lock FD: 1 BD: 3 +.+.: vivid_ctrls:1636:(hdl_meta_cap)->_lock FD: 1 BD: 3 +.+.: vivid_ctrls:1638:(hdl_meta_out)->_lock FD: 1 BD: 3 +.+.: vivid_ctrls:1640:(hdl_tch_cap)->_lock FD: 1 BD: 1 ....: &adap->kthread_waitq FD: 1 BD: 1 +.+.: cec_devnode_lock FD: 1 BD: 1 +.+.: subsys mutex#70 FD: 6 BD: 1 +.+.: &adap->lock ->tk_core.seq.seqcount ->&adap->devnode.lock_fhs FD: 1 BD: 2 +.+.: &adap->devnode.lock_fhs FD: 1 BD: 1 +.+.: &dev->cec_xfers_slock FD: 1 BD: 1 ....: &dev->kthread_waitq_cec FD: 31 BD: 1 ..-.: &(&tbl->gc_work)->timer FD: 51 BD: 2 +.+.: (work_completion)(&(&tbl->gc_work)->work) ->&tbl->lock ->&rq->__lock ->&cfs_rq->removed.lock ->rcu_node_0 ->&rcu_state.expedited_wq FD: 1 BD: 1 ....: ptp_clocks_map.xa_lock FD: 3 BD: 1 +.+.: subsys mutex#71 ->&k->k_lock FD: 1 BD: 1 +.+.: pers_lock FD: 1 BD: 1 +.+.: _lock FD: 1 BD: 3 +.+.: dm_bufio_clients_lock FD: 1 BD: 1 +.+.: _ps_lock FD: 1 BD: 1 +.+.: _lock#2 FD: 1 BD: 1 +.+.: _lock#3 FD: 1 BD: 1 +.+.: register_lock#2 FD: 3 BD: 1 +.+.: subsys mutex#72 ->&k->k_lock FD: 1 BD: 1 .+.+: bp_lock FD: 3 BD: 1 +.+.: subsys mutex#73 ->&k->k_lock FD: 17 BD: 1 +.-.: (&dsp_spl_tl) ->dsp_lock FD: 16 BD: 2 ..-.: dsp_lock ->iclock_lock ->&obj_hash[i].lock ->&base->lock FD: 5 BD: 3 ...-: iclock_lock ->tk_core.seq.seqcount FD: 136 BD: 74 +.+.: lock#7 ->fs_reclaim ->pool_lock#2 ->&xa->xa_lock#11 ->crngs.lock ->&xa->xa_lock#17 ->&id_priv->qp_mutex ->&id_priv->lock ->&rq->__lock ->&xa->xa_lock#18 ->&cm_id_priv->lock ->&c->lock ->&n->list_lock FD: 1 BD: 1 +.+.: intf_mutex FD: 1 BD: 1 ....: iscsi_transport_lock FD: 3 BD: 1 +.+.: subsys mutex#74 ->&k->k_lock FD: 910 BD: 2 ++++: link_ops_rwsem ->fs_reclaim ->pool_lock#2 ->&x->wait#9 ->&obj_hash[i].lock ->(console_sem).lock ->&pdata->netdev_lock ->ndev_hash_lock ->crypto_alg_sem ->devices_rwsem ->&rxe->usdev_lock ->&c->lock ->rtnl_mutex ->&device->cache_lock ->rdmacg_mutex ->&k->list_lock ->gdp_mutex ->lock ->&root->kernfs_rwsem ->bus_type_sem ->sysfs_symlink_target_lock ->&dev->power.lock ->dpm_list_mtx ->subsys mutex#83 ->&zone->lock ->&____s->seqcount ->&rq->__lock ->rcu_node_0 ->&____s->seqcount#2 ->&n->list_lock ->&cfs_rq->removed.lock ->&sem->wait_lock ->&p->pi_lock ->uevent_sock_mutex ->remove_cache_srcu ->&xa->xa_lock#17 ->krc.lock ->&xa->xa_lock#15 ->&rcu_state.expedited_wq ->devices_rwsem.wait_lock ->rtnl_mutex.wait_lock FD: 189 BD: 1 +.+.: disable_lock ->fs_reclaim ->pool_lock#2 ->&x->wait#9 ->&obj_hash[i].lock ->&k->list_lock ->lock ->&root->kernfs_rwsem ->bus_type_sem ->sysfs_symlink_target_lock ->&c->lock ->&zone->lock ->&____s->seqcount ->&k->k_lock ->&dev->power.lock ->dpm_list_mtx ->&(&priv->bus_notifier)->rwsem ->uevent_sock_mutex ->running_helpers_waitq.lock ->&rq->__lock ->subsys mutex#3 FD: 1 BD: 1 ....: &tx_task->waiting FD: 3 BD: 1 +.+.: subsys mutex#75 ->&k->k_lock FD: 1 BD: 1 +.+.: service_lock FD: 1 BD: 1 +.+.: vsock_register_mutex FD: 1 BD: 1 +.+.: comedi_drivers_list_lock FD: 3 BD: 6 +.+.: subsys mutex#76 ->&k->k_lock 
FD: 169 BD: 2 ++++: snd_ctl_layer_rwsem ->snd_ctl_led_mutex ->fs_reclaim ->pool_lock#2 ->&x->wait#9 ->&obj_hash[i].lock ->&k->list_lock ->lock ->&root->kernfs_rwsem ->bus_type_sem ->&c->lock ->&____s->seqcount ->&dev->power.lock ->dpm_list_mtx ->&k->k_lock ->sysfs_symlink_target_lock ->&zone->lock FD: 1 BD: 3 +.+.: snd_card_mutex FD: 1 BD: 1 +.+.: snd_ioctl_rwsem FD: 129 BD: 2 +.+.: strings ->fs_reclaim ->pool_lock#2 FD: 1 BD: 2 +.+.: register_mutex FD: 184 BD: 3 +.+.: sound_mutex ->fs_reclaim ->pool_lock#2 ->&k->list_lock ->gdp_mutex ->lock ->&root->kernfs_rwsem ->bus_type_sem ->sysfs_symlink_target_lock ->&dev->power.lock ->dpm_list_mtx ->&c->lock ->&zone->lock ->&____s->seqcount ->req_lock ->&p->pi_lock ->&rq->__lock ->&cfs_rq->removed.lock ->&obj_hash[i].lock ->&x->wait#11 ->uevent_sock_mutex ->running_helpers_waitq.lock ->subsys mutex#76 ->&k->k_lock FD: 194 BD: 1 +.+.: register_mutex#2 ->fs_reclaim ->pool_lock#2 ->sound_mutex ->&obj_hash[i].lock ->register_mutex ->&c->lock ->&zone->lock ->&____s->seqcount ->sound_oss_mutex ->strings ->&entry->access ->info_mutex FD: 186 BD: 1 +.+.: register_mutex#3 ->fs_reclaim ->pool_lock#2 ->sound_mutex ->clients_lock ->&zone->lock ->&____s->seqcount ->&rq->__lock ->&obj_hash[i].lock FD: 1 BD: 5 ....: clients_lock FD: 2 BD: 1 +.+.: &client->ports_mutex ->&client->ports_lock FD: 1 BD: 5 .+.+: &client->ports_lock FD: 187 BD: 1 +.+.: register_mutex#4 ->fs_reclaim ->pool_lock#2 ->sound_oss_mutex FD: 186 BD: 3 +.+.: sound_oss_mutex ->fs_reclaim ->pool_lock#2 ->sound_loader_lock ->&x->wait#9 ->&obj_hash[i].lock ->&k->list_lock ->gdp_mutex ->lock ->&root->kernfs_rwsem ->bus_type_sem ->sysfs_symlink_target_lock ->&c->lock ->&____s->seqcount ->&dev->power.lock ->dpm_list_mtx ->req_lock ->&p->pi_lock ->&rq->__lock ->&x->wait#11 ->uevent_sock_mutex ->running_helpers_waitq.lock ->subsys mutex#76 ->batched_entropy_u8.lock ->kfence_freelist_lock ->&k->k_lock ->&zone->lock ->&cfs_rq->removed.lock FD: 1 BD: 4 +.+.: sound_loader_lock FD: 132 BD: 1 .+.+: &grp->list_mutex/1 ->clients_lock ->&client->ports_lock ->register_lock#3 ->fs_reclaim ->pool_lock#2 ->&c->lock ->&zone->lock ->&____s->seqcount FD: 2 BD: 1 +.+.: &grp->list_mutex#2 ->&grp->list_lock FD: 1 BD: 2 ....: &grp->list_lock FD: 139 BD: 2 +.+.: async_lookup_work ->fs_reclaim ->pool_lock#2 ->clients_lock ->&client->ports_lock ->snd_card_mutex ->(kmod_concurrent_max).lock ->&obj_hash[i].lock ->&x->wait#17 ->&pool->lock ->&rq->__lock ->&zone->lock ->&____s->seqcount ->running_helpers_waitq.lock ->autoload_work ->&x->wait#10 FD: 4 BD: 3 +.+.: autoload_work ->&k->list_lock ->&k->k_lock FD: 1 BD: 2 ....: register_lock#3 FD: 172 BD: 1 ++++: &card->controls_rwsem ->&xa->xa_lock#10 ->fs_reclaim ->&card->ctl_files_rwlock ->snd_ctl_layer_rwsem ->pool_lock#2 ->&c->lock ->&zone->lock ->&____s->seqcount FD: 9 BD: 2 +.+.: &xa->xa_lock#10 ->pool_lock#2 ->&c->lock ->&zone->lock ->&____s->seqcount FD: 1 BD: 2 ....: &card->ctl_files_rwlock FD: 1 BD: 3 +.+.: snd_ctl_led_mutex FD: 1 BD: 1 +.+.: register_mutex#5 FD: 1 BD: 75 +.+.: failover_lock FD: 10 BD: 5 +...: llc_sap_list_lock ->pool_lock#2 ->&c->lock ->&zone->lock ->&____s->seqcount ->&n->list_lock FD: 129 BD: 1 +.+.: act_id_mutex ->fs_reclaim ->pool_lock#2 FD: 1 BD: 72 ++++: act_mod_lock FD: 1 BD: 1 +.+.: ife_mod_lock FD: 1 BD: 81 +.+.: nf_connlabels_lock FD: 1 BD: 72 ++++: cls_mod_lock FD: 1 BD: 1 +.+.: ematch_mod_lock FD: 135 BD: 2 +.+.: sock_diag_table_mutex ->nlk_cb_mutex-SOCK_DIAG FD: 135 BD: 1 +.+.: nfnl_subsys_acct ->fs_reclaim ->pool_lock#2 ->&obj_hash[i].lock 
->krc.lock ->nlk_cb_mutex-NETFILTER FD: 1 BD: 1 +.+.: nfnl_subsys_queue FD: 34 BD: 1 +.+.: nfnl_subsys_ulog ->&log->instances_lock ->&inst->lock ->nf_log_mutex ->&rq->__lock ->&lock->wait_lock FD: 28 BD: 6 +.+.: nf_log_mutex ->&rq->__lock FD: 1 BD: 1 +.+.: nfnl_subsys_osf FD: 32 BD: 7 +.+.: nf_sockopt_mutex ->&rq->__lock ->nf_sockopt_mutex.wait_lock ->&cfs_rq->removed.lock ->&obj_hash[i].lock ->pool_lock#2 ->rcu_node_0 ->&rcu_state.expedited_wq FD: 146 BD: 2 +.+.: nfnl_subsys_ctnetlink ->nlk_cb_mutex-NETFILTER ->pool_lock#2 ->&obj_hash[i].lock ->rcu_node_0 ->&rq->__lock ->batched_entropy_u8.lock ->kfence_freelist_lock ->&lock->wait_lock ->nf_conntrack_mutex ->&cfs_rq->removed.lock ->rlock-AF_NETLINK ->(console_sem).lock ->console_owner_lock ->console_owner ->fs_reclaim FD: 1 BD: 1 +.+.: nfnl_subsys_ctnetlink_exp FD: 1 BD: 5 +.+.: nf_ct_ecache_mutex FD: 135 BD: 1 +.+.: nfnl_subsys_cttimeout ->fs_reclaim ->pool_lock#2 ->&obj_hash[i].lock ->nf_conntrack_mutex ->krc.lock ->&c->lock FD: 1 BD: 1 +.+.: nfnl_subsys_cthelper FD: 1 BD: 1 +.+.: nf_ct_helper_mutex FD: 1 BD: 1 +...: nf_conntrack_expect_lock FD: 38 BD: 10 +.+.: nf_conntrack_mutex ->&nf_conntrack_locks[i] ->&rq->__lock ->&cfs_rq->removed.lock ->&obj_hash[i].lock ->nf_conntrack_mutex.wait_lock ->&pool->lock ->rcu_node_0 ->&rcu_state.expedited_wq ->&____s->seqcount#7 ->&nf_conntrack_locks[i]/1 ->pool_lock#2 ->&c->lock ->&n->list_lock FD: 1 BD: 1 +.+.: nf_ct_nat_helpers_mutex FD: 327 BD: 2 +.+.: nfnl_subsys_nftables ->&nft_net->commit_mutex ->&rq->__lock FD: 1 BD: 1 +.+.: nfnl_subsys_nftcompat FD: 1087 BD: 1 +.+.: masq_mutex ->pernet_ops_rwsem ->(inetaddr_chain).rwsem ->inet6addr_chain.lock FD: 238 BD: 5 +.+.: &xt[i].mutex ->fs_reclaim ->pool_lock#2 ->&c->lock ->&n->list_lock ->&____s->seqcount ->&mm->mmap_lock ->free_vmap_area_lock ->vmap_area_lock ->&per_cpu(xt_recseq, i) ->&obj_hash[i].lock ->purge_vmap_area_lock ->rcu_node_0 ->&rq->__lock ->init_mm.page_table_lock ->&____s->seqcount#2 ->remove_cache_srcu ->&lock->wait_lock ->batched_entropy_u8.lock ->kfence_freelist_lock ->&cfs_rq->removed.lock ->&zone->lock ->&meta->lock ->&rcu_state.expedited_wq ->quarantine_lock ->pgd_lock ->stock_lock ->key ->pcpu_lock ->percpu_counters_lock ->&base->lock ->per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) FD: 31 BD: 3523 +.+.: &tn->lock ->&rq->__lock ->&cfs_rq->removed.lock ->&obj_hash[i].lock ->pool_lock#2 ->rcu_node_0 ->&rcu_state.expedited_wq FD: 3 BD: 1 +.+.: subsys mutex#77 ->&k->k_lock FD: 137 BD: 5 +.+.: nfnl_subsys_ipset ->nlk_cb_mutex-NETFILTER ->fs_reclaim ->pool_lock#2 ->&____s->seqcount ->stock_lock ->crngs.lock ->&rq->__lock ->&c->lock ->&obj_hash[i].lock ->&base->lock ->rlock-AF_NETLINK ->&n->list_lock ->ip_set_ref_lock ->(&map->gc) ->&zone->lock FD: 1 BD: 1 +.+.: ip_set_type_mutex FD: 143 BD: 77 +.+.: ipvs->est_mutex ->fs_reclaim ->pool_lock#2 ->pcpu_alloc_mutex ->&c->lock ->&n->list_lock ->pcpu_lock ->&obj_hash[i].lock ->&rq->__lock ->kthread_create_lock ->&p->pi_lock ->&x->wait ->&pool->lock ->(console_sem).lock ->quarantine_lock ->pcpu_alloc_mutex.wait_lock ->&cfs_rq->removed.lock ->&____s->seqcount#2 ->&____s->seqcount ->rcu_node_0 FD: 1 BD: 75 +.+.: ip_vs_sched_mutex FD: 129 BD: 5 +.+.: __ip_vs_app_mutex ->fs_reclaim ->pool_lock#2 ->&c->lock ->&____s->seqcount#2 ->&____s->seqcount ->&obj_hash[i].lock ->&rq->__lock ->&n->list_lock FD: 1 BD: 1 +.+.: ip_vs_pe_mutex FD: 1 BD: 1 +.+.: tunnel4_mutex FD: 1 BD: 1 +.+.: xfrm4_protocol_mutex FD: 35 BD: 4 +.+.: inet_diag_table_mutex ->rcu_node_0 ->&rq->__lock ->&sctp_ep_hashtable[i].lock 
->&ht->lock FD: 1 BD: 1 +...: xfrm_km_lock FD: 1 BD: 1 +...: xfrm_translator_lock FD: 1 BD: 1 +.+.: xfrm6_protocol_mutex FD: 1 BD: 1 +.+.: tunnel6_mutex FD: 1 BD: 1 +.+.: xfrm_if_cb_lock FD: 1 BD: 1 +...: inetsw6_lock FD: 1 BD: 7 +.+.: &hashinfo->lock#2 FD: 19 BD: 5 +.+.: &net->ipv6.ip6addrlbl_table.lock ->&obj_hash[i].lock ->pool_lock#2 ->krc.lock FD: 218 BD: 3424 +.+.: &idev->mc_lock ->fs_reclaim ->pool_lock#2 ->&obj_hash[i].lock ->&c->lock ->&zone->lock ->&____s->seqcount ->&dev_addr_list_lock_key ->_xmit_ETHER ->batched_entropy_u32.lock ->&base->lock ->krc.lock ->batched_entropy_u8.lock ->kfence_freelist_lock ->&n->list_lock ->&bridge_netdev_addr_lock_key ->&dev_addr_list_lock_key#2 ->&batadv_netdev_addr_lock_key ->&vlan_netdev_addr_lock_key ->&macvlan_netdev_addr_lock_key ->&dev_addr_list_lock_key#3 ->remove_cache_srcu ->&bridge_netdev_addr_lock_key/1 ->&rq->__lock ->&dev_addr_list_lock_key/1 ->&dev_addr_list_lock_key#2/1 ->rcu_node_0 ->&rcu_state.expedited_wq ->_xmit_ETHER/1 ->&pool->lock ->&batadv_netdev_addr_lock_key/1 ->&vlan_netdev_addr_lock_key/1 ->&macvlan_netdev_addr_lock_key/1 ->&dev_addr_list_lock_key#3/1 ->&macsec_netdev_addr_lock_key/1 ->&____s->seqcount#2 ->&cfs_rq->removed.lock ->_xmit_IPGRE ->&lock->wait_lock ->&vlan_netdev_addr_lock_key/2 ->&dev_addr_list_lock_key#3/2 FD: 19 BD: 3429 +...: &dev_addr_list_lock_key ->pool_lock#2 ->&c->lock ->&____s->seqcount ->&obj_hash[i].lock ->krc.lock ->&n->list_lock ->&____s->seqcount#2 ->batched_entropy_u8.lock ->kfence_freelist_lock FD: 41 BD: 3444 +...: _xmit_ETHER ->&c->lock ->&zone->lock ->&____s->seqcount ->pool_lock#2 ->&local->filter_lock ->&n->list_lock ->(console_sem).lock ->console_owner_lock ->console_owner ->&____s->seqcount#2 ->&obj_hash[i].lock ->krc.lock FD: 874 BD: 1 +.+.: (wq_completion)ipv6_addrconf ->(work_completion)(&(&net->ipv6.addr_chk_work)->work) ->(work_completion)(&(&ifa->dad_work)->work) ->&rq->__lock FD: 872 BD: 6 +.+.: (work_completion)(&(&net->ipv6.addr_chk_work)->work) ->rtnl_mutex ->rtnl_mutex.wait_lock ->&p->pi_lock ->&rq->__lock FD: 29 BD: 76 ....: &x->wait#21 ->&p->pi_lock FD: 50 BD: 3554 ++--: &ndev->lock ->&ifa->lock ->pool_lock#2 ->&dir->lock#2 ->pcpu_lock ->&tb->tb6_lock ->&____s->seqcount ->&obj_hash[i].lock ->&c->lock ->&n->list_lock ->batched_entropy_u32.lock ->&base->lock ->&____s->seqcount#2 FD: 11 BD: 1 +.+.: stp_proto_mutex ->llc_sap_list_lock FD: 1 BD: 1 ....: switchdev_notif_chain.lock FD: 28 BD: 72 ++++: (switchdev_blocking_notif_chain).rwsem ->&rq->__lock FD: 874 BD: 1 +.+.: br_ioctl_mutex ->rtnl_mutex ->rtnl_mutex.wait_lock ->&p->pi_lock ->&rq->__lock ->br_ioctl_mutex.wait_lock ->rcu_state.barrier_mutex ->dev_base_lock ->lweventlist_lock ->&obj_hash[i].lock ->pool_lock#2 ->krc.lock ->&dir->lock#2 ->netdev_unregistering_wq.lock FD: 306 BD: 81 +.+.: nf_ct_proto_mutex ->defrag4_mutex ->nf_hook_mutex ->cpu_hotplug_lock ->&obj_hash[i].lock ->pool_lock#2 ->defrag6_mutex ->&rq->__lock ->rcu_node_0 ->nf_ct_proto_mutex.wait_lock ->&cfs_rq->removed.lock ->&rcu_state.expedited_wq FD: 237 BD: 11 +.+.: ebt_mutex ->fs_reclaim ->pool_lock#2 ->&mm->mmap_lock ->&c->lock ->&____s->seqcount#2 ->&____s->seqcount ->&rq->__lock ->ebt_mutex.wait_lock ->&cfs_rq->removed.lock ->&obj_hash[i].lock ->&n->list_lock ->pgd_lock ->stock_lock ->key ->pcpu_lock ->percpu_counters_lock FD: 1 BD: 1 +.+.: dsa_tag_drivers_lock FD: 1 BD: 1 +...: protocol_list_lock FD: 1 BD: 1 +...: linkfail_lock FD: 1 BD: 1 +...: rose_neigh_list_lock FD: 1 BD: 1 +.+.: proto_tab_lock#2 FD: 1 BD: 25 ++++: chan_list_lock FD: 1 BD: 4 +.+.: 
l2cap_sk_list.lock FD: 238 BD: 3 +.+.: sk_lock-AF_BLUETOOTH-BTPROTO_L2CAP ->slock-AF_BLUETOOTH-BTPROTO_L2CAP ->chan_list_lock ->&mm->mmap_lock FD: 1 BD: 6 +...: slock-AF_BLUETOOTH-BTPROTO_L2CAP FD: 1 BD: 1 ....: rfcomm_wq.lock FD: 1 BD: 1 +.+.: rfcomm_mutex FD: 1 BD: 1 +.+.: auth_domain_lock FD: 1 BD: 1 +.+.: registered_mechs_lock FD: 1 BD: 1 ....: atm_dev_notify_chain.lock FD: 1 BD: 1 +.+.: proto_tab_lock#3 FD: 874 BD: 1 +.+.: vlan_ioctl_mutex ->&mm->mmap_lock ->rtnl_mutex ->&rq->__lock ->rtnl_mutex.wait_lock ->&p->pi_lock ->vlan_ioctl_mutex.wait_lock ->rcu_state.barrier_mutex ->dev_base_lock ->lweventlist_lock ->pcpu_lock ->pool_lock#2 ->&dir->lock#2 ->&obj_hash[i].lock ->krc.lock ->netdev_unregistering_wq.lock ->rcu_state.barrier_mutex.wait_lock ->&cfs_rq->removed.lock ->stock_lock FD: 1 BD: 1 +.+.: rds_info_lock FD: 139 BD: 8 ++++: rds_trans_sem ->(console_sem).lock ->fs_reclaim ->pool_lock#2 ->crngs.lock ->&id_priv->handler_mutex ->id_table_lock ->&x->wait#27 ->&obj_hash[i].lock FD: 1 BD: 80 ....: &id_priv->lock FD: 2 BD: 75 +.+.: &xa->xa_lock#11 ->pool_lock#2 FD: 193 BD: 83 +.+.: k-sk_lock-AF_INET6 ->k-slock-AF_INET6 ->&tcp_hashinfo.bhash[i].lock ->&h->lhash2[i].lock ->&table->hash[i].lock ->k-clock-AF_INET6 ->&queue->rskq_lock ->&obj_hash[i].lock ->&rq->__lock ->&____s->seqcount#8 ->batched_entropy_u32.lock ->fs_reclaim ->pool_lock#2 ->&c->lock ->tk_core.seq.seqcount ->&base->lock ->slock-AF_INET6 ->clock-AF_INET6 ->&dir->lock ->&hashinfo->ehash_locks[i] ->elock-AF_INET6 FD: 69 BD: 87 +.-.: k-slock-AF_INET6 ->kfence_freelist_lock ->pool_lock#2 ->tk_core.seq.seqcount ->&c->lock ->&n->list_lock ->&____s->seqcount#2 ->&____s->seqcount ->&obj_hash[i].lock ->&tcp_hashinfo.bhash[i].lock ->elock-AF_INET6 ->krc.lock ->&meta->lock ->crngs.lock ->&hashinfo->ehash_locks[i] ->(&req->rsk_timer) ->&base->lock ->&queue->rskq_lock ->k-clock-AF_INET6 FD: 33 BD: 118 ++.-: k-clock-AF_INET6 FD: 21 BD: 115 +.-.: &tcp_hashinfo.bhash[i].lock ->&zone->lock ->&____s->seqcount ->pool_lock#2 ->&c->lock ->&tcp_hashinfo.bhash2[i].lock ->k-clock-AF_INET6 ->clock-AF_INET ->clock-AF_INET6 ->stock_lock ->&obj_hash[i].lock ->k-clock-AF_INET ->per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) ->batched_entropy_u8.lock ->kfence_freelist_lock FD: 20 BD: 116 +.-.: &tcp_hashinfo.bhash2[i].lock ->&zone->lock ->&____s->seqcount ->pool_lock#2 ->&c->lock ->k-clock-AF_INET6 ->clock-AF_INET ->clock-AF_INET6 ->&obj_hash[i].lock ->batched_entropy_u8.lock ->&hashinfo->ehash_locks[i] ->stock_lock ->k-clock-AF_INET ->per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) ->&meta->lock ->kfence_freelist_lock FD: 1 BD: 93 +.+.: &h->lhash2[i].lock FD: 1 BD: 5 +...: &list->lock#4 FD: 42 BD: 21 ++..: k-clock-AF_TIPC ->pool_lock#2 ->&tn->nametbl_lock ->&obj_hash[i].lock ->&base->lock ->&con->sub_lock ->&c->lock ->&n->list_lock FD: 152 BD: 15 +.+.: k-sk_lock-AF_TIPC ->k-slock-AF_TIPC ->&tn->nametbl_lock ->&obj_hash[i].lock ->k-clock-AF_TIPC ->&rq->__lock ->fs_reclaim ->pool_lock#2 ->&dir->lock ->batched_entropy_u32.lock ->k-sk_lock-AF_TIPC/1 ->&zone->lock ->&list->lock#21 ->&c->lock ->&____s->seqcount#2 ->&____s->seqcount ->&n->list_lock ->&base->lock FD: 18 BD: 17 +...: k-slock-AF_TIPC ->&list->lock#21 ->k-clock-AF_TIPC ->&list->lock#45 ->&data->lock ->&obj_hash[i].lock ->pool_lock#2 ->&meta->lock ->kfence_freelist_lock ->quarantine_lock FD: 39 BD: 21 +...: &tn->nametbl_lock ->&c->lock ->&zone->lock ->&____s->seqcount ->pool_lock#2 ->&service->lock ->&nt->cluster_scope_lock ->&obj_hash[i].lock ->krc.lock ->&____s->seqcount#2 ->&n->list_lock FD: 37 BD: 22 +...: 
&service->lock ->pool_lock#2 ->&obj_hash[i].lock ->krc.lock ->&c->lock ->&n->list_lock ->&sub->lock ->&____s->seqcount#2 FD: 28 BD: 77 +.+.: &pnettable->lock ->&rq->__lock ->&obj_hash[i].lock ->pool_lock#2 FD: 28 BD: 77 +.+.: smc_ib_devices.mutex ->&rq->__lock ->&cfs_rq->removed.lock ->&obj_hash[i].lock ->pool_lock#2 FD: 1 BD: 1 +.+.: smc_wr_rx_hash_lock FD: 1 BD: 1 +.+.: v9fs_trans_lock FD: 1 BD: 5 +...: &this->receive_lock FD: 1 BD: 1 +...: lowpan_nhc_lock FD: 323 BD: 8 +.+.: ovs_mutex ->(work_completion)(&data->gc_work) ->nf_ct_proto_mutex ->&obj_hash[i].lock ->pool_lock#2 ->nf_connlabels_lock ->net_rwsem ->quarantine_lock ->&rq->__lock ->krc.lock FD: 303 BD: 82 +.+.: defrag4_mutex ->nf_hook_mutex ->cpu_hotplug_lock ->&obj_hash[i].lock ->pool_lock#2 ->&rq->__lock FD: 303 BD: 82 +.+.: defrag6_mutex ->nf_hook_mutex ->cpu_hotplug_lock ->&obj_hash[i].lock ->pool_lock#2 ->&rq->__lock FD: 1 BD: 138 +.+.: subsys mutex#78 FD: 31 BD: 1 ..-.: &(&gc_work->dwork)->timer FD: 38 BD: 2 +.+.: (work_completion)(&(&gc_work->dwork)->work) ->rcu_node_0 ->&rq->__lock ->&cfs_rq->removed.lock ->&obj_hash[i].lock ->pool_lock#2 ->&base->lock ->&rcu_state.expedited_wq FD: 1 BD: 3509 ...-: &____s->seqcount#7 FD: 31 BD: 1 ..-.: &(&ipvs->defense_work)->timer FD: 35 BD: 6 +.+.: (work_completion)(&(&ipvs->defense_work)->work) ->&s->s_inode_list_lock ->&ipvs->dropentry_lock ->&ipvs->droppacket_lock ->&ipvs->securetcp_lock ->&obj_hash[i].lock ->&base->lock ->&rq->__lock ->&cfs_rq->removed.lock ->pool_lock#2 ->rcu_node_0 ->&rcu_state.expedited_wq FD: 1 BD: 7 +...: &ipvs->dropentry_lock FD: 1 BD: 7 +...: &ipvs->droppacket_lock FD: 1 BD: 7 +...: &ipvs->securetcp_lock FD: 38 BD: 2 +.+.: drain_vmap_work ->vmap_purge_lock ->&rq->__lock FD: 14 BD: 5 +.-.: (&net->can.stattimer) ->&obj_hash[i].lock ->&base->lock FD: 13 BD: 239 +...: map_idr_lock ->&zone->lock ->&____s->seqcount ->pool_lock#2 ->&obj_hash[i].lock ->&c->lock ->&n->list_lock ->&____s->seqcount#2 FD: 1 BD: 1 ....: rcu_read_lock_sched FD: 13 BD: 238 +...: prog_idr_lock ->pool_lock#2 ->&obj_hash[i].lock ->&c->lock ->&____s->seqcount#2 ->&____s->seqcount ->&n->list_lock FD: 1 BD: 73 +...: bpf_lock FD: 1 BD: 1 ....: rcu_read_lock_trace FD: 9 BD: 238 +...: btf_idr_lock ->pool_lock#2 ->&obj_hash[i].lock ->&c->lock FD: 131 BD: 127 +.+.: &map->freeze_mutex ->&vma->vm_lock->lock ->vmap_area_lock ->fs_reclaim ->&____s->seqcount ->&rq->__lock ->pool_lock#2 ->stock_lock ->ptlock_ptr(page) ->ptlock_ptr(page)#2 FD: 1 BD: 6 +.+.: ima_keys_lock FD: 132 BD: 137 +.+.: scomp_lock ->fs_reclaim ->pool_lock#2 ->free_vmap_area_lock ->vmap_area_lock ->&____s->seqcount ->init_mm.page_table_lock ->&zone->lock ->&c->lock FD: 29 BD: 1 +.+.: pcpu_drain_mutex ->&pcp->lock ->&rq->__lock FD: 31 BD: 1 ..-.: &(&ovs_net->masks_rebalance)->timer FD: 324 BD: 6 +.+.: (work_completion)(&(&ovs_net->masks_rebalance)->work) ->ovs_mutex ->&obj_hash[i].lock ->&base->lock ->&rq->__lock FD: 448 BD: 5 +.+.: k-sk_lock-AF_RXRPC ->k-slock-AF_RXRPC ->&rxnet->local_mutex ->&local->services_lock ->fs_reclaim ->pool_lock#2 ->&rx->incoming_lock ->&obj_hash[i].lock ->&rxnet->conn_lock ->&call->waitq ->(rxrpc_call_limiter).lock ->&rx->recvmsg_lock ->&rx->call_lock ->&rxnet->call_lock ->(&call->timer) ->&base->lock ->&list->lock#23 ->&c->lock ->&rq->__lock ->quarantine_lock ->batched_entropy_u8.lock ->kfence_freelist_lock ->&meta->lock ->rcu_node_0 ->&n->list_lock FD: 1 BD: 6 +...: k-slock-AF_RXRPC FD: 435 BD: 7 +.+.: &rxnet->local_mutex ->fs_reclaim ->pool_lock#2 ->&obj_hash[i].lock ->crngs.lock 
->mmu_notifier_invalidate_range_start ->&sb->s_type->i_lock_key#8 ->&____s->seqcount ->&c->lock ->&dir->lock ->k-sk_lock-AF_INET6 ->k-slock-AF_INET6 ->cpu_hotplug_lock ->&rq->__lock ->kthread_create_lock ->&p->pi_lock ->&x->wait ->&x->wait#22 ->stock_lock ->&table->hash[i].lock ->k-clock-AF_INET6 ->&xa->xa_lock#6 ->&fsnotify_mark_srcu ->&n->list_lock ->&____s->seqcount#2 ->k-sk_lock-AF_INET ->k-slock-AF_INET ->&cfs_rq->removed.lock FD: 9 BD: 89 +...: &table->hash[i].lock ->k-clock-AF_INET6 ->&table->hash2[i].lock ->k-clock-AF_INET ->clock-AF_INET ->clock-AF_INET6 FD: 1 BD: 90 +...: &table->hash2[i].lock FD: 301 BD: 2 +.+.: netstamp_work ->cpu_hotplug_lock FD: 29 BD: 8 ....: &x->wait#22 ->&p->pi_lock FD: 1 BD: 9 +.+.: &local->services_lock FD: 1 BD: 10 +.+.: &rxnet->conn_lock FD: 1 BD: 6 ....: &call->waitq FD: 1 BD: 6 +.+.: &rx->call_lock FD: 1 BD: 6 +.+.: &rxnet->call_lock FD: 33 BD: 5 +.-.: (&rxnet->peer_keepalive_timer) FD: 18 BD: 7 +.+.: (wq_completion)krxrpcd ->(work_completion)(&rxnet->peer_keepalive_work) ->(work_completion)(&rxnet->service_conn_reaper) FD: 15 BD: 8 +.+.: (work_completion)(&rxnet->peer_keepalive_work) ->&rxnet->peer_hash_lock ->&obj_hash[i].lock ->&base->lock FD: 1 BD: 9 +.+.: &rxnet->peer_hash_lock FD: 137 BD: 1 +.+.: init_user_ns.keyring_sem ->key_user_lock ->root_key_user.lock ->fs_reclaim ->pool_lock#2 ->crngs.lock ->key_serial_lock ->key_construction_mutex ->&type->lock_class ->keyring_serialise_link_lock FD: 1 BD: 5 +.+.: root_key_user.lock FD: 1 BD: 6 +.+.: keyring_name_lock FD: 1 BD: 1 +.+.: template_list FD: 1 BD: 1 +.+.: idr_lock FD: 130 BD: 9 +.+.: ima_extend_list_mutex ->fs_reclaim ->pool_lock#2 ->&c->lock ->&____s->seqcount ->&n->list_lock ->&rq->__lock ->rcu_node_0 ->ima_extend_list_mutex.wait_lock ->remove_cache_srcu ->&____s->seqcount#2 ->batched_entropy_u8.lock ->kfence_freelist_lock FD: 1 BD: 1 +.+.: clk_debug_lock FD: 29 BD: 4 +.+.: deferred_probe_work ->deferred_probe_mutex FD: 1 BD: 72 +.+.: &(&net->nexthop.notifier_chain)->rwsem FD: 277 BD: 81 +.+.: k-sk_lock-AF_INET ->k-slock-AF_INET ->&table->hash[i].lock ->&obj_hash[i].lock ->k-clock-AF_INET ->&rq->__lock ->&tcp_hashinfo.bhash[i].lock ->pool_lock#2 ->stock_lock ->&hashinfo->ehash_locks[i] ->batched_entropy_u32.lock ->tk_core.seq.seqcount ->batched_entropy_u16.lock ->fs_reclaim ->&c->lock ->&n->list_lock ->&base->lock ->&ei->socket.wq.wait ->&____s->seqcount ->&____s->seqcount#2 ->&in_dev->mc_tomb_lock ->&im->lock ->_xmit_IPGRE ->&____s->seqcount#8 ->(kmod_concurrent_max).lock ->&x->wait#17 ->running_helpers_waitq.lock ->&mm->mmap_lock ->l2tp_ip_lock ->&h->lhash2[i].lock ->&queue->rskq_lock FD: 45 BD: 83 +.-.: k-slock-AF_INET ->pool_lock#2 ->batched_entropy_u32.lock ->&obj_hash[i].lock ->&c->lock ->&n->list_lock ->&____s->seqcount#2 ->&____s->seqcount ->batched_entropy_u8.lock ->kfence_freelist_lock ->elock-AF_INET ->&base->lock ->&tcp_hashinfo.bhash[i].lock ->krc.lock ->&meta->lock ->key#24 FD: 1 BD: 118 ++..: k-clock-AF_INET FD: 872 BD: 2 +.+.: reg_work ->rtnl_mutex FD: 1 BD: 72 +...: reg_pending_beacons_lock FD: 886 BD: 2 +.+.: (work_completion)(&fw_work->work) ->fs_reclaim ->pool_lock#2 ->&fw_cache.lock ->&c->lock ->&zone->lock ->&____s->seqcount ->tk_core.seq.seqcount ->async_lock ->init_task.alloc_lock ->&dentry->d_lock ->&sb->s_type->i_mutex_key ->&obj_hash[i].lock ->&base->lock ->(console_sem).lock ->console_owner_lock ->console_owner ->rcu_node_0 ->&rcu_state.expedited_wq ->&rq->__lock ->umhelper_sem ->fw_lock ->rtnl_mutex FD: 2 BD: 3 +.+.: &fw_cache.lock ->pool_lock#2 FD: 1 BD: 1 
+.+.: system_transition_mutex/1 FD: 1 BD: 1 +.+.: acpi_gpio_deferred_req_irqs_lock FD: 31 BD: 1 ..-.: fs/file_table.c:368 FD: 1 BD: 1 +.+.: prepare_lock FD: 3 BD: 4 +.+.: subsys mutex#79 ->&k->k_lock FD: 2 BD: 11 +.+.: fw_lock ->&x->wait#23 FD: 1 BD: 12 ....: &x->wait#23 FD: 4 BD: 2 +.+.: (delayed_fput_work).work ->&obj_hash[i].lock ->pool_lock#2 FD: 1 BD: 1 +.+.: cdev_lock FD: 369 BD: 2 +.+.: &tty->legacy_mutex ->&tty->read_wait ->&tty->write_wait ->&tty->ldisc_sem ->&tty->files_lock ->&port->lock ->&port->mutex ->&port_lock_key ->tasklist_lock ->&tty->ctrl.lock ->&f->f_lock ->&obj_hash[i].lock ->pool_lock#2 FD: 1 BD: 7 ....: &tty->read_wait FD: 1 BD: 3667 -.-.: &tty->write_wait FD: 353 BD: 3 ++++: &tty->ldisc_sem ->fs_reclaim ->pool_lock#2 ->free_vmap_area_lock ->vmap_area_lock ->&____s->seqcount ->init_mm.page_table_lock ->&zone->lock ->&tty->write_wait ->&tty->read_wait ->&tty->termios_rwsem ->&mm->mmap_lock ->&port_lock_key ->&port->lock ->&tty->flow.lock ->&ldata->atomic_read_lock FD: 265 BD: 6 ++++: &tty->termios_rwsem ->&port->mutex ->&tty->write_wait ->&tty->read_wait ->&ldata->output_lock ->&port_lock_key FD: 1 BD: 3 +.+.: &tty->files_lock FD: 1 BD: 3667 -.-.: &port->lock FD: 129 BD: 10 +.+.: hash_mutex ->fs_reclaim ->pool_lock#2 FD: 4 BD: 10 -.-.: &i->lock ->&port_lock_key FD: 29 BD: 4196 ....: &wq#2 ->&p->pi_lock FD: 402 BD: 1 +.+.: &bdev->bd_fsfreeze_mutex ->sb_lock ->fs_reclaim ->pool_lock#2 ->&type->s_umount_key#24/1 ->&type->s_umount_key#25/1 ->&c->lock ->&zone->lock ->&____s->seqcount ->&type->s_umount_key#26/1 ->&type->s_umount_key#27/1 FD: 160 BD: 2 +.+.: &type->s_umount_key#24/1 ->fs_reclaim ->pool_lock#2 ->pcpu_alloc_mutex ->shrinker_rwsem ->list_lrus_mutex ->sb_lock ->&obj_hash[i].lock ->&wq->mutex ->&____s->seqcount ->&zone->lock ->kthread_create_lock ->&p->pi_lock ->&x->wait ->&rq->__lock ->wq_pool_mutex ->mmu_notifier_invalidate_range_start ->&xa->xa_lock#6 ->lock#4 ->&mapping->private_lock ->tk_core.seq.seqcount ->&dd->lock ->bit_wait_table + i ->wq_mayday_lock ->&sbi->old_work_lock ->(work_completion)(&(&sbi->old_work)->work) FD: 29 BD: 4157 ..-.: bit_wait_table + i ->&p->pi_lock FD: 133 BD: 4 +.+.: ext4_li_mtx ->fs_reclaim ->pool_lock#2 ->batched_entropy_u16.lock ->&eli->li_list_mtx ->kthread_create_lock ->&p->pi_lock ->&rq->__lock ->&x->wait ->&obj_hash[i].lock FD: 1 BD: 3 +.+.: &sbi->old_work_lock FD: 1 BD: 3 +.+.: (work_completion)(&(&sbi->old_work)->work) FD: 12 BD: 4157 ....: &xa->xa_lock#12 ->pool_lock#2 ->&c->lock ->&obj_hash[i].lock ->&n->list_lock ->&____s->seqcount#2 ->&____s->seqcount FD: 145 BD: 2 +.+.: &type->s_umount_key#25/1 ->fs_reclaim ->pool_lock#2 ->pcpu_alloc_mutex ->shrinker_rwsem ->list_lrus_mutex ->sb_lock ->mmu_notifier_invalidate_range_start ->&____s->seqcount ->&xa->xa_lock#6 ->lock#4 ->&mapping->private_lock ->tk_core.seq.seqcount ->&dd->lock ->&obj_hash[i].lock ->&rq->__lock ->bit_wait_table + i ->&sb->s_type->i_lock_key#3 ->lock#5 ->&lruvec->lru_lock ->&zone->lock ->crypto_alg_sem FD: 145 BD: 2 +.+.: &type->s_umount_key#26/1 ->fs_reclaim ->pcpu_alloc_mutex ->shrinker_rwsem ->list_lrus_mutex ->sb_lock ->&c->lock ->&zone->lock ->&____s->seqcount ->pool_lock#2 ->mmu_notifier_invalidate_range_start ->&xa->xa_lock#6 ->lock#4 ->&mapping->private_lock ->tk_core.seq.seqcount ->&dd->lock ->&obj_hash[i].lock ->bit_wait_table + i ->&rq->__lock ->&sb->s_type->i_lock_key#3 ->lock#5 ->&lruvec->lru_lock ->crypto_alg_sem FD: 396 BD: 2 +.+.: &type->s_umount_key#27/1 ->fs_reclaim ->pcpu_alloc_mutex ->shrinker_rwsem ->list_lrus_mutex ->&c->lock 
->&____s->seqcount ->pool_lock#2 ->sb_lock ->mmu_notifier_invalidate_range_start ->&zone->lock ->&xa->xa_lock#6 ->lock#4 ->&mapping->private_lock ->tk_core.seq.seqcount ->&dd->lock ->&obj_hash[i].lock ->bit_wait_table + i ->&rq->__lock ->&sb->s_type->i_lock_key#3 ->lock#5 ->&lruvec->lru_lock ->crypto_alg_sem ->percpu_counters_lock ->batched_entropy_u8.lock ->kfence_freelist_lock ->inode_hash_lock ->&sb->s_type->i_lock_key#22 ->&sb->s_type->i_mutex_key#8 ->proc_subdir_lock ->proc_inum_ida.xa_lock ->&journal->j_state_lock ->kthread_create_lock ->&p->pi_lock ->&x->wait ->&journal->j_wait_done_commit ->&sbi->s_error_lock ->&base->lock ->&fq->mq_flush_lock ->&p->alloc_lock ->cpu_hotplug_lock ->wq_pool_mutex ->&ei->i_es_lock ->ext4_grpinfo_slab_create_mutex ->&s->s_inode_list_lock ->ext4_li_mtx ->lock ->&root->kernfs_rwsem ->(console_sem).lock ->&dentry->d_lock FD: 36 BD: 148 +.+.: &bgl->locks[i].lock ->&sbi->s_md_lock ->&obj_hash[i].lock ->pool_lock#2 ->&ei->i_prealloc_lock ->&pa->pa_lock ->&lg->lg_prealloc_lock ->&pa->pa_lock#2 ->quarantine_lock ->&meta->lock ->kfence_freelist_lock FD: 56 BD: 4140 +.+.: &sb->s_type->i_lock_key#22 ->&dentry->d_lock ->&lru->node[i].lock ->&xa->xa_lock#6 ->bit_wait_table + i FD: 246 BD: 7 ++++: &sb->s_type->i_mutex_key#8 ->&ei->i_es_lock ->&ei->i_data_sem ->mmu_notifier_invalidate_range_start ->&zone->lock ->&____s->seqcount ->pool_lock#2 ->&c->lock ->integrity_iint_lock ->remove_cache_srcu ->tk_core.seq.seqcount ->&ei->xattr_sem ->fs_reclaim ->&xa->xa_lock#6 ->lock#4 ->&mapping->private_lock ->&sb->s_type->i_lock_key#22 ->&wb->list_lock ->&journal->j_state_lock ->jbd2_handle ->&obj_hash[i].lock ->batched_entropy_u8.lock ->kfence_freelist_lock ->&rq->__lock ->rcu_node_0 ->&meta->lock ->quarantine_lock ->per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) ->mapping.invalidate_lock ->free_vmap_area_lock ->vmap_area_lock ->init_mm.page_table_lock ->swap_cgroup_mutex ->&base->lock ->&fq->mq_flush_lock ->&x->wait#26 ->(&timer.timer) ->swapon_mutex ->proc_poll_wait.lock ->&dentry->d_lock ->&mm->mmap_lock ->&dd->lock ->&journal->j_wait_commit ->&journal->j_wait_done_commit ->stock_lock ->&____s->seqcount#2 ->&sb->s_type->i_mutex_key#8/4 ->&n->list_lock ->&sem->wait_lock ->&rcu_state.expedited_wq ->key#3 ->key#15 ->ima_extend_list_mutex ->&p->alloc_lock ->&list->lock ->kauditd_wait.lock ->&sbi->s_writepages_rwsem ->&folio_wait_table[i] ->lock#5 ->&p->pi_lock ->pool_lock ->&cfs_rq->removed.lock ->pgd_lock ->key ->pcpu_lock ->percpu_counters_lock ->&lruvec->lru_lock ->lock#10 ->&journal->j_checkpoint_mutex ->bit_wait_table + i ->&journal->j_wait_transaction_locked ->&sem->waiters ->&rsp->gp_wait FD: 36 BD: 4123 ++++: &ei->i_es_lock ->&____s->seqcount ->pool_lock#2 ->&c->lock ->&sbi->s_es_lock ->&obj_hash[i].lock ->key#2 ->key#6 ->key#7 ->key#8 ->&____s->seqcount#2 ->&n->list_lock ->batched_entropy_u8.lock ->kfence_freelist_lock ->&meta->lock ->quarantine_lock FD: 125 BD: 145 ++++: &ei->i_data_sem ->mmu_notifier_invalidate_range_start ->pool_lock#2 ->&ei->i_es_lock ->&obj_hash[i].lock ->&c->lock ->&zone->lock ->&____s->seqcount ->&ei->i_prealloc_lock ->&n->list_lock ->&sb->s_type->i_lock_key#22 ->&(ei->i_block_reservation_lock) ->&lg->lg_mutex ->&ei->i_raw_lock ->&mapping->private_lock ->&rq->__lock ->&wb->list_lock ->&xa->xa_lock#6 ->lock#4 ->&ret->b_state_lock ->&journal->j_revoke_lock ->key#15 ->&sbi->s_md_lock ->key#3 ->&wb->work_lock ->&____s->seqcount#2 ->remove_cache_srcu ->&ei->i_data_sem/1 ->rcu_node_0 ->stock_lock ->&pa->pa_lock#2 ->&sem->wait_lock ->&rcu_state.expedited_wq 
->bit_wait_table + i ->&journal->j_state_lock ->&bgl->locks[i].lock ->&cfs_rq->removed.lock ->batched_entropy_u8.lock ->kfence_freelist_lock ->&meta->lock ->quarantine_lock ->&dd->lock ->&journal->j_wait_updates ->(console_sem).lock ->pgd_lock ->key ->pcpu_lock ->percpu_counters_lock ->&base->lock ->per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) ->tk_core.seq.seqcount FD: 1 BD: 4124 +.+.: &sbi->s_es_lock FD: 80 BD: 147 ++++: &journal->j_state_lock ->&journal->j_wait_done_commit ->&journal->j_wait_commit ->tk_core.seq.seqcount ->&obj_hash[i].lock ->&base->lock ->&journal->j_wait_updates ->&journal->j_wait_transaction_locked ->&journal->j_list_lock ->&journal->j_wait_reserved ->&lock->wait_lock ->&p->pi_lock FD: 29 BD: 148 ....: &journal->j_wait_done_commit ->&p->pi_lock FD: 1 BD: 4 +.+.: &sbi->s_error_lock FD: 34 BD: 151 ..-.: &fq->mq_flush_lock ->&q->requeue_lock ->&obj_hash[i].lock ->tk_core.seq.seqcount ->bit_wait_table + i ->&x->wait#26 FD: 1 BD: 154 ..-.: &q->requeue_lock FD: 29 BD: 148 ....: &journal->j_wait_commit ->&p->pi_lock FD: 161 BD: 3 +.+.: ext4_grpinfo_slab_create_mutex ->slab_mutex FD: 1 BD: 1 ....: &rs->lock FD: 32 BD: 2 +.+.: (work_completion)(&s->destroy_work) ->&rsp->gp_wait ->pcpu_lock ->&obj_hash[i].lock ->quarantine_lock ->&rq->__lock ->&base->lock FD: 4 BD: 140 ..-.: &rsp->gp_wait ->&obj_hash[i].lock ->pool_lock#2 FD: 190 BD: 5 ++++: &type->i_mutex_dir_key#3 ->fs_reclaim ->pool_lock#2 ->&dentry->d_lock ->rename_lock.seqcount ->&ei->i_es_lock ->&ei->i_data_sem ->mmu_notifier_invalidate_range_start ->&____s->seqcount ->&xa->xa_lock#6 ->lock#4 ->&mapping->private_lock ->tk_core.seq.seqcount ->&dd->lock ->&obj_hash[i].lock ->bit_wait_table + i ->&rq->__lock ->inode_hash_lock ->&journal->j_state_lock ->&sb->s_type->i_lock_key#22 ->namespace_sem ->&zone->lock ->&c->lock ->batched_entropy_u8.lock ->kfence_freelist_lock ->tomoyo_ss ->&s->s_inode_list_lock ->&ei->xattr_sem ->jbd2_handle ->&n->list_lock ->&____s->seqcount#2 ->stock_lock ->remove_cache_srcu ->rcu_node_0 ->&cfs_rq->removed.lock ->&journal->j_wait_transaction_locked ->&rcu_state.expedited_wq ->&sem->wait_lock ->quarantine_lock ->per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) ->&base->lock FD: 43 BD: 81 +.+.: rcu_state.barrier_mutex ->rcu_state.barrier_lock ->&x->wait#24 ->&rq->__lock ->&pool->lock ->rcu_state.barrier_mutex.wait_lock ->&cfs_rq->removed.lock ->&obj_hash[i].lock ->pool_lock#2 ->pgd_lock ->key ->pcpu_lock ->percpu_counters_lock ->rcu_node_0 ->&rcu_state.expedited_wq ->stock_lock FD: 29 BD: 82 ..-.: &x->wait#24 ->&p->pi_lock FD: 30 BD: 1 +.-.: (&cb->timer) ->&obj_hash[i].lock ->&base->lock ->tk_core.seq.seqcount ->&rq_wait->wait FD: 1 BD: 1 +.+.: (init_mm).mmap_lock FD: 158 BD: 1 +.+.: &type->s_umount_key#28/1 ->fs_reclaim ->pool_lock#2 ->pcpu_alloc_mutex ->shrinker_rwsem ->list_lrus_mutex ->sb_lock ->&zone->lock ->&____s->seqcount ->&c->lock ->mmu_notifier_invalidate_range_start ->&sb->s_type->i_lock_key#23 ->&s->s_inode_list_lock ->tk_core.seq.seqcount ->&sb->s_type->i_mutex_key#9 ->&dentry->d_lock FD: 42 BD: 4134 +.+.: &sb->s_type->i_lock_key#23 ->&dentry->d_lock ->bit_wait_table + i ->&dentry->d_lock/1 FD: 152 BD: 4 ++++: &sb->s_type->i_mutex_key#9 ->fs_reclaim ->pool_lock#2 ->&dentry->d_lock ->mmu_notifier_invalidate_range_start ->&sb->s_type->i_lock_key#23 ->&s->s_inode_list_lock ->tk_core.seq.seqcount ->rename_lock.seqcount ->proc_subdir_lock ->sysctl_lock ->&obj_hash[i].lock ->&c->lock ->&____s->seqcount ->&p->alloc_lock ->&pid->lock ->namespace_sem ->tomoyo_ss ->&n->list_lock ->&rq->__lock 
->&xa->xa_lock#12 ->stock_lock ->&____s->seqcount#2 ->rcu_node_0 ->&rcu_state.gp_wq ->batched_entropy_u8.lock ->kfence_freelist_lock ->remove_cache_srcu ->per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) ->&rcu_state.expedited_wq ->&cfs_rq->removed.lock FD: 242 BD: 2 .+.+: sb_writers#3 ->mount_lock ->&sb->s_type->i_mutex_key#9 ->sysctl_lock ->fs_reclaim ->pool_lock#2 ->&obj_hash[i].lock ->&h->resize_lock ->hugetlb_lock ->tk_core.seq.seqcount ->&sb->s_type->i_lock_key#23 ->&wb->list_lock ->&dentry->d_lock ->tomoyo_ss ->&mm->mmap_lock ->oom_adj_mutex ->&p->pi_lock ->&rq->__lock ->&____s->seqcount#11 ->&(&net->ipv4.ping_group_range.lock)->lock ->&c->lock ->&n->list_lock ->oom_adj_mutex.wait_lock ->rcu_node_0 ->&cfs_rq->removed.lock ->remove_cache_srcu ->&rcu_state.expedited_wq FD: 131 BD: 3 +.+.: &h->resize_lock ->free_hpage_work ->hugetlb_lock ->fs_reclaim ->&____s->seqcount ->pool_lock#2 FD: 1 BD: 4 +.+.: free_hpage_work FD: 2 BD: 129 ....: hugetlb_lock ->&____s->seqcount#2 FD: 191 BD: 134 ++++: mapping.invalidate_lock ->mmu_notifier_invalidate_range_start ->&zone->lock ->&____s->seqcount ->pool_lock#2 ->&xa->xa_lock#6 ->lock#4 ->&ei->i_es_lock ->&ei->i_data_sem ->tk_core.seq.seqcount ->&dd->lock ->&obj_hash[i].lock ->&c->lock ->per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) ->batched_entropy_u8.lock ->kfence_freelist_lock ->rcu_node_0 ->&rq->__lock ->&base->lock ->&mapping->i_mmap_rwsem ->&journal->j_state_lock ->jbd2_handle ->stock_lock ->&folio_wait_table[i] ->&sem->wait_lock ->&mapping->private_lock ->&sb->s_type->i_lock_key#22 ->&wb->list_lock ->&sbi->s_writepages_rwsem ->lock#5 ->&lruvec->lru_lock ->&ei->i_data_sem/1 ->&____s->seqcount#2 ->&n->list_lock ->&rcu_state.expedited_wq ->remove_cache_srcu ->fs_reclaim ->&cfs_rq->removed.lock ->&p->pi_lock ->quarantine_lock ->bit_wait_table + i ->&sem->waiters ->&rsp->gp_wait ->&journal->j_wait_transaction_locked ->&journal->j_wait_commit ->&journal->j_wait_done_commit ->key#3 ->key#15 FD: 1 BD: 4112 ++++: integrity_iint_lock FD: 211 BD: 4 +.+.: &iint->mutex ->&ei->xattr_sem ->fs_reclaim ->pool_lock#2 ->&obj_hash[i].lock ->mmu_notifier_invalidate_range_start ->ima_extend_list_mutex ->mapping.invalidate_lock ->&folio_wait_table[i] ->&rq->__lock ->&c->lock ->&zone->lock ->&____s->seqcount ->batched_entropy_u8.lock ->kfence_freelist_lock ->tk_core.seq.seqcount ->&n->list_lock ->&lock->wait_lock ->remove_cache_srcu ->pgd_lock ->key ->pcpu_lock ->percpu_counters_lock ->&cfs_rq->removed.lock ->rcu_node_0 ->&____s->seqcount#2 ->ima_extend_list_mutex.wait_lock ->&p->pi_lock ->&rcu_state.expedited_wq ->&p->alloc_lock ->&list->lock ->kauditd_wait.lock FD: 61 BD: 10 .+.+: &ei->xattr_sem ->&mapping->private_lock ->&rq->__lock ->&cfs_rq->removed.lock ->&obj_hash[i].lock ->mmu_notifier_invalidate_range_start ->&____s->seqcount ->pool_lock#2 ->&xa->xa_lock#6 ->lock#4 ->tk_core.seq.seqcount ->&dd->lock ->&c->lock ->bit_wait_table + i FD: 1 BD: 4 ++++: entries_lock FD: 251 BD: 2 +.+.: &sig->exec_update_lock ->&p->alloc_lock ->&sighand->siglock ->&newf->file_lock ->batched_entropy_u64.lock ->&mm->mmap_lock ->delayed_uprobe_lock ->&memcg->mm_list.lock ->pgd_lock ->pool_lock#2 ->&obj_hash[i].lock ->key ->pcpu_lock ->percpu_counters_lock ->pool_lock ->quarantine_lock ->&rq->__lock ->&cfs_rq->removed.lock ->rcu_node_0 ->&rcu_state.expedited_wq ->stock_lock FD: 1 BD: 4111 +.+.: &memcg->mm_list.lock FD: 3 BD: 3465 ..-.: batched_entropy_u16.lock ->crngs.lock FD: 29 BD: 4123 +.+.: ptlock_ptr(page)#2/1 FD: 128 BD: 1 ++++: &type->s_umount_key#29 ->shrinker_rwsem ->&dentry->d_lock 
->rename_lock.seqcount ->&dentry->d_lock/1 ->&sb->s_type->i_lock_key#23 ->&s->s_inode_list_lock ->&xa->xa_lock#6 ->sysctl_lock ->&obj_hash[i].lock ->pool_lock#2 ->&fsnotify_mark_srcu ->sb_lock ->&lru->node[i].lock ->&rq->__lock ->&pid->lock FD: 883 BD: 2 +.+.: (work_completion)(&map->work) ->vmap_area_lock ->&obj_hash[i].lock ->purge_vmap_area_lock ->pool_lock#2 ->dev_map_lock ->rcu_node_0 ->&rnp->exp_wq[2] ->&rq->__lock ->rcu_state.barrier_mutex ->stock_lock ->pcpu_lock ->percpu_counters_lock ->&rnp->exp_wq[1] ->&rnp->exp_wq[0] ->&rnp->exp_wq[3] ->&rnp->exp_lock ->rcu_state.exp_mutex ->rcu_state.exp_mutex.wait_lock ->&p->pi_lock ->&cfs_rq->removed.lock ->rtnl_mutex ->btf_idr_lock ->rcu_state.barrier_mutex.wait_lock ->&htab->buckets[i].lock ->per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) ->&zone->lock ->&rcu_state.expedited_wq ->cgroup_mutex FD: 872 BD: 2 +.+.: (work_completion)(&aux->work) ->map_idr_lock ->&obj_hash[i].lock ->pool_lock#2 ->pack_mutex ->pcpu_lock ->vmap_area_lock ->purge_vmap_area_lock ->quarantine_lock ->stock_lock ->&rq->__lock ->&meta->lock ->kfence_freelist_lock ->&base->lock ->rtnl_mutex ->rcu_node_0 ->per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) ->rtnl_mutex.wait_lock ->&p->pi_lock FD: 1 BD: 4124 ....: key#2 FD: 926 BD: 3 +.+.: &p->lock ->fs_reclaim ->pool_lock#2 ->&mm->mmap_lock ->file_systems_lock ->&____s->seqcount ->&obj_hash[i].lock ->namespace_sem ->&c->lock ->&rq->__lock ->&of->mutex ->&n->list_lock ->remove_cache_srcu ->batched_entropy_u8.lock ->kfence_freelist_lock ->cpufreq_driver_lock ->module_mutex ->&____s->seqcount#2 ->rcu_node_0 ->&cfs_rq->removed.lock ->&rcu_state.expedited_wq ->pgd_lock ->stock_lock ->key ->pcpu_lock ->percpu_counters_lock FD: 146 BD: 1 +.+.: &type->s_umount_key#30/1 ->fs_reclaim ->pool_lock#2 ->pcpu_alloc_mutex ->shrinker_rwsem ->list_lrus_mutex ->sb_lock ->&root->kernfs_rwsem ->&sb->s_type->i_lock_key#24 ->&root->kernfs_supers_rwsem ->&dentry->d_lock FD: 41 BD: 4138 +.+.: &sb->s_type->i_lock_key#24 ->&dentry->d_lock FD: 150 BD: 3 ++++: &type->i_mutex_dir_key#4 ->fs_reclaim ->pool_lock#2 ->&dentry->d_lock ->rename_lock.seqcount ->&root->kernfs_rwsem ->&sb->s_type->i_lock_key#24 ->&c->lock ->&____s->seqcount ->namespace_sem ->tk_core.seq.seqcount ->batched_entropy_u8.lock ->kfence_freelist_lock ->&n->list_lock ->remove_cache_srcu ->&rq->__lock ->rcu_node_0 ->&obj_hash[i].lock ->&sem->wait_lock ->&p->pi_lock ->&____s->seqcount#2 ->&cfs_rq->removed.lock FD: 29 BD: 193 ....: &x->wait#25 ->&p->pi_lock FD: 42 BD: 11 +.+.: &net->unx.table.locks[i] ->&net->unx.table.locks[i]/1 FD: 1083 BD: 2 +.+.: &sb->s_type->i_mutex_key#10 ->&net->unx.table.locks[i] ->&u->lock ->&u->peer_wait ->rlock-AF_UNIX ->pool_lock#2 ->&dir->lock ->&obj_hash[i].lock ->rcu_node_0 ->&rq->__lock ->nl_table_lock ->nl_table_wait.lock ->clock-AF_NETLINK ->genl_sk_destructing_waitq.lock ->&nlk->wait ->wlock-AF_NETLINK ->(netlink_chain).rwsem ->tomoyo_ss ->tk_core.seq.seqcount ->&sb->s_type->i_lock_key#8 ->&wb->list_lock ->&dentry->d_lock ->sk_lock-AF_INET ->slock-AF_INET ->clock-AF_INET ->sk_lock-AF_INET6 ->slock-AF_INET6 ->clock-AF_INET6 ->&cfs_rq->removed.lock ->&table->hash[i].lock ->&meta->lock ->kfence_freelist_lock ->&net->packet.sklist_lock ->&po->bind_lock ->sk_lock-AF_PACKET ->slock-AF_PACKET ->fanout_mutex ->&rnp->exp_wq[3] ->clock-AF_PACKET ->rlock-AF_PACKET ->pcpu_lock ->elock-AF_PACKET ->&rnp->exp_wq[1] ->quarantine_lock ->&rcu_state.expedited_wq ->&rnp->exp_wq[0] ->rcu_state.exp_mutex.wait_lock ->&p->pi_lock ->sk_lock-AF_BLUETOOTH-BTPROTO_HCI 
->slock-AF_BLUETOOTH-BTPROTO_HCI ->hci_dev_list_lock ->rlock-AF_BLUETOOTH ->wlock-AF_BLUETOOTH ->&c->lock ->&____s->seqcount ->pool_lock ->stock_lock ->clock-AF_ROSE ->sk_lock-AF_ROSE ->slock-AF_ROSE ->wlock-AF_ROSE ->&list->lock#20 ->l2tp_ip6_lock ->&hashinfo->lock#2 ->&net->ipv4.ra_mutex ->&hashinfo->lock ->&net->xdp.lock ->&xs->map_list_lock ->&xs->mutex ->vmap_area_lock ->purge_vmap_area_lock ->clock-AF_XDP ->sk_lock-AF_TIPC ->slock-AF_TIPC ->clock-AF_NETROM ->sk_lock-AF_NETROM ->slock-AF_NETROM ->bcm_notifier_lock ->sk_lock-AF_CAN ->slock-AF_CAN ->rlock-AF_CAN ->elock-AF_CAN ->(work_completion)(&smc->connect_work) ->sk_lock-AF_SMC ->slock-AF_SMC ->&smc->clcsock_release_lock ->sk_lock-AF_INET6/1 ->&net->sctp.addr_wq_lock ->sk_lock-AF_KCM ->slock-AF_KCM ->&mux->lock ->(work_completion)(&kcm->tx_work) ->&mux->rx_lock ->&knet->mutex ->&rnp->exp_wq[2] ->dgram_lock ->clock-AF_IEEE802154 ->rlock-AF_IEEE802154 ->&rnp->exp_lock ->rcu_state.exp_mutex ->clock-AF_RXRPC ->(wq_completion)krxrpcd ->&wq->mutex ->rlock-AF_RXRPC ->&x->wait ->sk_lock-AF_VSOCK ->slock-AF_VSOCK ->sk_lock-AF_INET/1 ->clock-AF_RDS ->&rs->rs_recv_lock ->rds_cong_monitor_lock ->rds_cong_lock ->&rs->rs_lock ->&rs->rs_rdma_lock ->&q->lock ->rds_sock_lock ->sk_lock-AF_LLC ->slock-AF_LLC ->(&llc->pf_cycle_timer.timer) ->&base->lock ->(&llc->ack_timer.timer) ->(&llc->rej_sent_timer.timer) ->(&llc->busy_state_timer.timer) ->rlock-AF_LLC ->wlock-AF_LLC ->&list->lock#29 ->cpu_hotplug_lock ->pfkey_mutex ->clock-AF_KEY ->wlock-AF_KEY ->sk_lock-AF_PPPOX ->slock-AF_PPPOX ->rlock-AF_KEY ->l2tp_ip_lock ->&pnsocks.lock ->resource_mutex ->clock-AF_PHONET ->rlock-AF_PHONET ->(work_completion)(&msk->work) ->l2cap_sk_list.lock ->sk_lock-AF_BLUETOOTH-BTPROTO_L2CAP ->slock-AF_BLUETOOTH-BTPROTO_L2CAP ->&chan->lock/1 ->chan_list_lock ->sk_lock-AF_UNIX ->slock-AF_UNIX ->(work_completion)(&(&psock->work)->work) ->clock-AF_UNIX ->&psock->ingress_lock ->&net->xfrm.xfrm_policy_lock ->&policy->lock ->&list->lock#33 ->&zone->lock ->rtnl_mutex ->ip6_ra_lock ->rlock-AF_CAIF ->sk_lock-AF_CAIF ->slock-AF_CAIF ->elock-AF_CAIF ->per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) ->sk_lock-AF_NFC ->slock-AF_NFC ->clock-AF_NFC ->rlock-AF_NFC ->&list->lock#34 ->unix_gc_lock ->&local->services_lock ->(console_sem).lock ->base_sockets.lock ->clock-AF_ISDN ->crypto_default_null_skcipher_lock ->sk_lock-AF_X25 ->slock-AF_X25 ->sk_lock-AF_BLUETOOTH-BTPROTO_RFCOMM ->slock-AF_BLUETOOTH-BTPROTO_RFCOMM ->clock-AF_BLUETOOTH ->rfcomm_sk_list.lock ->&d->lock ->&list->lock#36 ->sk_lock-AF_QIPCRTR ->slock-AF_QIPCRTR ->rlock-AF_PPPOX ->wlock-AF_PPPOX ->sk_lock-AF_BLUETOOTH-BTPROTO_SCO ->slock-AF_BLUETOOTH-BTPROTO_SCO ->sco_sk_list.lock ->&ping_table.lock ->krc.lock ->raw_sk_list.lock ->raw_notifier_lock ->sk_lock-AF_AX25 ->slock-AF_AX25 ->nfnl_grp_active_lock ->rtnl_mutex.wait_lock ->ip6_sk_fl_lock ->ip6_fl_lock ->&match->lock ->sk_lock-AF_PHONET ->slock-AF_PHONET ->&list->lock#42 ->isotp_notifier_lock ->&data->lock ->&bsd_socket_locks[i] ->&fsnotify_mark_srcu ->&sb->s_type->i_lock_key#22 ->&s->s_inode_list_lock ->sb_internal ->inode_hash_lock ->&n->list_lock ->raw_lock ->rds_ib_devices_lock ->cmtp_sk_list.lock ->sk_lock-AF_SMC/1 ->(work_completion)(&smc->tcp_listen_work) ->&dir->lock#2 ->console_owner_lock ->console_owner ->k-sk_lock-AF_INET ->k-slock-AF_INET ->data_sockets.lock ->sk_lock-AF_ISDN ->slock-AF_ISDN ->&rng->jent_lock ->bnep_sk_list.lock FD: 53 BD: 6 +.+.: &u->lock ->clock-AF_UNIX ->&u->lock/1 ->&sk->sk_peer_lock ->rlock-AF_UNIX ->&u->peer_wait ->&ei->socket.wq.wait 
->&f->f_owner.lock FD: 9 BD: 8 +...: clock-AF_UNIX ->&c->lock ->pool_lock#2 ->&obj_hash[i].lock FD: 34 BD: 7 +.+.: &u->peer_wait ->&p->pi_lock ->&ei->socket.wq.wait FD: 1 BD: 10 +.+.: rlock-AF_UNIX FD: 260 BD: 2 .+.+: sb_writers#4 ->mount_lock ->tk_core.seq.seqcount ->mmu_notifier_invalidate_range_start ->&____s->seqcount ->pool_lock#2 ->&c->lock ->&journal->j_state_lock ->jbd2_handle ->&obj_hash[i].lock ->&sb->s_type->i_lock_key#22 ->&wb->list_lock ->&wb->work_lock ->&type->i_mutex_dir_key#3 ->&type->i_mutex_dir_key#3/1 ->batched_entropy_u8.lock ->kfence_freelist_lock ->&meta->lock ->per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) ->&xa->xa_lock#6 ->lock#4 ->&mapping->private_lock ->&dd->lock ->bit_wait_table + i ->&rq->__lock ->remove_cache_srcu ->&sb->s_type->i_mutex_key#8 ->tomoyo_ss ->&s->s_inode_list_lock ->sb_internal ->inode_hash_lock ->&fsnotify_mark_srcu ->&dentry->d_lock ->&ei->xattr_sem ->stock_lock ->&ei->i_prealloc_lock ->&ei->i_es_lock ->lock#5 ->&lruvec->lru_lock ->rcu_node_0 ->integrity_iint_lock ->&sb->s_type->i_mutex_key#8/4 ->&sem->wait_lock ->&p->pi_lock ->&iint->mutex ->&rcu_state.expedited_wq ->quarantine_lock ->&____s->seqcount#2 ->&cfs_rq->removed.lock ->&base->lock ->fs_reclaim ->&n->list_lock ->mapping.invalidate_lock ->&folio_wait_table[i] ->pgd_lock ->key ->pcpu_lock ->percpu_counters_lock ->&journal->j_wait_transaction_locked ->&sbi->s_writepages_rwsem ->&journal->j_wait_commit ->&journal->j_wait_done_commit ->&fq->mq_flush_lock ->&x->wait#26 ->(&timer.timer) ->&journal->j_list_lock ->(console_sem).lock ->&journal->j_barrier FD: 1 BD: 4112 +.+.: &pid->lock FD: 1 BD: 26 +.+.: &new_ns->ns_lock FD: 208 BD: 1 ++++: &type->s_umount_key#31 ->&lru->node[i].lock ->&dentry->d_lock ->&obj_hash[i].lock ->pool_lock#2 ->&sb->s_type->i_lock_key#22 ->&journal->j_state_lock ->&p->alloc_lock ->(work_completion)(&sbi->s_error_work) ->key#3 ->key#4 ->&sbi->s_error_lock ->mmu_notifier_invalidate_range_start ->tk_core.seq.seqcount ->&base->lock ->&fq->mq_flush_lock ->&rq->__lock ->bit_wait_table + i ->ext4_li_mtx ->(console_sem).lock ->mount_lock ->&xa->xa_lock#6 ->&eli->li_list_mtx ->&wb->list_lock ->&sbi->s_writepages_rwsem ->rcu_node_0 ->&bdi->wb_waitq ->&cfs_rq->removed.lock ->&s->s_inode_list_lock ->&ei->i_es_lock ->inode_hash_lock ->&fsnotify_mark_srcu ->&ei->i_prealloc_lock ->integrity_iint_lock ->&journal->j_list_lock FD: 1 BD: 2 +.+.: (work_completion)(&sbi->s_error_work) FD: 1 BD: 148 ....: key#3 FD: 1 BD: 145 ....: key#4 FD: 4 BD: 5 +.+.: &eli->li_list_mtx ->&obj_hash[i].lock ->pool_lock#2 FD: 148 BD: 144 ++++: jbd2_handle ->mmu_notifier_invalidate_range_start ->&____s->seqcount ->pool_lock#2 ->&c->lock ->&ret->b_state_lock ->&journal->j_revoke_lock ->&ei->i_raw_lock ->&journal->j_wait_updates ->&mapping->private_lock ->&meta_group_info[i]->alloc_sem ->tk_core.seq.seqcount ->inode_hash_lock ->batched_entropy_u32.lock ->&ei->i_es_lock ->&sb->s_type->i_lock_key#22 ->&journal->j_state_lock ->&rq->__lock ->lock#4 ->lock#5 ->&ei->i_data_sem ->&xa->xa_lock#6 ->&sbi->s_orphan_lock ->bit_wait_table + i ->&obj_hash[i].lock ->&journal->j_list_lock ->&base->lock ->&dd->lock ->&rq_wait->wait ->rcu_node_0 ->stock_lock ->&ei->i_prealloc_lock ->&(ei->i_block_reservation_lock) ->key#4 ->&____s->seqcount#2 ->&bgl->locks[i].lock ->&sem->wait_lock ->&p->pi_lock ->&journal->j_wait_reserved ->&cfs_rq->removed.lock ->&rcu_state.expedited_wq ->&n->list_lock ->&folio_wait_table[i] ->&lock->wait_lock ->batched_entropy_u8.lock ->kfence_freelist_lock ->remove_cache_srcu ->pgd_lock ->key ->pcpu_lock 
->percpu_counters_lock ->&meta->lock ->quarantine_lock ->&ei->i_data_sem/1 ->key#28 FD: 74 BD: 149 +.+.: &ret->b_state_lock ->&journal->j_list_lock ->&obj_hash[i].lock ->bit_wait_table + i FD: 73 BD: 4127 +.+.: &journal->j_list_lock ->&sb->s_type->i_lock_key#3 ->&wb->list_lock ->key#14 ->&wb->work_lock ->&obj_hash[i].lock ->&c->lock ->pool_lock#2 ->&meta->lock ->kfence_freelist_lock ->bit_wait_table + i ->quarantine_lock FD: 1 BD: 147 +.+.: &journal->j_revoke_lock FD: 1 BD: 148 +.+.: &ei->i_raw_lock FD: 29 BD: 149 ....: &journal->j_wait_updates ->&p->pi_lock FD: 33 BD: 4157 ..-.: &wb->work_lock ->&obj_hash[i].lock ->&base->lock FD: 54 BD: 145 ++++: &meta_group_info[i]->alloc_sem ->mmu_notifier_invalidate_range_start ->pool_lock#2 ->tk_core.seq.seqcount ->&dd->lock ->&x->wait#26 ->&obj_hash[i].lock ->rcu_node_0 ->&rq->__lock ->&base->lock ->(&timer.timer) ->&fq->mq_flush_lock ->&bgl->locks[i].lock FD: 153 BD: 6 .+.+: sb_internal ->mmu_notifier_invalidate_range_start ->pool_lock#2 ->&journal->j_state_lock ->jbd2_handle ->&obj_hash[i].lock ->&c->lock ->rcu_node_0 ->&rq->__lock ->&____s->seqcount#2 ->&____s->seqcount ->remove_cache_srcu ->&base->lock ->&n->list_lock ->quarantine_lock ->&cfs_rq->removed.lock ->&rcu_state.expedited_wq ->&journal->j_wait_transaction_locked FD: 2 BD: 4125 ++++: &ei->i_prealloc_lock ->&pa->pa_lock#2 FD: 31 BD: 1 .+.+: file_rwsem ->&ctx->flc_lock ->&rq->__lock ->&cfs_rq->removed.lock ->&obj_hash[i].lock ->rcu_node_0 FD: 2 BD: 2 +.+.: &ctx->flc_lock ->&fll->lock FD: 1 BD: 3 +.+.: &fll->lock FD: 229 BD: 3 +.+.: &type->i_mutex_dir_key#3/1 ->rename_lock.seqcount ->&dentry->d_lock ->fs_reclaim ->&ei->i_es_lock ->&ei->i_data_sem ->mmu_notifier_invalidate_range_start ->&____s->seqcount ->pool_lock#2 ->&xa->xa_lock#6 ->lock#4 ->&mapping->private_lock ->tk_core.seq.seqcount ->&dd->lock ->&obj_hash[i].lock ->bit_wait_table + i ->&rq->__lock ->inode_hash_lock ->&journal->j_state_lock ->&sb->s_type->i_lock_key#22 ->tomoyo_ss ->&s->s_inode_list_lock ->&ei->xattr_sem ->jbd2_handle ->&c->lock ->&n->list_lock ->&sb->s_type->i_mutex_key#8 ->&xa->xa_lock#12 ->stock_lock ->&____s->seqcount#2 ->&fsnotify_mark_srcu ->&type->i_mutex_dir_key#3 ->&wb->list_lock ->sb_internal ->rcu_node_0 ->&rcu_state.gp_wq ->remove_cache_srcu ->batched_entropy_u8.lock ->kfence_freelist_lock ->&rcu_state.expedited_wq ->per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) ->quarantine_lock ->&cfs_rq->removed.lock ->&journal->j_wait_commit ->&journal->j_wait_done_commit ->key#3 ->key#15 ->&journal->j_wait_transaction_locked ->&meta->lock ->&u->bindlock ->&base->lock ->pgd_lock ->key ->pcpu_lock ->percpu_counters_lock FD: 141 BD: 1 +.+.: &type->s_umount_key#32/1 ->fs_reclaim ->pool_lock#2 ->pcpu_alloc_mutex ->shrinker_rwsem ->list_lrus_mutex ->&c->lock ->&____s->seqcount ->sb_lock ->mmu_notifier_invalidate_range_start ->&sb->s_type->i_lock_key#25 ->&s->s_inode_list_lock ->tk_core.seq.seqcount ->&sb->s_type->i_mutex_key#11 ->&dentry->d_lock FD: 41 BD: 3 +.+.: &sb->s_type->i_lock_key#25 ->&dentry->d_lock FD: 130 BD: 2 +.+.: &sb->s_type->i_mutex_key#11 ->fs_reclaim ->pool_lock#2 ->&dentry->d_lock ->&c->lock ->&____s->seqcount ->mmu_notifier_invalidate_range_start ->&sb->s_type->i_lock_key#25 ->&s->s_inode_list_lock ->tk_core.seq.seqcount FD: 131 BD: 1 +.+.: &type->s_umount_key#33 ->sb_lock ->fs_reclaim ->pool_lock#2 ->&dentry->d_lock ->&lru->node[i].lock ->&obj_hash[i].lock FD: 43 BD: 1 +.+.: &type->s_umount_key#34 ->sb_lock ->&dentry->d_lock FD: 140 BD: 1 +.+.: &type->s_umount_key#35/1 ->fs_reclaim ->pcpu_alloc_mutex 
->shrinker_rwsem ->list_lrus_mutex ->sb_lock ->pool_lock#2 ->mmu_notifier_invalidate_range_start ->&sb->s_type->i_lock_key#26 ->&s->s_inode_list_lock ->tk_core.seq.seqcount ->&dentry->d_lock FD: 41 BD: 4 +.+.: &sb->s_type->i_lock_key#26 ->&dentry->d_lock FD: 1 BD: 1 +.+.: redirect_lock FD: 350 BD: 1 +.+.: &tty->atomic_write_lock ->fs_reclaim ->&____s->seqcount ->pool_lock#2 ->&obj_hash[i].lock ->&mm->mmap_lock ->&tty->termios_rwsem FD: 4 BD: 7 +.+.: &ldata->output_lock ->&port_lock_key FD: 140 BD: 1 +.+.: &type->s_umount_key#36/1 ->fs_reclaim ->pcpu_alloc_mutex ->shrinker_rwsem ->list_lrus_mutex ->sb_lock ->pool_lock#2 ->mmu_notifier_invalidate_range_start ->&sb->s_type->i_lock_key#27 ->&s->s_inode_list_lock ->tk_core.seq.seqcount ->fuse_mutex ->&dentry->d_lock FD: 41 BD: 4136 +.+.: &sb->s_type->i_lock_key#27 ->&dentry->d_lock FD: 1 BD: 2 +.+.: fuse_mutex FD: 141 BD: 1 +.+.: &type->s_umount_key#37/1 ->fs_reclaim ->pool_lock#2 ->pcpu_alloc_mutex ->shrinker_rwsem ->list_lrus_mutex ->sb_lock ->&c->lock ->&____s->seqcount ->mmu_notifier_invalidate_range_start ->&sb->s_type->i_lock_key#28 ->&s->s_inode_list_lock ->tk_core.seq.seqcount ->pstore_sb_lock ->&dentry->d_lock FD: 41 BD: 2 +.+.: &sb->s_type->i_lock_key#28 ->&dentry->d_lock FD: 1 BD: 2 +.+.: pstore_sb_lock FD: 144 BD: 1 +.+.: &type->s_umount_key#38/1 ->fs_reclaim ->pcpu_alloc_mutex ->shrinker_rwsem ->list_lrus_mutex ->sb_lock ->pool_lock#2 ->mmu_notifier_invalidate_range_start ->&sb->s_type->i_lock_key#29 ->&s->s_inode_list_lock ->tk_core.seq.seqcount ->bpf_preload_lock ->&dentry->d_lock FD: 41 BD: 2 +.+.: &sb->s_type->i_lock_key#29 ->&dentry->d_lock FD: 132 BD: 2 +.+.: bpf_preload_lock ->(kmod_concurrent_max).lock ->fs_reclaim ->pool_lock#2 ->&obj_hash[i].lock ->&x->wait#17 ->&rq->__lock ->pgd_lock ->key ->pcpu_lock ->percpu_counters_lock ->&____s->seqcount ->running_helpers_waitq.lock FD: 29 BD: 1 ++++: uts_sem ->hostname_poll.wait.lock ->&rq->__lock FD: 131 BD: 1 +.+.: &type->s_umount_key#39 ->sb_lock ->fs_reclaim ->pool_lock#2 ->&dentry->d_lock ->&lru->node[i].lock ->&obj_hash[i].lock FD: 133 BD: 3 ++++: &type->i_mutex_dir_key#5 ->fs_reclaim ->&dentry->d_lock ->rename_lock.seqcount ->tomoyo_ss ->&sbinfo->stat_lock ->pool_lock#2 ->mmu_notifier_invalidate_range_start ->&sb->s_type->i_lock_key ->&s->s_inode_list_lock ->tk_core.seq.seqcount ->batched_entropy_u32.lock ->&c->lock ->&____s->seqcount ->&n->list_lock ->rcu_node_0 ->&rq->__lock ->&sem->wait_lock ->remove_cache_srcu ->&obj_hash[i].lock ->&____s->seqcount#2 ->&rcu_state.expedited_wq ->batched_entropy_u8.lock ->kfence_freelist_lock ->&cfs_rq->removed.lock ->&rcu_state.gp_wq ->pgd_lock ->stock_lock ->key ->pcpu_lock ->percpu_counters_lock ->&p->pi_lock FD: 143 BD: 2 .+.+: sb_writers#5 ->mount_lock ->&type->i_mutex_dir_key#5 ->&type->i_mutex_dir_key#5/1 ->tk_core.seq.seqcount ->&sb->s_type->i_lock_key ->&wb->list_lock ->&sb->s_type->i_mutex_key#12 ->&rq->__lock ->&sem->wait_lock ->&p->pi_lock ->&s->s_inode_list_lock ->&info->lock ->&sbinfo->stat_lock ->&xa->xa_lock#6 ->&obj_hash[i].lock ->pool_lock#2 ->&fsnotify_mark_srcu ->tomoyo_ss ->&xattrs->lock ->fs_reclaim ->&c->lock ->lock#4 ->lock#5 ->&lruvec->lru_lock ->&dentry->d_lock ->rcu_node_0 ->pgd_lock ->stock_lock ->key ->pcpu_lock ->percpu_counters_lock ->&cfs_rq->removed.lock FD: 133 BD: 4 +.+.: &sb->s_type->i_mutex_key#12 ->&xattrs->lock ->tk_core.seq.seqcount ->fs_reclaim ->&____s->seqcount ->pool_lock#2 ->&xa->xa_lock#6 ->lock#4 ->&info->lock ->&sb->s_type->i_lock_key ->&wb->list_lock ->rcu_node_0 ->&rq->__lock ->key#9 
->&dentry->d_lock ->&sb->s_type->i_mutex_key#12/4 ->tomoyo_ss ->&mapping->i_mmap_rwsem ->lock#5 ->&lruvec->lru_lock ->&obj_hash[i].lock ->per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) ->&cfs_rq->removed.lock FD: 139 BD: 3 +.+.: &type->i_mutex_dir_key#5/1 ->rename_lock.seqcount ->fs_reclaim ->&dentry->d_lock ->tomoyo_ss ->&sbinfo->stat_lock ->mmu_notifier_invalidate_range_start ->&sb->s_type->i_lock_key ->&s->s_inode_list_lock ->tk_core.seq.seqcount ->batched_entropy_u32.lock ->&c->lock ->&____s->seqcount ->pool_lock#2 ->&obj_hash[i].lock ->&u->bindlock ->&sb->s_type->i_mutex_key#12/4 ->&sb->s_type->i_mutex_key#12 ->&fsnotify_mark_srcu ->&sem->wait_lock ->&rq->__lock ->&n->list_lock ->lock#4 ->lock#5 ->&lruvec->lru_lock ->&info->lock ->&xa->xa_lock#6 ->&dentry->d_lock/1 ->remove_cache_srcu ->&____s->seqcount#2 ->rcu_node_0 ->&rcu_state.expedited_wq ->&rcu_state.gp_wq ->batched_entropy_u8.lock ->kfence_freelist_lock ->&cfs_rq->removed.lock ->pgd_lock ->stock_lock ->key ->pcpu_lock ->percpu_counters_lock ->&p->pi_lock FD: 6 BD: 83 +.+.: &f->f_lock ->fasync_lock FD: 1 BD: 2 ....: hostname_poll.wait.lock FD: 236 BD: 1 .+.+: dup_mmap_sem ->&mm->mmap_lock ->&rq->__lock ->&cfs_rq->removed.lock ->&obj_hash[i].lock FD: 134 BD: 127 +.+.: &mm->mmap_lock/1 ->pool_lock#2 ->&c->lock ->&____s->seqcount ->fs_reclaim ->&vma->vm_lock->lock ->&mapping->i_mmap_rwsem ->&anon_vma->rwsem ->mmu_notifier_invalidate_range_start ->&mm->page_table_lock ->ptlock_ptr(page) ->ptlock_ptr(page)#2 ->&mm->context.lock ->&obj_hash[i].lock ->rcu_node_0 ->&rcu_state.expedited_wq ->&rq->__lock ->&zone->lock ->&n->list_lock ->remove_cache_srcu ->&sem->wait_lock ->&p->pi_lock ->&cfs_rq->removed.lock ->batched_entropy_u8.lock ->kfence_freelist_lock ->quarantine_lock ->&base->lock ->pgd_lock ->key ->pcpu_lock ->percpu_counters_lock ->stock_lock ->&____s->seqcount#2 ->per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) ->key#23 ->lock#10 ->&meta->lock ->pool_lock FD: 28 BD: 128 +.+.: &mm->context.lock ->&rq->__lock FD: 1 BD: 9 .+.+: &xattrs->lock FD: 1 BD: 3 ....: key#5 FD: 133 BD: 8 +.+.: &u->bindlock ->&net->unx.table.locks[i] ->&net->unx.table.locks[i]/1 ->&bsd_socket_locks[i] ->fs_reclaim ->pool_lock#2 ->batched_entropy_u32.lock FD: 41 BD: 12 +.+.: &net->unx.table.locks[i]/1 ->&dentry->d_lock FD: 1 BD: 11 +.+.: &bsd_socket_locks[i] FD: 250 BD: 1 +.+.: &u->iolock ->rlock-AF_UNIX ->&mm->mmap_lock ->&obj_hash[i].lock ->pool_lock#2 ->&meta->lock ->kfence_freelist_lock ->&u->peer_wait ->quarantine_lock ->&rq->__lock ->&____s->seqcount ->&u->lock ->&base->lock ->&dir->lock ->rcu_node_0 ->stock_lock ->per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) ->&rcu_state.expedited_wq ->&cfs_rq->removed.lock ->&sem->wait_lock ->&p->pi_lock FD: 33 BD: 3780 ..-.: &ei->socket.wq.wait ->&p->pi_lock ->&ep->lock ->&ep->poll_wait/1 FD: 1 BD: 4196 ....: &wq#3 FD: 43 BD: 7 +.+.: &u->lock/1 ->&sk->sk_peer_lock ->&dentry->d_lock ->&sk->sk_peer_lock/1 FD: 149 BD: 1 +.+.: &group->mark_mutex ->&fsnotify_mark_srcu ->fs_reclaim ->&____s->seqcount ->pool_lock#2 ->&c->lock ->lock ->ucounts_lock ->&mark->lock ->&conn->lock ->&sb->s_type->i_lock_key#5 ->&sb->s_type->i_lock_key#22 ->&sb->s_type->i_lock_key ->&____s->seqcount#2 ->&rq->__lock ->&n->list_lock ->remove_cache_srcu ->&lock->wait_lock ->rcu_node_0 ->batched_entropy_u8.lock ->kfence_freelist_lock ->&cfs_rq->removed.lock ->&obj_hash[i].lock ->pgd_lock ->stock_lock ->key ->pcpu_lock ->percpu_counters_lock FD: 10 BD: 238 +.+.: &group->inotify_data.idr_lock ->pool_lock#2 ->&obj_hash[i].lock ->&c->lock ->&n->list_lock FD: 3 BD: 2 +.+.: 
&mark->lock ->&fsnotify_mark_srcu ->&conn->lock FD: 1 BD: 7 +.+.: &conn->lock FD: 1 BD: 1 +.+.: &evdev->client_lock FD: 237 BD: 1 +.+.: &evdev->mutex ->&dev->mutex#2 ->&mm->mmap_lock FD: 253 BD: 7 +.+.: sk_lock-AF_NETLINK ->slock-AF_NETLINK ->&mm->mmap_lock ->fs_reclaim ->pool_lock#2 ->free_vmap_area_lock ->vmap_area_lock ->&____s->seqcount ->pcpu_alloc_mutex ->&obj_hash[i].lock ->pack_mutex ->batched_entropy_u32.lock ->text_mutex ->&fp->aux->used_maps_mutex ->rcu_node_0 ->&rq->__lock ->stock_lock ->&f->f_lock ->&rcu_state.expedited_wq ->clock-AF_NETLINK FD: 30 BD: 8 +...: slock-AF_NETLINK ->&sk->sk_lock.wq FD: 1 BD: 3772 ..-.: rlock-AF_NETLINK FD: 1 BD: 7 ....: &nlk->wait FD: 134 BD: 82 +.+.: (work_completion)(&ht->run_work) ->&ht->mutex ->&rq->__lock FD: 132 BD: 83 +.+.: &ht->mutex ->fs_reclaim ->pool_lock#2 ->batched_entropy_u32.lock ->rhashtable_bucket ->&ht->lock ->&c->lock ->&____s->seqcount ->&n->list_lock ->batched_entropy_u8.lock ->kfence_freelist_lock ->remove_cache_srcu ->&obj_hash[i].lock ->&rq->__lock ->rcu_node_0 ->&rcu_state.expedited_wq ->&meta->lock ->&____s->seqcount#2 ->quarantine_lock FD: 1 BD: 3517 ....: rhashtable_bucket/1 FD: 12 BD: 94 +.+.: &ht->lock ->&obj_hash[i].lock ->pool_lock#2 FD: 1 BD: 10 +...: clock-AF_NETLINK FD: 1 BD: 7 ....: genl_sk_destructing_waitq.lock FD: 1 BD: 7 ....: wlock-AF_NETLINK FD: 184 BD: 2 +.+.: (work_completion)(&w->w) ->nfc_devlist_mutex ->&obj_hash[i].lock ->pool_lock#2 ->&meta->lock ->kfence_freelist_lock ->&rq->__lock ->rcu_node_0 ->&rcu_state.expedited_wq FD: 29 BD: 8 +.+.: &genl_data->genl_data_mutex ->&rq->__lock ->rcu_node_0 FD: 1 BD: 4 +...: &rdev->beacon_registrations_lock FD: 1 BD: 76 +...: &rdev->mgmt_registrations_lock FD: 1 BD: 80 +...: &wdev->pmsr_lock FD: 1 BD: 73 +.+.: reg_indoor_lock FD: 1088 BD: 1 .+.+: sb_writers#6 ->mount_lock ->&sb->s_type->i_mutex_key#10 FD: 2 BD: 8 +.+.: &sk->sk_peer_lock ->&sk->sk_peer_lock/1 FD: 1 BD: 4124 ....: key#6 FD: 1 BD: 4124 ....: key#7 FD: 1 BD: 4124 ....: key#8 FD: 32 BD: 7 ....: &group->notification_waitq ->&p->pi_lock ->&ep->lock FD: 1 BD: 7 +.+.: &group->notification_lock FD: 1 BD: 1 ....: &client->wait FD: 532 BD: 1 +.+.: &pipe->mutex/1 ->&pipe->rd_wait ->&rq->__lock ->&lock->wait_lock ->&pipe->wr_wait ->fs_reclaim ->&____s->seqcount ->pool_lock#2 ->&mm->mmap_lock ->&cfs_rq->removed.lock ->&obj_hash[i].lock ->rcu_node_0 ->per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) ->stock_lock ->sk_lock-AF_NETLINK ->slock-AF_NETLINK ->free_vmap_area_lock ->vmap_area_lock ->init_mm.page_table_lock ->&c->lock ->nfnl_subsys_ctnetlink ->purge_vmap_area_lock ->&sighand->siglock ->remove_cache_srcu ->&p->pi_lock ->&base->lock ->&rcu_state.expedited_wq ->rlock-AF_NETLINK ->&n->list_lock ->&____s->seqcount#2 ->nfnl_subsys_nftables ->&nft_net->commit_mutex ->&data->lock ->sk_lock-AF_INET6 ->slock-AF_INET6 FD: 32 BD: 4 ....: &pipe->rd_wait ->&p->pi_lock ->&ep->lock FD: 1 BD: 4121 ....: &sem->wait_lock FD: 32 BD: 4 ....: &pipe->wr_wait ->&p->pi_lock ->&ep->lock FD: 48 BD: 1 .+.+: sb_writers#7 ->tk_core.seq.seqcount ->mount_lock ->&rq->__lock ->&cfs_rq->removed.lock ->&obj_hash[i].lock ->pool_lock#2 ->rcu_node_0 ->&rcu_state.expedited_wq FD: 1 BD: 4097 ....: key#9 FD: 44 BD: 5 +.+.: &sb->s_type->i_mutex_key#12/4 ->&dentry->d_lock ->tk_core.seq.seqcount ->rename_lock ->&rq->__lock ->rcu_node_0 ->&cfs_rq->removed.lock ->&obj_hash[i].lock ->pool_lock#2 FD: 4 BD: 4196 +.+.: &dentry->d_lock/2 ->&dentry->d_lock/3 FD: 3 BD: 4197 +.+.: &dentry->d_lock/3 ->&____s->seqcount#4 FD: 1 BD: 4199 +.+.: &____s->seqcount#4/1 FD: 
252 BD: 3 +.+.: sk_lock-AF_UNIX ->slock-AF_UNIX ->&rq->__lock ->&obj_hash[i].lock ->&psock->ingress_lock ->&mm->mmap_lock ->fs_reclaim ->&c->lock ->free_vmap_area_lock ->vmap_area_lock ->&____s->seqcount ->stock_lock ->pcpu_alloc_mutex ->pack_mutex ->batched_entropy_u32.lock ->text_mutex ->&fp->aux->used_maps_mutex ->reuseport_lock FD: 1 BD: 4 +...: slock-AF_UNIX FD: 1 BD: 1 ....: &rs->lock#2 FD: 55 BD: 3 +.+.: oom_adj_mutex ->&p->alloc_lock ->rcu_node_0 ->&rq->__lock ->&rcu_state.gp_wq ->oom_adj_mutex.wait_lock FD: 212 BD: 2 +.+.: &ep->mtx ->fs_reclaim ->&____s->seqcount ->pool_lock#2 ->&c->lock ->&f->f_lock ->&ei->socket.wq.wait ->&ep->lock ->&group->notification_waitq ->&group->notification_lock ->&sighand->signalfd_wqh ->&sighand->siglock ->&pipe->rd_wait ->&obj_hash[i].lock ->key#11 ->&rq->__lock ->&lock->wait_lock ->&pipe->wr_wait ->rcu_node_0 ->stock_lock ->rlock-AF_PACKET ->wlock-AF_PACKET ->wakeup_ida.xa_lock ->&x->wait#9 ->&k->list_lock ->gdp_mutex ->lock ->&root->kernfs_rwsem ->bus_type_sem ->sysfs_symlink_target_lock ->uevent_sock_mutex ->subsys mutex#15 ->events_lock ->&dentry->d_lock ->&ws->lock ->&u->lock ->&ACCESS_PRIVATE(sdp, lock) ->wakeup_srcu ->&x->wait#3 ->(&ws->timer) ->&base->lock ->deferred_probe_mutex ->device_links_lock ->mmu_notifier_invalidate_range_start ->remove_cache_srcu ->deleted_ws.lock ->&n->list_lock ->&____s->seqcount#2 ->batched_entropy_u8.lock ->kfence_freelist_lock ->&meta->lock ->&sem->wait_lock ->&p->pi_lock ->quarantine_lock ->&cfs_rq->removed.lock ->&ep->mtx/1 ->&ep->poll_wait ->kernfs_idr_lock ->pgd_lock ->key ->pcpu_lock ->percpu_counters_lock ->uevent_sock_mutex.wait_lock ->&rcu_state.expedited_wq FD: 214 BD: 1 +.+.: epnested_mutex ->&ep->mtx ->&ep->mtx/1 ->&lock->wait_lock ->&p->pi_lock ->&rq->__lock ->epnested_mutex.wait_lock FD: 31 BD: 3795 ...-: &ep->lock ->&ep->wq ->&ws->lock FD: 32 BD: 148 ....: &sighand->signalfd_wqh ->&ep->lock ->&p->pi_lock FD: 917 BD: 1 .+.+: sb_writers#8 ->mount_lock ->tk_core.seq.seqcount ->&sb->s_type->i_lock_key#24 ->&wb->list_lock ->&type->i_mutex_dir_key#4 ->fs_reclaim ->pool_lock#2 ->&mm->mmap_lock ->&of->mutex ->&obj_hash[i].lock ->remove_cache_srcu ->&c->lock ->&n->list_lock ->&rq->__lock ->&root->kernfs_iattr_rwsem ->&dentry->d_lock ->tomoyo_ss ->&sb->s_type->i_mutex_key#13 ->iattr_mutex ->batched_entropy_u8.lock ->kfence_freelist_lock ->&meta->lock ->&____s->seqcount#2 ->&____s->seqcount ->&xattrs->lock ->&cfs_rq->removed.lock FD: 29 BD: 152 ..-.: &x->wait#26 ->&p->pi_lock FD: 3 BD: 10 +.+.: swap_lock ->&p->lock#2 FD: 156 BD: 1 .+.+: kn->active ->fs_reclaim ->pool_lock#2 ->&kernfs_locks->open_file_mutex[count] ->&k->list_lock ->uevent_sock_mutex ->&obj_hash[i].lock ->&c->lock ->&____s->seqcount ->&n->list_lock ->remove_cache_srcu FD: 144 BD: 67 +.+.: &kernfs_locks->open_file_mutex[count] ->fs_reclaim ->pool_lock#2 ->&obj_hash[i].lock ->krc.lock ->&c->lock ->&____s->seqcount ->&rq->__lock ->remove_cache_srcu ->&n->list_lock ->rcu_node_0 ->batched_entropy_u8.lock ->kfence_freelist_lock ->&lock->wait_lock ->&rcu_state.expedited_wq ->&____s->seqcount#2 ->&group->rtpoll_trigger_lock ->&p->pi_lock ->&x->wait ->&rnp->exp_lock ->rcu_state.exp_mutex ->rcu_state.exp_mutex.wait_lock ->&cfs_rq->removed.lock FD: 131 BD: 2 +.+.: &sb->s_type->i_mutex_key#13 ->tk_core.seq.seqcount ->&root->kernfs_iattr_rwsem ->&sem->wait_lock ->&p->pi_lock ->&rq->__lock ->&cfs_rq->removed.lock ->&obj_hash[i].lock FD: 915 BD: 6 +.+.: &of->mutex ->&rq->__lock ->&p->pi_lock ->cgroup_mutex ->cgroup_mutex.wait_lock FD: 29 BD: 3796 ..-.: 
&ep->wq ->&p->pi_lock FD: 155 BD: 1 .+.+: kn->active#2 ->fs_reclaim ->&c->lock ->&____s->seqcount ->pool_lock#2 ->&kernfs_locks->open_file_mutex[count] ->uevent_sock_mutex ->&obj_hash[i].lock ->&n->list_lock ->quarantine_lock ->&rq->__lock ->remove_cache_srcu FD: 155 BD: 1 .+.+: kn->active#3 ->fs_reclaim ->&kernfs_locks->open_file_mutex[count] ->&c->lock ->&____s->seqcount ->pool_lock#2 ->&rq->__lock ->uevent_sock_mutex ->&obj_hash[i].lock ->&n->list_lock ->quarantine_lock ->remove_cache_srcu ->batched_entropy_u8.lock ->kfence_freelist_lock FD: 35 BD: 2 +.+.: (work_completion)(&(&krcp->krw_arr[i].rcu_work)->work) ->krc.lock ->&obj_hash[i].lock ->&rq->__lock ->rcu_node_0 ->&rcu_state.expedited_wq FD: 149 BD: 1 .+.+: kn->active#4 ->&rq->__lock ->fs_reclaim ->&kernfs_locks->open_file_mutex[count] ->param_lock ->&c->lock ->pool_lock#2 ->&on->poll ->&n->list_lock ->remove_cache_srcu FD: 129 BD: 245 +.+.: iattr_mutex ->fs_reclaim ->&____s->seqcount ->pool_lock#2 ->&c->lock ->tk_core.seq.seqcount ->&rq->__lock FD: 1 BD: 74 +.+.: disk_events_mutex FD: 185 BD: 1 .+.+: kn->active#5 ->fs_reclaim ->&kernfs_locks->open_file_mutex[count] ->pool_lock#2 ->uevent_sock_mutex ->&obj_hash[i].lock ->&c->lock ->&____s->seqcount ->quarantine_lock ->&n->list_lock ->batched_entropy_u8.lock ->kfence_freelist_lock ->&meta->lock ->&rq->__lock ->&device->physical_node_lock ->udc_lock ->remove_cache_srcu ->fw_lock ->rcu_node_0 ->&lock->wait_lock ->&p->pi_lock ->uevent_sock_mutex.wait_lock ->&rfkill->lock ->&cfs_rq->removed.lock ->&base->lock ->&____s->seqcount#2 ->pgd_lock ->key ->pcpu_lock ->percpu_counters_lock ->&rcu_state.expedited_wq FD: 145 BD: 1 .+.+: kn->active#6 ->fs_reclaim ->&kernfs_locks->open_file_mutex[count] ->&c->lock ->&n->list_lock FD: 145 BD: 1 .+.+: kn->active#7 ->fs_reclaim ->&kernfs_locks->open_file_mutex[count] FD: 145 BD: 1 .+.+: kn->active#8 ->fs_reclaim ->&kernfs_locks->open_file_mutex[count] ->&c->lock ->&____s->seqcount FD: 145 BD: 1 .+.+: kn->active#9 ->fs_reclaim ->&kernfs_locks->open_file_mutex[count] ->&c->lock FD: 145 BD: 1 .+.+: kn->active#10 ->fs_reclaim ->&c->lock ->&kernfs_locks->open_file_mutex[count] ->&____s->seqcount FD: 145 BD: 1 .+.+: kn->active#11 ->fs_reclaim ->&kernfs_locks->open_file_mutex[count] ->&c->lock ->&n->list_lock FD: 145 BD: 1 .+.+: kn->active#12 ->fs_reclaim ->&kernfs_locks->open_file_mutex[count] ->&c->lock FD: 145 BD: 1 .+.+: kn->active#13 ->fs_reclaim ->&kernfs_locks->open_file_mutex[count] FD: 145 BD: 1 .+.+: kn->active#14 ->fs_reclaim ->&c->lock ->&n->list_lock ->&____s->seqcount ->&kernfs_locks->open_file_mutex[count] ->pool_lock#2 ->&obj_hash[i].lock FD: 146 BD: 1 .+.+: kn->active#15 ->fs_reclaim ->&c->lock ->&kernfs_locks->open_file_mutex[count] ->dev_base_lock ->&n->list_lock ->&rq->__lock ->&____s->seqcount ->remove_cache_srcu FD: 146 BD: 1 .+.+: kn->active#16 ->fs_reclaim ->&kernfs_locks->open_file_mutex[count] ->dev_base_lock ->&c->lock ->&n->list_lock FD: 145 BD: 1 .+.+: kn->active#17 ->fs_reclaim ->&kernfs_locks->open_file_mutex[count] ->&c->lock ->&____s->seqcount ->&n->list_lock FD: 145 BD: 1 .+.+: kn->active#18 ->fs_reclaim ->&kernfs_locks->open_file_mutex[count] ->dev_base_lock ->&c->lock ->&____s->seqcount ->&____s->seqcount#2 ->&n->list_lock ->remove_cache_srcu FD: 146 BD: 1 .+.+: kn->active#19 ->fs_reclaim ->&c->lock ->&kernfs_locks->open_file_mutex[count] ->dev_base_lock FD: 146 BD: 1 .+.+: kn->active#20 ->fs_reclaim ->&c->lock ->&kernfs_locks->open_file_mutex[count] ->dev_base_lock FD: 150 BD: 1 .+.+: kn->active#21 ->fs_reclaim 
->pool_lock#2 ->&kernfs_locks->open_file_mutex[count] ->&dev->power.lock ->pci_lock FD: 145 BD: 1 .+.+: kn->active#22 ->fs_reclaim ->&kernfs_locks->open_file_mutex[count] FD: 2 BD: 8 ....: pci_lock ->pci_config_lock FD: 145 BD: 1 .+.+: kn->active#23 ->fs_reclaim ->&c->lock ->&kernfs_locks->open_file_mutex[count] FD: 31 BD: 1 ..-.: lib/debugobjects.c:101 FD: 33 BD: 2 +.+.: (debug_obj_work).work ->pool_lock#2 ->&rq->__lock ->quarantine_lock ->rcu_node_0 ->&rcu_state.expedited_wq ->&meta->lock ->kfence_freelist_lock FD: 145 BD: 1 .+.+: kn->active#24 ->fs_reclaim ->&kernfs_locks->open_file_mutex[count] FD: 145 BD: 1 .+.+: kn->active#25 ->fs_reclaim ->&kernfs_locks->open_file_mutex[count] FD: 145 BD: 1 .+.+: kn->active#26 ->fs_reclaim ->&kernfs_locks->open_file_mutex[count] FD: 144 BD: 1 .+.+: kn->active#27 ->fs_reclaim ->&kernfs_locks->open_file_mutex[count] ->&c->lock ->&n->list_lock ->remove_cache_srcu FD: 145 BD: 1 .+.+: kn->active#28 ->fs_reclaim ->&kernfs_locks->open_file_mutex[count] ->&c->lock ->&n->list_lock ->&____s->seqcount FD: 145 BD: 1 .+.+: kn->active#29 ->fs_reclaim ->&kernfs_locks->open_file_mutex[count] ->&c->lock ->&n->list_lock ->&rq->__lock ->&____s->seqcount ->remove_cache_srcu FD: 144 BD: 1 .+.+: kn->active#30 ->fs_reclaim ->&c->lock ->&kernfs_locks->open_file_mutex[count] ->remove_cache_srcu ->&n->list_lock FD: 145 BD: 1 .+.+: kn->active#31 ->fs_reclaim ->&kernfs_locks->open_file_mutex[count] ->&c->lock ->&n->list_lock FD: 144 BD: 1 .+.+: kn->active#32 ->fs_reclaim ->remove_cache_srcu ->&kernfs_locks->open_file_mutex[count] ->&c->lock ->&n->list_lock FD: 145 BD: 1 .+.+: kn->active#33 ->fs_reclaim ->&c->lock ->&kernfs_locks->open_file_mutex[count] ->&n->list_lock FD: 1 BD: 1 +.+.: &mousedev->client_lock FD: 38 BD: 8 +.+.: &mousedev->mutex#2 ->&dev->mutex#2 FD: 1 BD: 291 +.+.: rcu_state.exp_mutex.wait_lock FD: 1 BD: 1 +.+.: &sb->s_type->i_mutex_key#14 FD: 57 BD: 1 .+.+: mapping.invalidate_lock#2 ->mmu_notifier_invalidate_range_start ->&____s->seqcount ->pool_lock#2 ->&xa->xa_lock#6 ->lock#4 ->tk_core.seq.seqcount ->&dd->lock ->&rq->__lock ->&c->lock ->batched_entropy_u8.lock ->kfence_freelist_lock FD: 1 BD: 218 +.+.: uevent_sock_mutex.wait_lock FD: 145 BD: 1 .+.+: kn->active#34 ->fs_reclaim ->&kernfs_locks->open_file_mutex[count] FD: 33 BD: 1 ..-.: &(&wb->dwork)->timer FD: 167 BD: 1 +.+.: (wq_completion)writeback ->(work_completion)(&(&wb->dwork)->work) ->(work_completion)(&(&wb->bw_dwork)->work) FD: 165 BD: 2 +.+.: (work_completion)(&(&wb->dwork)->work) ->&wb->work_lock ->&wb->list_lock ->&p->sequence ->key#10 ->&sb->s_type->i_lock_key#22 ->&sbi->s_writepages_rwsem ->pool_lock#2 ->&dd->lock ->&obj_hash[i].lock ->&pl->lock ->&rq->__lock ->&bdi->wb_waitq FD: 2 BD: 4137 +.-.: &p->sequence ->key#13 FD: 1 BD: 4157 ..-.: key#10 FD: 145 BD: 1 .+.+: kn->active#35 ->fs_reclaim ->&kernfs_locks->open_file_mutex[count] FD: 145 BD: 1 .+.+: kn->active#36 ->fs_reclaim ->&c->lock ->&kernfs_locks->open_file_mutex[count] FD: 145 BD: 1 .+.+: kn->active#37 ->fs_reclaim ->&kernfs_locks->open_file_mutex[count] ->&c->lock ->&n->list_lock ->&____s->seqcount FD: 29 BD: 1 +.-.: (&journal->j_commit_timer) ->&p->pi_lock FD: 109 BD: 140 +.+.: &journal->j_checkpoint_mutex ->mmu_notifier_invalidate_range_start ->pool_lock#2 ->tk_core.seq.seqcount ->&dd->lock ->&obj_hash[i].lock ->&base->lock ->bit_wait_table + i ->&rq->__lock ->&journal->j_state_lock ->&fq->mq_flush_lock ->&lock->wait_lock ->&x->wait#26 ->&journal->j_list_lock ->&c->lock ->rcu_node_0 ->&ei->i_es_lock ->&mapping->private_lock 
->&meta->lock ->kfence_freelist_lock ->&sb->s_type->i_lock_key#3 ->lock#4 ->lock#5 ->&lruvec->lru_lock ->(&timer.timer) ->&____s->seqcount#2 ->&____s->seqcount ->&rcu_state.expedited_wq ->batched_entropy_u8.lock ->&cfs_rq->removed.lock ->quarantine_lock ->&n->list_lock ->&rq_wait->wait FD: 1 BD: 6 ....: &tags->lock FD: 29 BD: 148 ....: &journal->j_wait_transaction_locked ->&p->pi_lock FD: 146 BD: 1 .+.+: kn->active#38 ->fs_reclaim ->&c->lock ->&n->list_lock ->&kernfs_locks->open_file_mutex[count] ->i2c_dev_list_lock FD: 1 BD: 4137 ..-.: &memcg->move_lock FD: 1 BD: 149 +.+.: &sbi->s_md_lock FD: 1 BD: 1 ....: &journal->j_fc_wait FD: 1 BD: 1 +.+.: &journal->j_history_lock FD: 134 BD: 2 +.+.: &dev_instance->mutex ->fs_reclaim ->pool_lock#2 ->vicodec_core:1844:(hdl)->_lock ->&c->lock ->&vdev->fh_lock ->&rq->__lock ->&m2m_dev->job_spinlock ->&q->done_wq ->&q->mmap_lock ->&obj_hash[i].lock FD: 4 BD: 3 +.+.: vicodec_core:1844:(hdl)->_lock ->&obj_hash[i].lock ->pool_lock#2 FD: 1 BD: 5 ....: &vdev->fh_lock FD: 139 BD: 1 +.+.: &mdev->req_queue_mutex ->&dev_instance->mutex ->&vdev->fh_lock ->&mdev->graph_mutex ->vicodec_core:1844:(hdl)->_lock ->&obj_hash[i].lock ->pool_lock#2 ->vim2m:1183:(hdl)->_lock ->&dev->dev_mutex ->&dev->mutex#3 FD: 1 BD: 4 ....: &m2m_dev->job_spinlock FD: 1 BD: 4 ....: &q->done_wq FD: 1 BD: 4 +.+.: &q->mmap_lock FD: 134 BD: 2 +.+.: &dev->dev_mutex ->fs_reclaim ->pool_lock#2 ->vim2m:1183:(hdl)->_lock ->&obj_hash[i].lock ->&vdev->fh_lock ->&m2m_dev->job_spinlock ->&q->done_wq ->&q->mmap_lock FD: 4 BD: 3 +.+.: vim2m:1183:(hdl)->_lock ->&obj_hash[i].lock ->pool_lock#2 FD: 144 BD: 1 .+.+: kn->active#39 ->fs_reclaim ->&kernfs_locks->open_file_mutex[count] ->&c->lock ->&____s->seqcount ->&n->list_lock ->remove_cache_srcu FD: 1 BD: 1 +.+.: fh->state->lock FD: 1 BD: 1 +.+.: &vcapture->lock FD: 2 BD: 2 +.+.: &dev->mutex#3 ->&vdev->fh_lock FD: 1 BD: 9 +.+.: &sk->sk_peer_lock/1 FD: 31 BD: 1 ..-.: drivers/base/dd.c:321 FD: 39 BD: 2 +.+.: (deferred_probe_timeout_work).work ->device_links_lock ->deferred_probe_mutex ->deferred_probe_work ->&x->wait#10 ->&pool->lock ->&rq->__lock ->&obj_hash[i].lock FD: 1 BD: 3 ....: key#11 FD: 43 BD: 4 +.+.: &sb->s_type->i_mutex_key#4/4 ->&dentry->d_lock ->tk_core.seq.seqcount ->rename_lock FD: 163 BD: 138 ++++: &sbi->s_writepages_rwsem ->mmu_notifier_invalidate_range_start ->&____s->seqcount ->pool_lock#2 ->&c->lock ->lock#4 ->lock#5 ->&obj_hash[i].lock ->&journal->j_state_lock ->jbd2_handle ->tk_core.seq.seqcount ->&dd->lock ->&rq_wait->wait ->rcu_node_0 ->&rq->__lock ->&xa->xa_lock#6 ->&base->lock ->&mapping->private_lock ->&rcu_state.expedited_wq ->&____s->seqcount#2 ->remove_cache_srcu ->&n->list_lock ->&cfs_rq->removed.lock ->batched_entropy_u8.lock ->kfence_freelist_lock ->&folio_wait_table[i] ->quarantine_lock ->&rsp->gp_wait ->&ei->i_data_sem ->&rnp->exp_lock ->rcu_state.exp_mutex ->&journal->j_barrier ->&journal->j_wait_transaction_locked ->&meta->lock ->&journal->j_wait_commit ->&journal->j_wait_done_commit FD: 85 BD: 1 .+.+: &type->s_umount_key#40 ->&sb->s_type->i_lock_key#3 ->mmu_notifier_invalidate_range_start ->pool_lock#2 ->tk_core.seq.seqcount ->&dd->lock ->&obj_hash[i].lock ->&base->lock ->&c->lock ->&____s->seqcount ->lock#4 ->lock#5 ->&wb->list_lock ->&rq_wait->wait ->rcu_node_0 ->&rq->__lock ->batched_entropy_u8.lock ->kfence_freelist_lock ->&n->list_lock ->&____s->seqcount#2 ->lock#11 FD: 1 BD: 4157 ..-.: &s->s_inode_wblist_lock FD: 1 BD: 4158 ..-.: key#12 FD: 33 BD: 1 ..-.: &(&wb->bw_dwork)->timer FD: 69 BD: 2 +.+.: 
(work_completion)(&(&wb->bw_dwork)->work) ->&wb->list_lock FD: 16 BD: 1 +.-.: (&dom->period_timer) ->key#13 ->&p->sequence ->&obj_hash[i].lock ->&base->lock FD: 1 BD: 4159 ..-.: key#13 FD: 145 BD: 1 .+.+: kn->active#40 ->fs_reclaim ->&c->lock ->&kernfs_locks->open_file_mutex[count] FD: 145 BD: 1 .+.+: kn->active#41 ->fs_reclaim ->&kernfs_locks->open_file_mutex[count] FD: 145 BD: 1 .+.+: kn->active#42 ->fs_reclaim ->&kernfs_locks->open_file_mutex[count] FD: 144 BD: 1 .+.+: kn->active#43 ->fs_reclaim ->remove_cache_srcu ->&c->lock ->&kernfs_locks->open_file_mutex[count] FD: 29 BD: 5 +.+.: &lo->lo_mutex ->&rq->__lock ->rcu_node_0 FD: 148 BD: 10 +.+.: &nbd->config_lock ->mmu_notifier_invalidate_range_start ->pool_lock#2 ->&bdev->bd_size_lock ->&q->queue_lock ->&ACCESS_PRIVATE(sdp, lock) ->set->srcu ->&obj_hash[i].lock ->&rq->__lock ->&x->wait#3 ->&c->lock ->fs_reclaim ->uevent_sock_mutex FD: 32 BD: 8 ....: &ACCESS_PRIVATE(ssp->srcu_sup, lock) ->&ACCESS_PRIVATE(sdp, lock) FD: 145 BD: 1 .+.+: kn->active#44 ->fs_reclaim ->&kernfs_locks->open_file_mutex[count] FD: 2 BD: 4 +.+.: &new->lock ->&mtdblk->cache_mutex FD: 1 BD: 5 +.+.: &mtdblk->cache_mutex FD: 145 BD: 1 .+.+: kn->active#45 ->fs_reclaim ->&c->lock ->&kernfs_locks->open_file_mutex[count] FD: 236 BD: 1 +.+.: &mtd->master.chrdev_lock ->&mm->mmap_lock FD: 1 BD: 4 +.+.: destroy_lock FD: 33 BD: 1 ..-.: fs/notify/mark.c:89 FD: 139 BD: 2 +.+.: connector_reaper_work ->destroy_lock ->&ACCESS_PRIVATE(sdp, lock) ->&fsnotify_mark_srcu ->&obj_hash[i].lock ->&x->wait#3 ->&rq->__lock ->pool_lock#2 ->pool_lock ->&cfs_rq->removed.lock ->&base->lock ->rcu_node_0 ->&meta->lock ->kfence_freelist_lock ->&ACCESS_PRIVATE(ssp->srcu_sup, lock) FD: 139 BD: 2 +.+.: (reaper_work).work ->destroy_lock ->&ACCESS_PRIVATE(sdp, lock) ->&fsnotify_mark_srcu ->&obj_hash[i].lock ->&ACCESS_PRIVATE(ssp->srcu_sup, lock) ->&x->wait#3 ->&rq->__lock ->pool_lock#2 ->&base->lock ->pool_lock ->&cfs_rq->removed.lock ->rcu_node_0 ->&meta->lock ->kfence_freelist_lock FD: 1 BD: 1 +.+.: userns_state_mutex FD: 4 BD: 76 +...: fib_info_lock ->&obj_hash[i].lock ->pool_lock#2 FD: 118 BD: 76 +...: &net->sctp.local_addr_lock ->&net->sctp.addr_wq_lock FD: 117 BD: 78 +.-.: &net->sctp.addr_wq_lock ->pool_lock#2 ->&obj_hash[i].lock ->&base->lock ->&c->lock ->&____s->seqcount ->&n->list_lock ->&____s->seqcount#2 ->k-slock-AF_INET6/1 ->slock-AF_INET6/1 ->slock-AF_INET/1 ->batched_entropy_u8.lock ->kfence_freelist_lock ->&meta->lock FD: 1 BD: 72 +...: _xmit_LOOPBACK FD: 28 BD: 79 .+.+: netpoll_srcu ->&rq->__lock FD: 19 BD: 93 +.-.: &in_dev->mc_tomb_lock ->pool_lock#2 ->&obj_hash[i].lock ->&zone->lock ->&____s->seqcount ->&c->lock ->&____s->seqcount#2 ->&data->lock ->&n->list_lock ->batched_entropy_u8.lock ->kfence_freelist_lock ->&meta->lock ->quarantine_lock FD: 36 BD: 90 +.-.: &im->lock ->pool_lock#2 ->&c->lock ->&____s->seqcount ->&n->list_lock ->&obj_hash[i].lock ->&zone->lock ->&____s->seqcount#2 ->batched_entropy_u8.lock ->kfence_freelist_lock ->&base->lock ->&data->lock ->init_task.mems_allowed_seq.seqcount ->&pgdat->kswapd_wait FD: 1 BD: 79 +.+.: cbs_list_lock FD: 31 BD: 78 +...: &net->ipv6.addrconf_hash_lock ->&obj_hash[i].lock ->&base->lock FD: 32 BD: 3555 +...: &ifa->lock ->batched_entropy_u32.lock ->crngs.lock ->&obj_hash[i].lock ->&base->lock FD: 45 BD: 3556 +...: &tb->tb6_lock ->&net->ipv6.fib6_walker_lock ->&____s->seqcount ->pool_lock#2 ->&c->lock ->nl_table_lock ->&obj_hash[i].lock ->nl_table_wait.lock ->rlock-AF_NETLINK ->&n->list_lock ->rt6_exception_lock ->&data->fib_event_queue_lock 
->quarantine_lock ->&____s->seqcount#2 ->batched_entropy_u8.lock ->kfence_freelist_lock ->stock_lock ->&meta->lock FD: 1 BD: 3557 ++..: &net->ipv6.fib6_walker_lock FD: 471 BD: 72 +.+.: sk_lock-AF_INET ->slock-AF_INET ->&table->hash[i].lock ->&tcp_hashinfo.bhash[i].lock ->&h->lhash2[i].lock ->&queue->rskq_lock ->clock-AF_INET ->&obj_hash[i].lock ->&base->lock ->fs_reclaim ->&____s->seqcount ->pool_lock#2 ->&c->lock ->&mm->mmap_lock ->tk_core.seq.seqcount ->&sd->defer_lock ->mmu_notifier_invalidate_range_start ->&hashinfo->ehash_locks[i] ->elock-AF_INET ->&rq->__lock ->&n->list_lock ->rcu_node_0 ->batched_entropy_u8.lock ->kfence_freelist_lock ->&meta->lock ->remove_cache_srcu ->&____s->seqcount#8 ->once_mutex ->batched_entropy_u32.lock ->batched_entropy_u16.lock ->&ei->socket.wq.wait ->quarantine_lock ->&rcu_state.expedited_wq ->&____s->seqcount#2 ->&cfs_rq->removed.lock ->stock_lock ->&sem->wait_lock ->&p->pi_lock ->per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) ->&sctp_port_hashtable[i].lock ->crngs.lock ->&asoc->wait ->crypto_alg_sem ->(kmod_concurrent_max).lock ->&x->wait#17 ->running_helpers_waitq.lock ->(crypto_chain).rwsem ->&x->wait#21 ->(&timer.timer) ->&sctp_ep_hashtable[i].lock ->&dir->lock ->sk_lock-AF_INET/1 ->krc.lock ->lock ->&sb->s_type->i_lock_key#8 ->k-sk_lock-AF_INET/1 ->k-slock-AF_INET ->k-clock-AF_INET ->&xa->xa_lock#6 ->&fsnotify_mark_srcu ->&msk->pm.lock ->k-sk_lock-AF_INET ->cpu_hotplug_lock ->&in_dev->mc_tomb_lock ->&im->lock ->tcp_md5sig_mutex ->&net->xfrm.xfrm_policy_lock ->&f->f_lock ->_xmit_ETHER ->free_vmap_area_lock ->vmap_area_lock ->pcpu_alloc_mutex ->init_mm.page_table_lock ->(console_sem).lock ->&data->lock ->(&tw->tw_timer) ->pgd_lock ->key ->pcpu_lock ->percpu_counters_lock ->l2tp_ip_lock ->&dccp_hashinfo.bhash[i].lock ->key#27 ->pack_mutex ->text_mutex ->&fp->aux->used_maps_mutex ->hrtimer_bases.lock ->&sighand->siglock ->sctp_assocs_id_lock ->&f->f_owner.lock ->&psock->ingress_lock FD: 91 BD: 77 +.-.: slock-AF_INET ->&obj_hash[i].lock ->batched_entropy_u16.lock ->&tcp_hashinfo.bhash[i].lock ->&hashinfo->ehash_locks[i] ->tk_core.seq.seqcount ->(&req->rsk_timer) ->&base->lock ->&queue->rskq_lock ->pool_lock#2 ->&c->lock ->batched_entropy_u8.lock ->kfence_freelist_lock ->&____s->seqcount ->elock-AF_INET ->&____s->seqcount#2 ->&sk->sk_lock.wq ->&n->list_lock ->krc.lock ->key#24 ->&data->lock ->&dccp_hashinfo.bhash[i].lock ->&lruvec->lru_lock ->&net->xfrm.xfrm_policy_lock ->&policy->lock ->&list->lock#33 ->hrtimer_bases.lock ->batched_entropy_u32.lock ->&meta->lock FD: 3 BD: 120 ++..: clock-AF_INET ->&obj_hash[i].lock FD: 494 BD: 74 +.+.: sk_lock-AF_INET6 ->slock-AF_INET6 ->&table->hash[i].lock ->&____s->seqcount#8 ->batched_entropy_u32.lock ->&____s->seqcount ->pool_lock#2 ->&c->lock ->&obj_hash[i].lock ->batched_entropy_u16.lock ->&tcp_hashinfo.bhash[i].lock ->&h->lhash2[i].lock ->fs_reclaim ->&mm->mmap_lock ->once_lock ->&pool->lock ->rcu_node_0 ->&rq->__lock ->&n->list_lock ->tk_core.seq.seqcount ->clock-AF_INET6 ->&____s->seqcount#2 ->remove_cache_srcu ->&cfs_rq->removed.lock ->&rcu_state.expedited_wq ->batched_entropy_u8.lock ->kfence_freelist_lock ->&zone->lock ->&sctp_port_hashtable[i].lock ->crngs.lock ->&base->lock ->&asoc->wait ->stock_lock ->krc.lock ->sctp_assocs_id_lock ->&list->lock#27 ->&meta->lock ->&ei->socket.wq.wait ->&queue->rskq_lock ->cpu_hotplug_lock ->mmu_notifier_invalidate_range_start ->&sb->s_type->i_lock_key#8 ->&dir->lock ->k-sk_lock-AF_INET6/1 ->k-slock-AF_INET6 ->k-clock-AF_INET6 ->k-sk_lock-AF_INET6 ->&xa->xa_lock#6 
->&fsnotify_mark_srcu ->&msk->pm.lock ->elock-AF_INET6 ->&net->xfrm.xfrm_policy_lock ->quarantine_lock ->&idev->mc_lock ->&sighand->siglock ->(console_sem).lock ->console_owner_lock ->console_owner ->lock ->ip6_sk_fl_lock ->crypto_alg_sem ->&sctp_ep_hashtable[i].lock ->&hashinfo->ehash_locks[i] ->&list->lock#40 ->ip6_fl_lock ->sk_lock-AF_INET6/1 ->&data->lock ->&token_hash[i].lock ->key#27 ->&f->f_owner.lock ->&f->f_lock ->tcpv6_prot_mutex ->device_spinlock FD: 90 BD: 97 +.-.: slock-AF_INET6 ->&obj_hash[i].lock ->elock-AF_INET6 ->pool_lock#2 ->&sk->sk_lock.wq ->&tcp_hashinfo.bhash[i].lock ->tk_core.seq.seqcount ->&base->lock ->&zone->lock ->&c->lock ->&____s->seqcount#2 ->&____s->seqcount ->&list->lock#27 ->batched_entropy_u32.lock ->&n->list_lock ->&list->lock#40 ->quarantine_lock ->krc.lock ->crngs.lock ->sctp_assocs_id_lock ->&asoc->wait ->batched_entropy_u8.lock ->kfence_freelist_lock ->&net->xfrm.xfrm_policy_lock ->&policy->lock ->&list->lock#33 ->batched_entropy_u16.lock ->&hashinfo->ehash_locks[i] ->&queue->rskq_lock ->clock-AF_INET6 ->&data->lock ->&meta->lock ->key#24 FD: 34 BD: 119 ++--: clock-AF_INET6 ->rds_tcp_tc_list_lock ->tk_core.seq.seqcount FD: 965 BD: 1 +.+.: &f->f_pos_lock ->sb_writers#5 ->&p->lock ->&type->i_mutex_dir_key#4 ->&mm->mmap_lock ->sb_writers#3 ->sb_writers#4 ->&rq->__lock ->fs_reclaim ->&c->lock ->&____s->seqcount#2 ->&____s->seqcount ->pool_lock#2 ->&obj_hash[i].lock ->&lock->wait_lock ->sb_writers#11 ->&type->i_mutex_dir_key#3 ->rcu_node_0 ->&rcu_state.expedited_wq ->sb_writers#10 ->(console_sem).lock ->console_owner_lock ->console_owner ->tk_core.seq.seqcount FD: 145 BD: 1 .+.+: kn->active#46 ->fs_reclaim ->&c->lock ->&kernfs_locks->open_file_mutex[count] FD: 525 BD: 72 ++++: dev_addr_sem ->net_rwsem ->&tn->lock ->&sdata->sec_mtx ->fs_reclaim ->pool_lock#2 ->&c->lock ->&n->list_lock ->nl_table_lock ->rlock-AF_NETLINK ->rcu_node_0 ->&rq->__lock ->&obj_hash[i].lock ->nl_table_wait.lock ->&tbl->lock ->&pn->hash_lock ->input_pool.lock ->&____s->seqcount ->&br->lock ->team->team_lock_key#3 ->pgd_lock ->key ->pcpu_lock ->percpu_counters_lock ->&cfs_rq->removed.lock ->quarantine_lock ->remove_cache_srcu ->_xmit_ETHER ->&hard_iface->bat_iv.ogm_buff_mutex ->&rcu_state.expedited_wq ->&____s->seqcount#2 ->team->team_lock_key#7 ->team->team_lock_key#8 ->team->team_lock_key#9 ->team->team_lock_key#10 ->team->team_lock_key#11 FD: 947 BD: 2 +.+.: nlk_cb_mutex-GENERIC ->fs_reclaim ->pool_lock#2 ->&____s->seqcount ->rtnl_mutex ->&rdev->wiphy.mtx ->rlock-AF_NETLINK ->&obj_hash[i].lock ->&c->lock ->&rq->__lock ->&devlink->lock_key#4 ->genl_mutex ->&____s->seqcount#2 ->&devlink->lock_key#7 ->&lock->wait_lock ->rtnl_mutex.wait_lock ->&p->pi_lock ->&n->list_lock ->genl_mutex.wait_lock ->(console_sem).lock ->&devlink->lock_key#8 ->&devlink->lock_key#9 ->&devlink->lock_key#10 ->&dir->lock#2 ->&devlink->lock_key#11 FD: 21 BD: 82 +...: &rdev->bss_lock ->pool_lock#2 ->&obj_hash[i].lock ->krc.lock ->&c->lock ->&____s->seqcount#2 ->&____s->seqcount ->&n->list_lock ->quarantine_lock ->batched_entropy_u8.lock ->kfence_freelist_lock ->&meta->lock ->&base->lock FD: 118 BD: 1 +.-.: (&net->sctp.addr_wq_timer) ->&net->sctp.addr_wq_lock FD: 2 BD: 4157 ..-.: &pl->lock ->key#12 FD: 14 BD: 72 ++..: lapb_list_lock ->pool_lock#2 ->&obj_hash[i].lock ->&base->lock ->&c->lock FD: 4 BD: 72 ++.-: x25_neigh_list_lock ->&obj_hash[i].lock ->pool_lock#2 FD: 1 BD: 72 +...: _xmit_SLIP FD: 15 BD: 1 +.-.: (&eql->timer) ->&eql->queue.lock ->&obj_hash[i].lock ->&base->lock FD: 5 BD: 75 +.-.: &eql->queue.lock 
->&obj_hash[i].lock ->&____s->seqcount ->pool_lock#2 FD: 1 BD: 4128 ....: key#14 FD: 1 BD: 72 +...: &vi->refill_lock FD: 62 BD: 3444 +.-.: _xmit_ETHER#2 ->&obj_hash[i].lock ->pool_lock#2 ->&meta->lock ->kfence_freelist_lock ->quarantine_lock ->&data->lock FD: 140 BD: 81 +.+.: &local->chanctx_mtx ->fs_reclaim ->pool_lock#2 ->&data->mutex ->&rq->__lock ->batched_entropy_u8.lock ->kfence_freelist_lock ->&local->queue_stop_reason_lock ->rcu_node_0 ->&obj_hash[i].lock ->krc.lock ->&c->lock ->&____s->seqcount#2 ->&____s->seqcount ->nl_table_lock ->nl_table_wait.lock ->&rdev->bss_lock ->&n->list_lock FD: 1 BD: 82 +.+.: &data->mutex FD: 19 BD: 3464 +...: &local->filter_lock ->&c->lock ->pool_lock#2 ->&obj_hash[i].lock ->krc.lock ->&____s->seqcount ->&____s->seqcount#2 ->&n->list_lock FD: 21 BD: 1 +.+.: (wq_completion)phy0 ->(work_completion)(&local->reconfig_filter) FD: 20 BD: 93 +.+.: (work_completion)(&local->reconfig_filter) ->&local->filter_lock FD: 87 BD: 75 +.-.: &dev->tx_global_lock ->_xmit_ETHER#2 ->&obj_hash[i].lock ->&base->lock ->&qdisc_xmit_lock_key ->&qdisc_xmit_lock_key#2 ->&vlan_netdev_xmit_lock_key ->_xmit_NETROM ->_xmit_NONE#2 ->_xmit_TUNNEL6#2 ->_xmit_SIT#2 ->_xmit_TUNNEL#2 ->_xmit_IPGRE#2 ->&batadv_netdev_xmit_lock_key ->&qdisc_xmit_lock_key#3 ->&qdisc_xmit_lock_key#4 ->_xmit_LOOPBACK#2 ->_xmit_PIMREG#2 ->&qdisc_xmit_lock_key#5 FD: 19 BD: 3430 +.-.: &sch->q.lock ->tk_core.seq.seqcount ->hrtimer_bases.lock ->crngs.lock ->batched_entropy_u16.lock ->batched_entropy_u32.lock ->pool_lock#2 ->&obj_hash[i].lock ->batched_entropy_u8.lock ->&c->lock ->&____s->seqcount ->&n->list_lock ->&____s->seqcount#2 FD: 1 BD: 79 ....: class FD: 1 BD: 79 ....: (&tbl->proxy_timer) FD: 21 BD: 1 +.+.: (wq_completion)phy1 ->(work_completion)(&local->reconfig_filter) FD: 1 BD: 72 +...: _xmit_VOID FD: 1 BD: 89 ....: &____s->seqcount#8 FD: 9 BD: 3506 +.-.: &ul->lock ->pool_lock#2 ->&dir->lock#2 FD: 1 BD: 72 +...: _xmit_X25 FD: 15 BD: 73 +...: &lapbeth->up_lock ->&obj_hash[i].lock ->pool_lock#2 ->&data->lock FD: 66 BD: 73 +.-.: &lapb->lock ->pool_lock#2 ->&obj_hash[i].lock ->&base->lock ->&c->lock ->batched_entropy_u8.lock ->kfence_freelist_lock ->&____s->seqcount ->&list->lock#6 ->&list->lock#7 ->&n->list_lock ->&____s->seqcount#2 FD: 3 BD: 146 +.+.: &(ei->i_block_reservation_lock) ->key#15 ->key#3 FD: 908 BD: 2 +.+.: (work_completion)(&work->work) ->devices_rwsem ->&obj_hash[i].lock ->&meta->lock ->kfence_freelist_lock ->&base->lock ->&rq->__lock ->quarantine_lock FD: 872 BD: 2 +.+.: (work_completion)(&(&ifa->dad_work)->work) ->rtnl_mutex ->rtnl_mutex.wait_lock ->&p->pi_lock ->&rq->__lock ->&cfs_rq->removed.lock ->&obj_hash[i].lock ->pool_lock#2 FD: 1 BD: 3557 +.-.: rt6_exception_lock FD: 1 BD: 149 ....: &tty->ctrl.lock FD: 5 BD: 84 +.+.: fasync_lock ->&new->fa_lock ->&obj_hash[i].lock ->pool_lock#2 FD: 1 BD: 1 +.+.: &buf->lock FD: 1 BD: 7 ....: &tty->flow.lock FD: 31 BD: 1 ..-.: &(&idev->mc_dad_work)->timer FD: 221 BD: 1 +.+.: (wq_completion)mld ->(work_completion)(&(&idev->mc_dad_work)->work) ->(work_completion)(&(&idev->mc_ifc_work)->work) ->&rq->__lock FD: 219 BD: 2 +.+.: (work_completion)(&(&idev->mc_dad_work)->work) ->&idev->mc_lock FD: 267 BD: 4 +.+.: &ldata->atomic_read_lock ->&tty->termios_rwsem ->(work_completion)(&buf->work) ->&rq->__lock FD: 1 BD: 5 +.+.: (work_completion)(&buf->work) FD: 79 BD: 72 +...: dev->qdisc_tx_busylock ?: &qdisc_tx_busylock ->_xmit_ETHER#2 ->_xmit_SLIP#2 ->_xmit_NETROM ->&obj_hash[i].lock ->pool_lock#2 ->quarantine_lock ->&c->lock ->&r->producer_lock#4 ->&sch->q.lock 
->&data->lock FD: 28 BD: 3 +.+.: &net->packet.sklist_lock ->&rq->__lock FD: 259 BD: 3 +.+.: sk_lock-AF_PACKET ->slock-AF_PACKET ->&po->bind_lock ->&obj_hash[i].lock ->&rq->__lock ->&mm->mmap_lock ->fs_reclaim ->pool_lock#2 ->free_vmap_area_lock ->vmap_area_lock ->&____s->seqcount ->pcpu_alloc_mutex ->pack_mutex ->batched_entropy_u32.lock ->text_mutex ->&fp->aux->used_maps_mutex ->&rnp->exp_wq[2] ->&rnp->exp_wq[3] ->&c->lock ->&rnp->exp_wq[0] ->&n->list_lock ->&rnp->exp_lock ->rcu_state.exp_mutex ->rcu_state.exp_mutex.wait_lock ->&p->pi_lock ->stock_lock ->&po->pg_vec_lock ->init_mm.page_table_lock ->&sem->wait_lock ->&____s->seqcount#2 ->&cfs_rq->removed.lock ->&f->f_lock ->remove_cache_srcu ->&base->lock ->pcpu_lock ->purge_vmap_area_lock ->batched_entropy_u8.lock ->kfence_freelist_lock ->&meta->lock FD: 30 BD: 4 +...: slock-AF_PACKET ->&sk->sk_lock.wq FD: 39 BD: 74 +.+.: &po->bind_lock ->ptype_lock ->pool_lock#2 ->&dir->lock#2 ->&match->lock ->&obj_hash[i].lock FD: 1 BD: 3552 +.-.: rlock-AF_PACKET FD: 1 BD: 132 +...: wlock-AF_PACKET FD: 31 BD: 1 ..-.: &(&dm_bufio_cleanup_old_work)->timer FD: 16 BD: 1 +.+.: (wq_completion)dm_bufio_cache ->(work_completion)(&(&dm_bufio_cleanup_old_work)->work) FD: 15 BD: 2 +.+.: (work_completion)(&(&dm_bufio_cleanup_old_work)->work) ->dm_bufio_clients_lock ->&obj_hash[i].lock ->&base->lock FD: 31 BD: 1 ..-.: &(&idev->mc_ifc_work)->timer FD: 219 BD: 2 +.+.: (work_completion)(&(&idev->mc_ifc_work)->work) ->&idev->mc_lock ->&rq->__lock FD: 14 BD: 3447 +.-.: &ul->lock#2 ->pool_lock#2 ->&dir->lock#2 ->&c->lock ->&n->list_lock FD: 19 BD: 3499 ++--: &n->lock ->&obj_hash[i].lock ->&base->lock ->pool_lock#2 ->&(&n->ha_lock)->lock ->&____s->seqcount#9 ->&c->lock ->&____s->seqcount ->&meta->lock ->kfence_freelist_lock ->&____s->seqcount#2 ->&data->lock ->quarantine_lock ->&n->list_lock ->batched_entropy_u8.lock FD: 1 BD: 3501 +.--: &____s->seqcount#9 FD: 1 BD: 3504 ...-: &____s->seqcount#10 FD: 42 BD: 2 +.+.: (work_completion)(&w->work)#2 ->pool_lock#2 ->&dir->lock ->&obj_hash[i].lock ->nf_conntrack_mutex ->&rq->__lock ->nf_conntrack_mutex.wait_lock ->&p->pi_lock ->&cfs_rq->removed.lock ->&meta->lock ->kfence_freelist_lock ->&base->lock FD: 31 BD: 1 ..-.: &(&ifa->dad_work)->timer FD: 1 BD: 113 +.-.: &ct->lock FD: 138 BD: 3 +.+.: fanout_mutex ->fs_reclaim ->&c->lock ->pool_lock#2 ->&po->bind_lock ->&n->list_lock ->&rq->__lock ->&obj_hash[i].lock FD: 1 BD: 3 +...: clock-AF_PACKET FD: 1 BD: 3 ..-.: elock-AF_PACKET FD: 96 BD: 146 +.+.: &lg->lg_mutex ->&ei->i_prealloc_lock ->mmu_notifier_invalidate_range_start ->&____s->seqcount ->pool_lock#2 ->&c->lock ->&xa->xa_lock#6 ->lock#4 ->&mapping->private_lock ->&ret->b_state_lock ->&journal->j_revoke_lock ->&pa->pa_lock ->&lg->lg_prealloc_lock ->&rq->__lock ->key#3 ->rcu_node_0 ->&obj_hash[i].lock ->&____s->seqcount#2 ->bit_wait_table + i ->&bgl->locks[i].lock FD: 1 BD: 149 +.+.: &pa->pa_lock FD: 1 BD: 149 +.+.: &lg->lg_prealloc_lock FD: 29 BD: 148 ..-.: &rq_wait->wait ->&p->pi_lock FD: 33 BD: 3 ..-.: &ei->i_completed_io_lock FD: 153 BD: 1 +.+.: (wq_completion)ext4-rsv-conversion ->(work_completion)(&ei->i_rsv_conversion_work) FD: 152 BD: 2 +.+.: (work_completion)(&ei->i_rsv_conversion_work) ->&ei->i_completed_io_lock ->&journal->j_state_lock ->jbd2_handle ->&obj_hash[i].lock ->pool_lock#2 ->&ext4__ioend_wq[i] ->&ret->b_uptodate_lock ->&folio_wait_table[i] ->&rq->__lock ->rcu_node_0 ->mmu_notifier_invalidate_range_start ->quarantine_lock ->&rcu_state.expedited_wq ->&meta->lock ->kfence_freelist_lock ->&cfs_rq->removed.lock 
->&c->lock ->&lruvec->lru_lock ->&base->lock ->batched_entropy_u8.lock FD: 1 BD: 148 ....: &journal->j_wait_reserved FD: 1 BD: 3 ....: &ext4__ioend_wq[i] FD: 88 BD: 1 +.-.: (&dev->watchdog_timer) ->&dev->tx_global_lock FD: 67 BD: 1 +.-.: (&lapb->t1timer) ->&lapb->lock FD: 31 BD: 1 ..-.: drivers/regulator/core.c:6266 FD: 4 BD: 2 +.+.: (regulator_init_complete_work).work ->&k->list_lock ->&k->k_lock FD: 9 BD: 3503 +.-.: &nf_conntrack_locks[i] ->&nf_conntrack_locks[i]/1 ->batched_entropy_u8.lock FD: 8 BD: 3504 +.-.: &nf_conntrack_locks[i]/1 ->batched_entropy_u8.lock ->&obj_hash[i].lock ->pool_lock#2 FD: 1 BD: 119 +.-.: &hashinfo->ehash_locks[i] FD: 2 BD: 3500 +.-.: &(&n->ha_lock)->lock ->&____s->seqcount#9 FD: 1 BD: 3499 +.-.: lock#8 FD: 1 BD: 3501 ..-.: id_table_lock FD: 1 BD: 93 ..-.: (&req->rsk_timer) FD: 1 BD: 110 +.-.: &queue->rskq_lock FD: 19 BD: 110 +.-.: tcp_metrics_lock ->&____s->seqcount ->pool_lock#2 ->&obj_hash[i].lock ->&c->lock ->krc.lock FD: 82 BD: 79 +.-.: slock-AF_INET/1 ->tk_core.seq.seqcount ->&c->lock ->pool_lock#2 ->&obj_hash[i].lock ->&meta->lock ->kfence_freelist_lock ->&base->lock ->&____s->seqcount ->&zone->lock ->&n->list_lock ->quarantine_lock ->&hashinfo->ehash_locks[i] ->&tcp_hashinfo.bhash[i].lock ->elock-AF_INET ->batched_entropy_u8.lock ->&____s->seqcount#2 ->&sctp_ep_hashtable[i].lock ->clock-AF_INET ->krc.lock ->&sctp_port_hashtable[i].lock ->&data->lock ->key#25 ->init_task.mems_allowed_seq.seqcount ->hrtimer_bases.lock ->&f->f_owner.lock FD: 1 BD: 73 +.-.: &sd->defer_lock FD: 102 BD: 1 +.-.: (&icsk->icsk_delack_timer) ->slock-AF_INET ->k-slock-AF_INET6 FD: 1 BD: 97 ..-.: elock-AF_INET FD: 114 BD: 1 +.-.: (&icsk->icsk_retransmit_timer) ->slock-AF_INET ->slock-AF_INET6 ->pool_lock#2 ->&dir->lock ->&obj_hash[i].lock ->stock_lock FD: 1 BD: 147 ....: key#15 FD: 87 BD: 145 +.+.: &sbi->s_orphan_lock ->&rq->__lock ->&ret->b_state_lock ->&ei->i_raw_lock ->rcu_node_0 ->&lock->wait_lock ->&mapping->private_lock ->bit_wait_table + i ->&cfs_rq->removed.lock ->&obj_hash[i].lock ->pool_lock#2 ->mmu_notifier_invalidate_range_start ->&____s->seqcount ->&rcu_state.expedited_wq FD: 145 BD: 1 .+.+: kn->active#47 ->fs_reclaim ->&c->lock ->&kernfs_locks->open_file_mutex[count] FD: 33 BD: 3 +.+.: (work_completion)(work) ->lock#4 ->lock#5 ->&rq->__lock FD: 1 BD: 1 +.+.: &futex_queues[i].lock FD: 1 BD: 4 ....: &on->poll FD: 1 BD: 4 +.+.: module_mutex FD: 3 BD: 73 +.+.: once_mutex ->crngs.lock FD: 237 BD: 1 .+.+: sb_writers#9 ->&attr->mutex ->&mm->mmap_lock FD: 236 BD: 2 +.+.: &attr->mutex ->&mm->mmap_lock FD: 146 BD: 1 +.+.: &type->s_umount_key#41/1 ->fs_reclaim ->pool_lock#2 ->pcpu_alloc_mutex ->shrinker_rwsem ->list_lrus_mutex ->sb_lock ->&root->kernfs_rwsem ->&sb->s_type->i_lock_key#30 ->&root->kernfs_supers_rwsem ->&dentry->d_lock FD: 41 BD: 4125 +.+.: &sb->s_type->i_lock_key#30 ->&dentry->d_lock FD: 911 BD: 2 .+.+: sb_writers#10 ->mount_lock ->&type->i_mutex_dir_key#6 ->fs_reclaim ->&mm->mmap_lock ->&of->mutex ->&obj_hash[i].lock ->&type->i_mutex_dir_key#6/1 ->&sem->wait_lock ->&p->pi_lock ->&rq->__lock ->&c->lock ->&n->list_lock ->rcu_node_0 FD: 115 BD: 3 ++++: &type->i_mutex_dir_key#6 ->tomoyo_ss ->tk_core.seq.seqcount ->&root->kernfs_iattr_rwsem ->rename_lock.seqcount ->fs_reclaim ->&c->lock ->&dentry->d_lock ->&root->kernfs_rwsem ->&sb->s_type->i_lock_key#30 ->&sem->wait_lock ->&rq->__lock ->&xa->xa_lock#12 ->&obj_hash[i].lock ->stock_lock ->&n->list_lock FD: 145 BD: 1 ++++: kn->active#48 ->fs_reclaim ->&c->lock ->&n->list_lock ->&kernfs_locks->open_file_mutex[count] 
->pool_lock#2 FD: 1 BD: 1 +.+.: &sb->s_type->i_mutex_key#15 FD: 146 BD: 1 +.+.: &type->s_umount_key#42/1 ->fs_reclaim ->pool_lock#2 ->pcpu_alloc_mutex ->shrinker_rwsem ->list_lrus_mutex ->sb_lock ->&root->kernfs_rwsem ->&sb->s_type->i_lock_key#31 ->&root->kernfs_supers_rwsem ->&dentry->d_lock ->&c->lock ->&n->list_lock FD: 41 BD: 4138 +.+.: &sb->s_type->i_lock_key#31 ->&dentry->d_lock FD: 131 BD: 1 ++++: &type->s_umount_key#43 ->shrinker_rwsem ->percpu_ref_switch_lock ->&root->kernfs_supers_rwsem ->rename_lock.seqcount ->&dentry->d_lock ->&sb->s_type->i_lock_key#31 ->&s->s_inode_list_lock ->&xa->xa_lock#6 ->inode_hash_lock ->&obj_hash[i].lock ->pool_lock#2 ->&fsnotify_mark_srcu ->sb_lock ->&lru->node[i].lock ->&rq->__lock ->&wb->list_lock FD: 881 BD: 2 +.+.: (work_completion)(&cgrp->bpf.release_work) ->cgroup_mutex ->percpu_ref_switch_lock ->&obj_hash[i].lock ->pool_lock#2 FD: 887 BD: 1 +.+.: (wq_completion)cgroup_destroy ->(work_completion)(&css->destroy_work) ->(work_completion)(&(&css->destroy_rwork)->work) FD: 881 BD: 2 +.+.: (work_completion)(&css->destroy_work) ->cgroup_mutex ->&obj_hash[i].lock ->pool_lock#2 FD: 885 BD: 2 +.+.: (work_completion)(&(&css->destroy_rwork)->work) ->percpu_ref_switch_lock ->&obj_hash[i].lock ->pool_lock#2 ->&cgrp->pidlist_mutex ->(wq_completion)cgroup_pidlist_destroy ->&wq->mutex ->(work_completion)(&cgrp->release_agent_work) ->cgroup_mutex ->cgroup_rstat_lock ->pcpu_lock ->&root->kernfs_rwsem ->kernfs_idr_lock FD: 132 BD: 12 +.+.: &cgrp->pidlist_mutex ->css_set_lock ->&rq->__lock ->fs_reclaim ->pool_lock#2 ->&c->lock ->&obj_hash[i].lock ->&base->lock FD: 134 BD: 3 +.+.: (wq_completion)cgroup_pidlist_destroy ->(work_completion)(&(&l->destroy_dwork)->work) FD: 1 BD: 3 +.+.: (work_completion)(&cgrp->release_agent_work) FD: 919 BD: 2 .+.+: sb_writers#11 ->mount_lock ->&type->i_mutex_dir_key#7 ->fs_reclaim ->&mm->mmap_lock ->&of->mutex ->&obj_hash[i].lock ->&c->lock ->&type->i_mutex_dir_key#7/1 ->&p->lock ->tk_core.seq.seqcount ->stock_lock ->&rq->__lock ->&sb->s_type->i_lock_key#31 ->&wb->list_lock ->&n->list_lock ->&sem->wait_lock ->&p->pi_lock FD: 117 BD: 3 ++++: &type->i_mutex_dir_key#7 ->tomoyo_ss ->tk_core.seq.seqcount ->&root->kernfs_iattr_rwsem ->rename_lock.seqcount ->fs_reclaim ->&dentry->d_lock ->&root->kernfs_rwsem ->&sb->s_type->i_lock_key#31 ->&c->lock ->pool_lock#2 ->&xa->xa_lock#12 ->&obj_hash[i].lock ->stock_lock ->&____s->seqcount#2 ->&____s->seqcount ->&n->list_lock ->&rq->__lock ->&sem->wait_lock FD: 1 BD: 18 +.+.: &dom->lock FD: 145 BD: 1 .+.+: kn->active#49 ->fs_reclaim ->&c->lock ->&kernfs_locks->open_file_mutex[count] FD: 1 BD: 1 +.+.: &sb->s_type->i_mutex_key#16 FD: 304 BD: 1 .+.+: kn->active#50 ->fs_reclaim ->&kernfs_locks->open_file_mutex[count] ->cpu_hotplug_lock FD: 43 BD: 3 +.+.: &type->s_umount_key#44 ->sb_lock ->&dentry->d_lock FD: 150 BD: 2 +.+.: &sb->s_type->i_mutex_key#17 ->namespace_sem ->rename_lock.seqcount ->fs_reclaim ->pool_lock#2 ->&dentry->d_lock ->mmu_notifier_invalidate_range_start ->&sb->s_type->i_lock_key#26 ->&s->s_inode_list_lock ->tk_core.seq.seqcount ->pin_fs_lock ->&c->lock ->sb_lock ->&type->s_umount_key#44 ->mnt_id_ida.xa_lock ->pcpu_alloc_mutex ->mount_lock ->&obj_hash[i].lock ->entries_lock FD: 251 BD: 1 .+.+: sb_writers#12 ->fs_reclaim ->pool_lock#2 ->&mm->mmap_lock ->&sb->s_type->i_mutex_key#17 FD: 1 BD: 78 +...: &pn->hash_lock FD: 46 BD: 1 +...: &net->ipv6.fib6_gc_lock ->&obj_hash[i].lock FD: 1 BD: 72 +...: _xmit_IEEE802154 FD: 32 BD: 1 +.-.: (&timer) ->&obj_hash[i].lock ->&base->lock ->&txlock 
->&txwq FD: 1 BD: 3568 ..-.: &list->lock#5 FD: 8 BD: 73 +...: _xmit_SLIP#2 ->&eql->queue.lock FD: 26 BD: 77 +...: _xmit_NETROM ->(console_sem).lock ->console_owner_lock ->&obj_hash[i].lock ->pool_lock#2 ->&rdev->wpan_phy.queue_lock ->&rdev->wpan_phy.sync_txq ->&data->lock FD: 18 BD: 1 +...: _xmit_X25#2 ->&lapbeth->up_lock FD: 129 BD: 8 +.+.: swap_cgroup_mutex ->fs_reclaim ->&____s->seqcount ->pool_lock#2 FD: 138 BD: 8 +.+.: swapon_mutex ->fs_reclaim ->pool_lock#2 ->swap_lock ->percpu_ref_switch_lock ->(console_sem).lock FD: 2 BD: 4111 +.+.: &p->lock#2 ->swap_avail_lock FD: 1 BD: 4112 +.+.: swap_avail_lock FD: 1 BD: 8 ....: proc_poll_wait.lock FD: 302 BD: 1 +.+.: swap_slots_cache_enable_mutex ->cpu_hotplug_lock ->swap_lock FD: 28 BD: 4108 +.+.: swap_slots_cache_mutex ->&rq->__lock FD: 31 BD: 1 ..-.: net/wireless/reg.c:236 FD: 872 BD: 2 +.+.: (reg_check_chans).work ->rtnl_mutex ->rtnl_mutex.wait_lock ->&p->pi_lock FD: 31 BD: 1 ..-.: net/wireless/reg.c:533 FD: 872 BD: 2 +.+.: (crda_timeout).work ->rtnl_mutex FD: 79 BD: 1 +.-.: (&n->timer) ->&n->lock ->pool_lock#2 ->&c->lock ->&dir->lock#2 ->&n->list_lock ->&ul->lock#2 ->&obj_hash[i].lock ->icmp_global.lock ->&dir->lock ->stock_lock ->nl_table_lock ->nl_table_wait.lock ->&____s->seqcount ->&meta->lock ->kfence_freelist_lock ->&____s->seqcount#2 ->&data->lock ->quarantine_lock ->batched_entropy_u8.lock FD: 92 BD: 1 +.-.: (&sk->sk_timer) ->slock-AF_INET FD: 1 BD: 196 ....: &newf->resize_wait FD: 11 BD: 146 ..-.: &kcov->lock ->kcov_remote_lock FD: 161 BD: 1 +.+.: pid_caches_mutex ->&rq->__lock ->slab_mutex FD: 1 BD: 74 ..-.: &list->lock#6 FD: 1 BD: 74 ..-.: &list->lock#7 FD: 1 BD: 1 ..-.: &list->lock#8 FD: 1 BD: 4 +.-.: x25_list_lock FD: 1 BD: 1 +.-.: x25_forward_list_lock FD: 43 BD: 1 +.+.: &type->s_umount_key#45 ->sb_lock ->&dentry->d_lock FD: 144 BD: 1 ++++: &sb->s_type->i_mutex_key#18 ->namespace_sem ->&dentry->d_lock ->tk_core.seq.seqcount FD: 1 BD: 24 ++++: hci_sk_list.lock FD: 1 BD: 1 +.+.: (work_completion)(&(&data->open_timeout)->work) FD: 327 BD: 1 +.+.: &data->open_mutex ->fs_reclaim ->&c->lock ->pool_lock#2 ->&____s->seqcount ->&obj_hash[i].lock ->&x->wait#9 ->hci_index_ida.xa_lock ->&n->list_lock ->cpu_hotplug_lock ->wq_pool_mutex ->pin_fs_lock ->&sb->s_type->i_mutex_key#3 ->&k->list_lock ->gdp_mutex ->lock ->&root->kernfs_rwsem ->bus_type_sem ->sysfs_symlink_target_lock ->&dev->power.lock ->dpm_list_mtx ->uevent_sock_mutex ->subsys mutex#80 ->&dev->devres_lock ->triggers_list_lock ->leds_list_lock ->rfkill_global_mutex ->rfkill_global_mutex.wait_lock ->&p->pi_lock ->&rq->__lock ->&rfkill->lock ->hci_dev_list_lock ->tk_core.seq.seqcount ->hci_sk_list.lock ->(pm_chain_head).rwsem ->&list->lock#11 ->&data->read_wait ->uevent_sock_mutex.wait_lock ->&data->lock FD: 1 BD: 2 ....: hci_index_ida.xa_lock FD: 3 BD: 23 +.+.: subsys mutex#80 ->&k->k_lock FD: 1 BD: 15 ++++: hci_dev_list_lock FD: 21 BD: 75 +.+.: (wq_completion)phy25 ->(work_completion)(&local->reconfig_filter) FD: 235 BD: 7 +.+.: (work_completion)(&hdev->power_on) ->&hdev->req_lock ->fs_reclaim ->pool_lock#2 ->tk_core.seq.seqcount ->hci_sk_list.lock ->&obj_hash[i].lock ->&c->lock ->&data->lock FD: 234 BD: 9 +.+.: &hdev->req_lock ->&obj_hash[i].lock ->&list->lock#9 ->pool_lock#2 ->&list->lock#10 ->&hdev->req_wait_q ->&base->lock ->&rq->__lock ->(&timer.timer) ->&c->lock ->&____s->seqcount ->tk_core.seq.seqcount ->hci_sk_list.lock ->&cfs_rq->removed.lock ->pool_lock ->batched_entropy_u8.lock ->kfence_freelist_lock ->(work_completion)(&(&hdev->interleave_scan)->work) 
->hci_dev_list_lock ->(work_completion)(&hdev->tx_work) ->(work_completion)(&hdev->rx_work) ->&wq->mutex ->&hdev->lock ->&list->lock#11 ->(work_completion)(&hdev->cmd_work) ->(work_completion)(&(&hdev->cmd_timer)->work) ->&data->lock ->&n->list_lock ->&____s->seqcount#2 ->(work_completion)(&(&hdev->rpa_expired)->work) FD: 1 BD: 25 ....: &list->lock#9 FD: 1 BD: 10 ....: &list->lock#10 FD: 29 BD: 17 ....: &hdev->req_wait_q ->&p->pi_lock FD: 1 BD: 19 ....: &list->lock#11 FD: 29 BD: 19 ....: &data->read_wait ->&p->pi_lock FD: 230 BD: 3 +.+.: sk_lock-AF_BLUETOOTH-BTPROTO_HCI ->slock-AF_BLUETOOTH-BTPROTO_HCI ->sock_cookie_ida.xa_lock ->&p->alloc_lock ->pool_lock#2 ->tk_core.seq.seqcount ->hci_sk_list.lock ->&obj_hash[i].lock ->clock-AF_BLUETOOTH ->&c->lock ->&rq->__lock ->&____s->seqcount ->&n->list_lock ->mgmt_chan_list_lock ->hci_dev_list_lock ->&data->lock FD: 1 BD: 4 +...: slock-AF_BLUETOOTH-BTPROTO_HCI FD: 1 BD: 4 ....: sock_cookie_ida.xa_lock FD: 21 BD: 75 +.+.: (wq_completion)phy26 ->(work_completion)(&local->reconfig_filter) FD: 133 BD: 16 +.+.: (work_completion)(&hdev->cmd_work) ->&list->lock#9 ->fs_reclaim ->pool_lock#2 ->tk_core.seq.seqcount ->&list->lock#11 ->&data->read_wait ->&obj_hash[i].lock ->&c->lock ->&rq->__lock ->&____s->seqcount ->batched_entropy_u8.lock ->kfence_freelist_lock ->&meta->lock ->&n->list_lock ->&data->lock ->&____s->seqcount#2 FD: 223 BD: 16 +.+.: (work_completion)(&hdev->rx_work) ->&list->lock#9 ->lock#6 ->fs_reclaim ->pool_lock#2 ->free_vmap_area_lock ->vmap_area_lock ->&____s->seqcount ->init_mm.page_table_lock ->&c->lock ->&n->list_lock ->&hdev->lock ->(console_sem).lock ->console_owner_lock ->console_owner ->&obj_hash[i].lock ->&hdev->req_wait_q ->&rq->__lock ->&base->lock ->chan_list_lock ->rcu_node_0 ->&rcu_state.expedited_wq ->&data->lock ->&____s->seqcount#2 FD: 215 BD: 21 +.+.: &hdev->lock ->fs_reclaim ->pool_lock#2 ->&obj_hash[i].lock ->&x->wait#9 ->&k->list_lock ->lock ->&root->kernfs_rwsem ->bus_type_sem ->sysfs_symlink_target_lock ->&c->lock ->&n->list_lock ->&dev->power.lock ->dpm_list_mtx ->uevent_sock_mutex ->&k->k_lock ->subsys mutex#80 ->&list->lock#9 ->&hdev->unregister_lock ->hci_cb_list_lock ->&base->lock ->tk_core.seq.seqcount ->hci_sk_list.lock ->uevent_sock_mutex.wait_lock ->&p->pi_lock ->&rq->__lock ->&____s->seqcount ->(work_completion)(&(&conn->disc_work)->work) ->(work_completion)(&(&conn->auto_accept_work)->work) ->(work_completion)(&(&conn->idle_work)->work) ->&list->lock#12 ->rcu_node_0 ->dev_pm_qos_sysfs_mtx ->kernfs_idr_lock ->deferred_probe_mutex ->device_links_lock ->mmu_notifier_invalidate_range_start ->&____s->seqcount#2 ->rlock-AF_BLUETOOTH ->&data->lock ->(work_completion)(&(&hdev->discov_off)->work) ->(work_completion)(&(&hdev->service_cache)->work) ->(work_completion)(&(&hdev->rpa_expired)->work) ->remove_cache_srcu FD: 129 BD: 72 +.+.: &tn->idrinfo->lock#2 ->&rq->__lock ->fs_reclaim ->&c->lock ->&n->list_lock ->pool_lock#2 ->&obj_hash[i].lock FD: 2 BD: 72 +...: &p->tcfa_lock FD: 130 BD: 22 +.+.: &hdev->unregister_lock ->fs_reclaim ->pool_lock#2 ->&hdev->cmd_sync_work_lock ->&rq->__lock ->&c->lock FD: 1 BD: 23 +.+.: &hdev->cmd_sync_work_lock FD: 235 BD: 7 +.+.: (work_completion)(&hdev->cmd_sync_work) ->&hdev->cmd_sync_work_lock ->&hdev->req_lock ->&obj_hash[i].lock ->pool_lock#2 FD: 1 BD: 23 +.+.: &conn->ident_lock FD: 1 BD: 24 ....: &list->lock#12 FD: 28 BD: 25 +.+.: &conn->chan_lock ->&rq->__lock FD: 33 BD: 16 +.+.: (work_completion)(&hdev->tx_work) ->&list->lock#12 ->tk_core.seq.seqcount ->&list->lock#11 
->&data->read_wait ->&list->lock#9 FD: 2 BD: 7 +.+.: (work_completion)(&conn->pending_rx_work) ->&list->lock#13 FD: 1 BD: 24 ....: &list->lock#13 FD: 1 BD: 5 +...: clock-AF_BLUETOOTH FD: 1 BD: 24 ....: rlock-AF_BLUETOOTH FD: 1 BD: 4 ....: wlock-AF_BLUETOOTH FD: 28 BD: 1 +.+.: &sb->s_type->i_mutex_key#19 ->&rq->__lock FD: 1 BD: 1 +.+.: &undo_list->lock FD: 1 BD: 72 +...: &nr_netdev_addr_lock_key FD: 1 BD: 72 +...: listen_lock FD: 192 BD: 6 +.+.: (work_completion)(&cp->cp_down_w) ->&cp->cp_cm_lock ->&cp->cp_lock ->&obj_hash[i].lock ->(work_completion)(&(&cp->cp_conn_w)->work) ->k-sk_lock-AF_INET6 ->k-slock-AF_INET6 ->&rq->__lock ->pool_lock#2 ->&dir->lock ->&sb->s_type->i_lock_key#8 ->&xa->xa_lock#6 ->&fsnotify_mark_srcu FD: 21 BD: 75 +.+.: (wq_completion)phy23 ->(work_completion)(&local->reconfig_filter) FD: 167 BD: 1 +.+.: (wq_completion)wg-crypt-wg2#11 ->(work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) ->(work_completion)(&peer->transmit_packet_work) FD: 133 BD: 4 +.+.: (work_completion)(&(&l->destroy_dwork)->work) ->&cgrp->pidlist_mutex ->&obj_hash[i].lock FD: 237 BD: 1 +.+.: (wq_completion)hci4 ->(work_completion)(&hdev->power_on) ->(work_completion)(&hdev->cmd_sync_work) FD: 230 BD: 1 +.+.: (wq_completion)hci4#2 ->(work_completion)(&hdev->cmd_work) ->(work_completion)(&hdev->rx_work) ->(work_completion)(&hdev->tx_work) ->(work_completion)(&conn->pending_rx_work) ->(work_completion)(&(&hdev->cmd_timer)->work) ->(work_completion)(&(&conn->disc_work)->work) FD: 237 BD: 1 +.+.: (wq_completion)hci3 ->(work_completion)(&hdev->power_on) ->(work_completion)(&hdev->cmd_sync_work) FD: 230 BD: 1 +.+.: (wq_completion)hci3#2 ->(work_completion)(&hdev->cmd_work) ->(work_completion)(&hdev->rx_work) ->(work_completion)(&hdev->tx_work) ->(work_completion)(&conn->pending_rx_work) ->(work_completion)(&(&hdev->cmd_timer)->work) ->(work_completion)(&(&conn->disc_work)->work) FD: 2 BD: 12 +.+.: rdma_nets.xa_lock ->pool_lock#2 FD: 1 BD: 4 +.+.: &____s->seqcount#11 FD: 2 BD: 3 +.+.: &(&net->ipv4.ping_group_range.lock)->lock ->&____s->seqcount#11 FD: 2 BD: 72 +.+.: &r->consumer_lock ->&r->producer_lock FD: 1 BD: 3450 +.-.: &r->producer_lock FD: 19 BD: 3427 +...: &bridge_netdev_addr_lock_key ->pool_lock#2 ->&obj_hash[i].lock ->krc.lock ->&c->lock ->&n->list_lock ->batched_entropy_u8.lock ->kfence_freelist_lock ->&____s->seqcount#2 ->&____s->seqcount FD: 48 BD: 78 +.-.: &br->hash_lock ->&____s->seqcount ->pool_lock#2 ->&c->lock ->nl_table_lock ->&obj_hash[i].lock ->nl_table_wait.lock ->&____s->seqcount#2 ->&n->list_lock ->_xmit_ETHER ->quarantine_lock ->batched_entropy_u8.lock ->kfence_freelist_lock ->&meta->lock FD: 131 BD: 3513 +.+.: j1939_netdev_lock ->&rq->__lock ->fs_reclaim ->&____s->seqcount#2 ->&____s->seqcount ->pool_lock#2 ->&c->lock ->&net->can.rcvlists_lock ->&obj_hash[i].lock ->&priv->lock FD: 7 BD: 3425 +...: &dev_addr_list_lock_key#2 ->pool_lock#2 ->&c->lock FD: 8 BD: 72 +...: &bat_priv->tvlv.handler_list_lock ->pool_lock#2 ->&c->lock ->&n->list_lock FD: 14 BD: 79 +...: &bat_priv->tvlv.container_list_lock ->pool_lock#2 ->&obj_hash[i].lock ->&c->lock ->&____s->seqcount#2 ->&n->list_lock ->batched_entropy_u8.lock ->kfence_freelist_lock ->&meta->lock ->quarantine_lock ->&____s->seqcount FD: 19 BD: 3425 +...: &batadv_netdev_addr_lock_key 
->pool_lock#2 ->&c->lock ->&obj_hash[i].lock ->krc.lock FD: 10 BD: 81 +...: &bat_priv->softif_vlan_list_lock ->pool_lock#2 ->&c->lock ->&____s->seqcount#2 ->&____s->seqcount ->&n->list_lock ->batched_entropy_u8.lock ->kfence_freelist_lock FD: 20 BD: 80 +...: key#16 ->&bat_priv->softif_vlan_list_lock ->&obj_hash[i].lock ->pool_lock#2 ->krc.lock FD: 4 BD: 79 +...: &bat_priv->tt.changes_list_lock ->&obj_hash[i].lock ->pool_lock#2 FD: 33 BD: 1 ..-.: &(&bat_priv->nc.work)->timer FD: 65 BD: 1 +.+.: (wq_completion)bat_events ->(work_completion)(&(&bat_priv->nc.work)->work) ->(work_completion)(&(&bat_priv->mcast.work)->work) ->(work_completion)(&(&bat_priv->orig_work)->work) ->(work_completion)(&(&forw_packet_aggr->delayed_work)->work) ->(work_completion)(&(&bat_priv->tt.work)->work) ->&rq->__lock ->(work_completion)(&(&bat_priv->dat.work)->work) ->(work_completion)(&(&bat_priv->bla.work)->work) ->&cfs_rq->removed.lock ->&obj_hash[i].lock FD: 34 BD: 6 +.+.: (work_completion)(&(&bat_priv->nc.work)->work) ->key#17 ->key#18 ->&obj_hash[i].lock ->&base->lock ->rcu_node_0 ->&rq->__lock ->&rcu_state.expedited_wq ->pool_lock#2 ->&cfs_rq->removed.lock FD: 1 BD: 7 +...: key#17 FD: 1 BD: 7 +...: key#18 FD: 161 BD: 73 +.+.: init_lock ->slab_mutex ->fs_reclaim ->&zone->lock ->&rq->__lock ->&____s->seqcount ->pool_lock#2 ->&obj_hash[i].lock ->&base->lock ->crngs.lock FD: 1 BD: 3437 +.-.: deferred_lock FD: 872 BD: 2 +.+.: deferred_process_work ->rtnl_mutex ->rtnl_mutex.wait_lock ->&p->pi_lock ->&rq->__lock FD: 1 BD: 75 ....: target_list_lock FD: 54 BD: 75 +.-.: &br->lock ->&br->hash_lock ->lweventlist_lock ->&obj_hash[i].lock ->&base->lock ->pool_lock#2 ->&dir->lock#2 ->deferred_lock ->(console_sem).lock ->nl_table_lock ->nl_table_wait.lock ->&br->multicast_lock ->&c->lock ->&____s->seqcount ->&n->list_lock ->batched_entropy_u8.lock ->kfence_freelist_lock ->&meta->lock ->&____s->seqcount#2 FD: 33 BD: 1 ..-.: &(&bat_priv->mcast.work)->timer FD: 40 BD: 6 +.+.: (work_completion)(&(&bat_priv->mcast.work)->work) ->pool_lock#2 ->&bat_priv->mcast.mla_lock ->&obj_hash[i].lock ->&base->lock ->&rq->__lock ->rcu_node_0 ->kfence_freelist_lock ->&meta->lock ->&rcu_state.expedited_wq ->&cfs_rq->removed.lock ->quarantine_lock FD: 25 BD: 7 +.+.: &bat_priv->mcast.mla_lock ->pool_lock#2 ->key#16 ->&bat_priv->tt.changes_list_lock ->&bat_priv->tvlv.container_list_lock ->&c->lock ->&____s->seqcount ->&obj_hash[i].lock ->&bat_priv->softif_vlan_list_lock ->krc.lock FD: 157 BD: 1 +.+.: (wq_completion)bond0#7 ->(work_completion)(&(&slave->notify_work)->work) FD: 156 BD: 3511 +.+.: (work_completion)(&(&slave->notify_work)->work) ->&obj_hash[i].lock ->&base->lock ->&rq->__lock ->pool_lock#2 ->rtnl_mutex.wait_lock ->&p->pi_lock ->rcu_node_0 ->&cfs_rq->removed.lock ->stock_lock ->&rcu_state.expedited_wq FD: 2 BD: 3414 +.+.: &bond->stats_lock/1 FD: 33 BD: 1 ..-.: &(&slave->notify_work)->timer FD: 237 BD: 1 +.+.: (wq_completion)hci1 ->(work_completion)(&hdev->power_on) ->(work_completion)(&hdev->cmd_sync_work) FD: 157 BD: 1 +.+.: (wq_completion)bond0#9 ->(work_completion)(&(&slave->notify_work)->work) FD: 157 BD: 1 +.+.: (wq_completion)bond0#3 ->(work_completion)(&(&slave->notify_work)->work) FD: 33 BD: 1 ..-.: &(&bat_priv->orig_work)->timer FD: 32 BD: 6 +.+.: (work_completion)(&(&bat_priv->orig_work)->work) ->key#19 ->&obj_hash[i].lock ->&base->lock ->&rq->__lock ->rcu_node_0 ->&rcu_state.expedited_wq ->&cfs_rq->removed.lock ->pool_lock#2 FD: 1 BD: 74 +...: key#19 FD: 167 BD: 1 +.+.: (wq_completion)wg-crypt-wg1#9 ->(work_completion)(&({ do 
{ const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) ->(work_completion)(&peer->transmit_packet_work) FD: 42 BD: 75 +.+.: &hard_iface->bat_iv.ogm_buff_mutex ->crngs.lock ->pool_lock#2 ->batched_entropy_u8.lock ->&bat_priv->forw_bat_list_lock ->&obj_hash[i].lock ->&c->lock ->&____s->seqcount ->rcu_node_0 ->&rcu_state.expedited_wq ->&rq->__lock ->&bat_priv->tt.commit_lock ->&bat_priv->tvlv.container_list_lock ->&____s->seqcount#2 ->&n->list_lock ->&cfs_rq->removed.lock ->kfence_freelist_lock FD: 14 BD: 76 +...: &bat_priv->forw_bat_list_lock ->&obj_hash[i].lock ->&base->lock FD: 157 BD: 1 +.+.: (wq_completion)bond0#10 ->(work_completion)(&(&slave->notify_work)->work) FD: 31 BD: 1 ..-.: drivers/net/wireguard/ratelimiter.c:20 FD: 32 BD: 2 +.+.: (gc_work).work ->tk_core.seq.seqcount ->"ratelimiter_table_lock" ->rcu_node_0 ->&rcu_state.expedited_wq ->&rq->__lock ->&obj_hash[i].lock ->&base->lock ->&cfs_rq->removed.lock ->pool_lock#2 FD: 1 BD: 3 +.+.: "ratelimiter_table_lock" FD: 33 BD: 1 ..-.: &(&hdev->cmd_timer)->timer FD: 41 BD: 16 +.+.: (work_completion)(&(&hdev->cmd_timer)->work) ->(console_sem).lock ->console_owner_lock ->console_owner ->&rq->__lock ->rcu_node_0 ->&rcu_state.expedited_wq FD: 1 BD: 76 +.-.: &svc->sched_lock FD: 19 BD: 3427 +...: _xmit_NONE ->pool_lock#2 ->&obj_hash[i].lock ->krc.lock FD: 1 BD: 72 +...: lock#9 FD: 193 BD: 73 +.+.: team->team_lock_key#3 ->fs_reclaim ->netpoll_srcu ->net_rwsem ->&tn->lock ->_xmit_ETHER ->&dir->lock#2 ->input_pool.lock ->&ndev->lock ->&obj_hash[i].lock ->nl_table_lock ->nl_table_wait.lock ->&in_dev->mc_tomb_lock ->&im->lock ->cbs_list_lock ->sysfs_symlink_target_lock ->lock ->&root->kernfs_rwsem ->&c->lock ->&____s->seqcount ->lweventlist_lock ->pool_lock#2 ->(console_sem).lock ->&rq->__lock FD: 1 BD: 109 +..-: &____s->seqcount#12 FD: 167 BD: 1 +.+.: (wq_completion)wg-crypt-wg2#10 ->(work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) ->(work_completion)(&peer->transmit_packet_work) FD: 157 BD: 1 +.+.: (wq_completion)bond0#11 ->(work_completion)(&(&slave->notify_work)->work) FD: 1 BD: 2 +.+.: br_ioctl_mutex.wait_lock FD: 1 BD: 3429 +.-.: &hsr->list_lock FD: 21 BD: 75 +.+.: (wq_completion)phy27 ->(work_completion)(&local->reconfig_filter) FD: 33 BD: 1 ..-.: &(&forw_packet_aggr->delayed_work)->timer FD: 46 BD: 73 +.+.: (work_completion)(&(&forw_packet_aggr->delayed_work)->work) ->&hard_iface->bat_iv.ogm_buff_mutex ->&bat_priv->forw_bat_list_lock ->&obj_hash[i].lock ->pool_lock#2 ->&rq->__lock ->&c->lock ->&n->list_lock ->batched_entropy_u8.lock ->kfence_freelist_lock ->&____s->seqcount#2 ->&____s->seqcount ->quarantine_lock ->rcu_node_0 ->&cfs_rq->removed.lock ->&meta->lock ->&rcu_state.expedited_wq ->&base->lock FD: 8 BD: 3425 +...: &vlan_netdev_addr_lock_key ->pool_lock#2 ->&c->lock ->&____s->seqcount FD: 21 BD: 75 +.-.: (&app->join_timer) ->&app->lock ->&list->lock#14 FD: 16 BD: 79 +.-.: &app->lock ->batched_entropy_u32.lock ->&obj_hash[i].lock ->&base->lock ->pool_lock#2 ->&c->lock ->&n->list_lock ->&list->lock#14 ->batched_entropy_u8.lock 
->kfence_freelist_lock ->&____s->seqcount#2 ->&____s->seqcount FD: 1 BD: 80 ..-.: &list->lock#14 FD: 19 BD: 75 +.-.: (&app->join_timer)#2 ->&app->lock#2 ->&list->lock#15 ->batched_entropy_u32.lock ->&obj_hash[i].lock ->&base->lock FD: 9 BD: 76 +.-.: &app->lock#2 ->pool_lock#2 ->&c->lock ->&list->lock#15 ->&n->list_lock FD: 1 BD: 77 ..-.: &list->lock#15 FD: 8 BD: 3425 +...: &macvlan_netdev_addr_lock_key ->pool_lock#2 ->&c->lock ->&____s->seqcount FD: 8 BD: 3425 +...: &dev_addr_list_lock_key#3 ->pool_lock#2 ->&c->lock ->&n->list_lock FD: 1 BD: 72 ....: &xa->xa_lock#13 FD: 42 BD: 3426 +...: &dev_addr_list_lock_key#3/1 ->&c->lock ->&____s->seqcount#2 ->&____s->seqcount ->&obj_hash[i].lock ->pool_lock#2 ->krc.lock ->_xmit_ETHER FD: 2 BD: 72 +.+.: &tap_major->minor_lock ->pool_lock#2 FD: 3 BD: 72 +.+.: subsys mutex#81 ->&k->k_lock FD: 899 BD: 1 .+.+: kn->active#51 ->fs_reclaim ->&kernfs_locks->open_file_mutex[count] ->nsim_bus_dev_list_lock ->nsim_bus_dev_list_lock.wait_lock ->&p->pi_lock ->&c->lock ->&____s->seqcount#2 ->&____s->seqcount FD: 896 BD: 9 +.+.: nsim_bus_dev_list_lock ->fs_reclaim ->pool_lock#2 ->nsim_bus_dev_ids.xa_lock ->&x->wait#9 ->&obj_hash[i].lock ->&____s->seqcount ->&k->list_lock ->lock ->&root->kernfs_rwsem ->bus_type_sem ->sysfs_symlink_target_lock ->&k->k_lock ->&c->lock ->&dev->power.lock ->dpm_list_mtx ->uevent_sock_mutex ->device_links_lock ->nsim_bus_dev_list_lock.wait_lock ->&rq->__lock ->deferred_probe_mutex ->subsys mutex#82 ->&n->list_lock ->remove_cache_srcu ->dev_pm_qos_sysfs_mtx ->kernfs_idr_lock ->mmu_notifier_invalidate_range_start ->&____s->seqcount#2 FD: 899 BD: 1 .+.+: kn->active#52 ->&rq->__lock ->fs_reclaim ->&c->lock ->&____s->seqcount ->&kernfs_locks->open_file_mutex[count] ->nsim_bus_dev_list_lock ->nsim_bus_dev_list_lock.wait_lock ->&p->pi_lock FD: 1 BD: 10 ....: nsim_bus_dev_ids.xa_lock FD: 2 BD: 18 +.+.: devlinks.xa_lock ->pool_lock#2 FD: 131 BD: 1 +.+.: (wq_completion)wg-kex-wg0#13 ->(work_completion)(&peer->transmit_handshake_work) FD: 10 BD: 18 +.+.: &xa->xa_lock#14 ->pool_lock#2 ->&obj_hash[i].lock ->&c->lock ->&n->list_lock FD: 1 BD: 3558 +...: &data->fib_event_queue_lock FD: 1 BD: 18 ....: &(&fn_net->fib_chain)->lock FD: 134 BD: 18 +.+.: (work_completion)(&data->fib_event_work) ->&data->fib_event_queue_lock ->&data->fib_lock ->&rq->__lock FD: 132 BD: 19 +.+.: &data->fib_lock ->fs_reclaim ->pool_lock#2 ->&obj_hash[i].lock ->&base->lock ->&pool->lock ->&rq->__lock ->(&timer.timer) ->&c->lock ->&____s->seqcount ->&n->list_lock ->&meta->lock ->kfence_freelist_lock ->batched_entropy_u8.lock ->pool_lock ->remove_cache_srcu ->&____s->seqcount#2 ->&cfs_rq->removed.lock ->rcu_node_0 ->&rcu_state.expedited_wq ->quarantine_lock FD: 1 BD: 72 +...: &devlink_port->type_lock FD: 131 BD: 75 +.+.: bpf_devs_lock ->fs_reclaim ->pool_lock#2 ->&rq->__lock ->&obj_hash[i].lock ->&c->lock ->&n->list_lock ->remove_cache_srcu ->stock_lock ->per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) FD: 28 BD: 72 +.+.: (work_completion)(&(&devlink_port->type_warn_dw)->work) ->&rq->__lock FD: 1 BD: 72 +.+.: &vn->sock_lock FD: 31 BD: 1 ..-.: &(&nsim_dev->trap_data->trap_report_dw)->timer FD: 38 BD: 18 +.+.: (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) ->&rq->__lock ->&obj_hash[i].lock ->&base->lock ->pool_lock#2 ->rcu_node_0 ->&rcu_state.expedited_wq ->&cfs_rq->removed.lock FD: 1 BD: 10 +.+.: nsim_bus_dev_list_lock.wait_lock FD: 2 BD: 1 +.-.: (&tun->flow_gc_timer) ->&tun->lock FD: 1 BD: 73 +.-.: &tun->lock FD: 31 BD: 1 ..-.: &(&hwstats->traffic_dw)->timer FD: 32 BD: 18 
+.+.: (work_completion)(&(&hwstats->traffic_dw)->work) ->&hwstats->hwsdev_list_lock ->&obj_hash[i].lock ->&base->lock ->&rq->__lock FD: 31 BD: 73 +.+.: &hwstats->hwsdev_list_lock ->&rq->__lock ->rcu_node_0 ->&rcu_state.expedited_wq FD: 1 BD: 10 +.+.: subsys mutex#82 FD: 132 BD: 1 +.+.: (wq_completion)wg-kex-wg0#17 ->(work_completion)(&peer->transmit_handshake_work) ->(work_completion)(&peer->clear_peer_work) ->&rq->__lock FD: 151 BD: 72 ++++: devnet_rename_sem ->(console_sem).lock ->console_owner_lock ->console_owner ->&rq->__lock ->fs_reclaim ->pool_lock#2 ->&k->list_lock ->&root->kernfs_rwsem ->kernfs_rename_lock ->uevent_sock_mutex ->&obj_hash[i].lock ->&c->lock ->&n->list_lock ->&____s->seqcount ->quarantine_lock ->&____s->seqcount#2 FD: 1 BD: 285 ....: kernfs_rename_lock FD: 326 BD: 78 +.+.: &nft_net->commit_mutex ->fs_reclaim ->pool_lock#2 ->rcu_node_0 ->&obj_hash[i].lock ->&rq->__lock ->&rnp->exp_wq[2] ->&rnp->exp_wq[0] ->&c->lock ->&n->list_lock ->&rnp->exp_lock ->rcu_state.exp_mutex ->rcu_state.exp_mutex.wait_lock ->&p->pi_lock ->stock_lock ->batched_entropy_u32.lock ->&____s->seqcount ->(work_completion)(&(&priv->gc_work)->work) ->rcu_state.barrier_mutex ->(work_completion)(&ht->run_work) ->&ht->mutex ->rcu_state.barrier_mutex.wait_lock ->&lock->wait_lock ->nl_table_lock ->nl_table_wait.lock ->rlock-AF_NETLINK ->remove_cache_srcu ->&p->alloc_lock ->nf_ct_proto_mutex ->nf_hook_mutex ->cpu_hotplug_lock ->tk_core.seq.seqcount ->(console_sem).lock ->console_owner_lock ->console_owner ->batched_entropy_u8.lock ->kfence_freelist_lock ->&meta->lock ->&base->lock ->flowtable_lock ->(work_completion)(&(&flowtable->gc_work)->work) ->&ht->lock ->&cfs_rq->removed.lock ->&____s->seqcount#2 ->nf_connlabels_lock ->krc.lock FD: 33 BD: 20 +.+.: &nsim_trap_data->trap_lock ->pool_lock#2 ->crngs.lock ->&nsim_dev->fa_cookie_lock ->&obj_hash[i].lock ->&c->lock ->&n->list_lock ->&____s->seqcount ->&zone->lock ->quarantine_lock ->batched_entropy_u8.lock ->kfence_freelist_lock ->&meta->lock ->&____s->seqcount#2 ->&base->lock ->&pgdat->kswapd_wait FD: 1 BD: 21 +...: &nsim_dev->fa_cookie_lock FD: 131 BD: 1 +.+.: (wq_completion)wg-kex-wg0#15 ->(work_completion)(&peer->transmit_handshake_work) FD: 457 BD: 72 +.+.: &wg->device_update_lock ->&wg->static_identity.lock ->fs_reclaim ->&____s->seqcount ->pool_lock#2 ->&c->lock ->pcpu_alloc_mutex ->&handshake->lock ->&obj_hash[i].lock ->tk_core.seq.seqcount ->&table->lock ->&peer->endpoint_lock ->pool_lock ->&rq->__lock ->mmu_notifier_invalidate_range_start ->&sb->s_type->i_lock_key#8 ->&dir->lock ->k-sk_lock-AF_INET ->k-slock-AF_INET ->cpu_hotplug_lock ->k-sk_lock-AF_INET6 ->k-slock-AF_INET6 ->&wg->socket_update_lock ->rcu_node_0 ->&rnp->exp_wq[3] ->&list->lock#17 ->&pool->lock/1 ->&rnp->exp_wq[0] ->&n->list_lock ->&____s->seqcount#2 ->&rnp->exp_wq[2] ->rcu_state.exp_mutex.wait_lock ->&p->pi_lock ->&zone->lock ->&table->hash[i].lock ->k-clock-AF_INET ->&xa->xa_lock#6 ->&fsnotify_mark_srcu ->k-clock-AF_INET6 ->(&peer->timer_retransmit_handshake) ->&base->lock ->(&peer->timer_send_keepalive) ->(&peer->timer_new_handshake) ->(&peer->timer_zero_key_material) ->(&peer->timer_persistent_keepalive) ->(work_completion)(&peer->clear_peer_work) ->&peer->keypairs.keypair_update_lock ->&wq->mutex ->napi_hash_lock ->&table->lock#2 ->wq_pool_mutex ->wq_mayday_lock ->&x->wait ->pcpu_lock ->&r->consumer_lock#2 ->rcu_state.barrier_mutex ->init_lock ->&cfs_rq->removed.lock ->&rnp->exp_lock ->rcu_state.exp_mutex ->rcu_state.barrier_mutex.wait_lock ->&rcu_state.expedited_wq 
->&data->lock ->stock_lock FD: 133 BD: 129 ++++: &wg->static_identity.lock ->&handshake->lock ->&sem->wait_lock ->&p->pi_lock ->&rq->__lock FD: 132 BD: 131 ++++: &handshake->lock ->crngs.lock ->tk_core.seq.seqcount ->&table->lock#2 ->fs_reclaim ->&c->lock ->&____s->seqcount ->pool_lock#2 ->&____s->seqcount#2 ->&rq->__lock ->&n->list_lock ->remove_cache_srcu ->&sem->wait_lock ->&cfs_rq->removed.lock FD: 1 BD: 73 +.+.: &table->lock FD: 59 BD: 132 ++-.: &peer->endpoint_lock ->pool_lock#2 ->&obj_hash[i].lock FD: 1 BD: 72 +...: _xmit_SIT FD: 19 BD: 3425 +...: &bridge_netdev_addr_lock_key/1 ->pool_lock#2 ->&c->lock ->&____s->seqcount ->&obj_hash[i].lock ->krc.lock ->&n->list_lock FD: 17 BD: 75 +.-.: (&app->periodic_timer) ->&app->lock FD: 41 BD: 72 +.-.: (&brmctx->ip6_own_query.timer) ->&br->multicast_lock FD: 40 BD: 3436 +.-.: &br->multicast_lock ->&obj_hash[i].lock ->&base->lock ->pool_lock#2 ->&dir->lock#2 ->deferred_lock ->&c->lock ->nl_table_lock ->nl_table_wait.lock ->&____s->seqcount ->&n->list_lock ->&____s->seqcount#2 ->batched_entropy_u8.lock ->kfence_freelist_lock ->&meta->lock ->init_task.mems_allowed_seq.seqcount ->quarantine_lock ->rlock-AF_NETLINK FD: 41 BD: 72 +.-.: (&brmctx->ip4_own_query.timer) ->&br->multicast_lock FD: 66 BD: 1 +.-.: (&in_dev->mr_ifc_timer) ->&obj_hash[i].lock ->batched_entropy_u32.lock ->&base->lock FD: 1 BD: 72 +...: _xmit_TUNNEL FD: 19 BD: 3435 +...: _xmit_IPGRE ->pool_lock#2 ->&obj_hash[i].lock ->krc.lock ->&c->lock ->&____s->seqcount#2 ->&n->list_lock FD: 31 BD: 1 ..-.: &(&conn->info_timer)->timer FD: 29 BD: 24 +.+.: (work_completion)(&(&conn->info_timer)->work) ->&conn->chan_lock FD: 19 BD: 72 +...: _xmit_TUNNEL6 ->&c->lock ->pool_lock#2 ->&obj_hash[i].lock ->krc.lock FD: 31 BD: 1 ..-.: &(&br->gc_work)->timer FD: 52 BD: 3429 +.-.: _xmit_TUNNEL6#2 ->&obj_hash[i].lock ->pool_lock#2 ->&data->lock FD: 52 BD: 73 +.+.: (work_completion)(&(&br->gc_work)->work) ->&obj_hash[i].lock ->&base->lock ->&rq->__lock ->rcu_node_0 ->&cfs_rq->removed.lock FD: 884 BD: 12 +.+.: &devlink->lock_key#4 ->crngs.lock ->fs_reclaim ->devlinks.xa_lock ->&obj_hash[i].lock ->nl_table_lock ->nl_table_wait.lock ->&xa->xa_lock#14 ->&c->lock ->&____s->seqcount ->pool_lock#2 ->&rq->__lock ->pcpu_alloc_mutex ->&base->lock ->pin_fs_lock ->&sb->s_type->i_mutex_key#3 ->batched_entropy_u32.lock ->rtnl_mutex ->rtnl_mutex.wait_lock ->&p->pi_lock ->&(&fn_net->fib_chain)->lock ->&devlink_port->type_lock ->stack_depot_init_mutex ->&nsim_trap_data->trap_lock ->rcu_node_0 ->&rcu_state.expedited_wq ->&dentry->d_lock ->&fsnotify_mark_srcu ->&sb->s_type->i_lock_key#7 ->&s->s_inode_list_lock ->&xa->xa_lock#6 ->mount_lock ->rcu_state.barrier_mutex ->dev_base_lock ->lweventlist_lock ->netdev_unregistering_wq.lock ->krc.lock ->&dir->lock#2 ->(work_completion)(&(&devlink_port->type_warn_dw)->work) ->(work_completion)(&(&hwstats->traffic_dw)->work) ->&hwstats->hwsdev_list_lock ->(work_completion)(&data->fib_flush_work) ->(work_completion)(&data->fib_event_work) ->(work_completion)(&ht->run_work) ->&ht->mutex ->(work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) ->pcpu_lock ->&n->list_lock ->&region->snapshot_lock ->&____s->seqcount#2 ->stock_lock ->quarantine_lock ->rcu_state.barrier_mutex.wait_lock ->&x->wait#10 ->&rnp->exp_lock ->rcu_state.exp_mutex ->remove_cache_srcu ->pgd_lock ->key ->percpu_counters_lock FD: 45 BD: 3426 +...: &dev_addr_list_lock_key/1 ->_xmit_ETHER ->&c->lock ->&____s->seqcount ->&obj_hash[i].lock ->krc.lock ->pool_lock#2 ->&n->list_lock ->&bridge_netdev_addr_lock_key
->&____s->seqcount#2 ->_xmit_NONE ->&dev_addr_list_lock_key FD: 131 BD: 1 +.+.: (wq_completion)wg-kex-wg0#21 ->(work_completion)(&peer->transmit_handshake_work) FD: 131 BD: 1 +.+.: (wq_completion)wg-kex-wg0#19 ->(work_completion)(&peer->transmit_handshake_work) FD: 42 BD: 3425 +...: &dev_addr_list_lock_key#2/1 ->_xmit_ETHER ->&c->lock ->&obj_hash[i].lock ->krc.lock ->&n->list_lock FD: 41 BD: 1 +.-.: (&pmctx->ip6_own_query.timer) ->&br->multicast_lock FD: 41 BD: 1 +.-.: (&pmctx->ip4_own_query.timer) ->&br->multicast_lock FD: 1 BD: 6 +.+.: genl_mutex.wait_lock FD: 19 BD: 3425 +...: _xmit_ETHER/1 ->&c->lock ->&____s->seqcount ->&____s->seqcount#2 ->&obj_hash[i].lock ->krc.lock ->&n->list_lock FD: 26 BD: 72 +.-.: (&hsr->announce_timer) FD: 25 BD: 3427 +.-.: &hsr->seqnr_lock ->pool_lock#2 ->&obj_hash[i].lock ->&meta->lock ->kfence_freelist_lock ->&data->lock ->quarantine_lock FD: 1 BD: 3428 +.-.: &new_node->seq_out_lock FD: 1 BD: 72 +.+.: &nn->netlink_tap_lock FD: 19 BD: 3426 +...: &batadv_netdev_addr_lock_key/1 ->&c->lock ->&obj_hash[i].lock ->krc.lock ->&____s->seqcount#2 ->&n->list_lock FD: 15 BD: 72 +.-.: (&hsr->prune_timer) ->&hsr->list_lock ->&obj_hash[i].lock ->&base->lock FD: 33 BD: 1 ..-.: &(&bat_priv->tt.work)->timer FD: 37 BD: 6 +.+.: (work_completion)(&(&bat_priv->tt.work)->work) ->key#16 ->key#20 ->&rq->__lock ->&bat_priv->tt.req_list_lock ->&bat_priv->tt.roam_list_lock ->&obj_hash[i].lock ->&base->lock ->&cfs_rq->removed.lock ->rcu_node_0 ->pool_lock#2 ->&rcu_state.expedited_wq FD: 1 BD: 7 +...: key#20 FD: 1 BD: 7 +...: &bat_priv->tt.req_list_lock FD: 1 BD: 7 +...: &bat_priv->tt.roam_list_lock FD: 43 BD: 3426 +...: &vlan_netdev_addr_lock_key/1 ->_xmit_ETHER ->pool_lock#2 ->&c->lock ->&____s->seqcount#2 ->&____s->seqcount ->&obj_hash[i].lock ->krc.lock ->&n->list_lock ->&dev_addr_list_lock_key FD: 42 BD: 3425 +...: &macvlan_netdev_addr_lock_key/1 ->_xmit_ETHER ->&c->lock ->&____s->seqcount ->&____s->seqcount#2 ->&obj_hash[i].lock ->pool_lock#2 ->krc.lock ->&n->list_lock FD: 95 BD: 1 +.-.: (&ndev->rs_timer) ->&ndev->lock ->pool_lock#2 ->&dir->lock#2 ->&ul->lock#2 ->&c->lock ->&____s->seqcount ->&____s->seqcount#2 ->&n->list_lock ->batched_entropy_u8.lock ->kfence_freelist_lock ->&zone->lock ->init_task.mems_allowed_seq.seqcount ->key#26 FD: 19 BD: 76 +...: &ipvlan->addrs_lock ->pool_lock#2 ->batched_entropy_u8.lock ->kfence_freelist_lock ->&obj_hash[i].lock ->krc.lock ->&c->lock FD: 1 BD: 73 +.-.: &list->lock#16 FD: 33 BD: 72 +.+.: (work_completion)(&port->bc_work) ->&list->lock#16 ->&obj_hash[i].lock ->&rq->__lock ->pool_lock#2 ->quarantine_lock ->&meta->lock ->kfence_freelist_lock FD: 42 BD: 3425 +...: &macsec_netdev_addr_lock_key/1 ->_xmit_ETHER ->&c->lock ->&____s->seqcount ->pool_lock#2 ->&obj_hash[i].lock ->krc.lock ->&n->list_lock ->&____s->seqcount#2 FD: 22 BD: 72 +...: dev->qdisc_tx_busylock ?: &qdisc_tx_busylock#2 ->&sch->q.lock FD: 20 BD: 3428 +.-.: key#21 ->&obj_hash[i].lock ->pool_lock#2 ->krc.lock ->&entry->crc_lock FD: 26 BD: 76 +...: &bat_priv->tt.commit_lock ->key#16 ->&bat_priv->softif_vlan_list_lock ->&bat_priv->tt.changes_list_lock ->&bat_priv->tt.last_changeset_lock ->pool_lock#2 ->&bat_priv->tvlv.container_list_lock ->&obj_hash[i].lock ->&c->lock ->&____s->seqcount#2 ->&n->list_lock ->&meta->lock ->kfence_freelist_lock ->&____s->seqcount ->batched_entropy_u8.lock FD: 1 BD: 3429 +.-.: &entry->crc_lock FD: 1 BD: 73 +.+.: &wg->socket_update_lock FD: 16 BD: 3467 +.-.: &list->lock#17 ->&data->lock ->&obj_hash[i].lock ->pool_lock#2 ->quarantine_lock FD: 200 BD: 73 
+.+.: team->team_lock_key#9 ->fs_reclaim ->netpoll_srcu ->net_rwsem ->&tn->lock ->_xmit_ETHER ->&dir->lock#2 ->input_pool.lock ->&ndev->lock ->&c->lock ->&obj_hash[i].lock ->nl_table_lock ->nl_table_wait.lock ->&in_dev->mc_tomb_lock ->&im->lock ->cbs_list_lock ->&n->list_lock ->sysfs_symlink_target_lock ->lock ->&root->kernfs_rwsem ->lweventlist_lock ->&rq->__lock ->(console_sem).lock ->pool_lock#2 ->&tbl->lock ->&pn->hash_lock FD: 130 BD: 19 +.+.: (work_completion)(&peer->transmit_handshake_work) ->tk_core.seq.seqcount ->&wg->static_identity.lock ->&cookie->lock ->&obj_hash[i].lock ->&base->lock ->pool_lock#2 ->&peer->endpoint_lock ->batched_entropy_u8.lock ->&rq->__lock ->&c->lock ->&n->list_lock ->kfence_freelist_lock ->&____s->seqcount#2 ->&____s->seqcount ->rcu_node_0 ->&rcu_state.expedited_wq ->&cfs_rq->removed.lock FD: 1 BD: 133 +...: &table->lock#2 FD: 28 BD: 57 ++++: &cookie->lock ->&rq->__lock FD: 1 BD: 3466 +.-.: &r->producer_lock#2 FD: 132 BD: 1 +.+.: (wq_completion)wg-kex-wg2#17 ->(work_completion)(&peer->transmit_handshake_work) ->(work_completion)(&peer->clear_peer_work) FD: 165 BD: 37 +.+.: (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) ->&r->consumer_lock#2 ->&wg->static_identity.lock ->&peer->endpoint_lock ->tk_core.seq.seqcount ->&cookie->lock ->&handshake->lock ->&obj_hash[i].lock ->&base->lock ->&c->lock ->pool_lock#2 ->&list->lock#17 ->&rq->__lock ->rcu_node_0 ->&rcu_state.expedited_wq ->&n->list_lock ->&____s->seqcount#2 ->&____s->seqcount ->&meta->lock ->kfence_freelist_lock ->quarantine_lock ->batched_entropy_u8.lock FD: 1 BD: 110 +.+.: &r->consumer_lock#2 FD: 12 BD: 132 +.-.: &peer->keypairs.keypair_update_lock ->&table->lock#2 ->&obj_hash[i].lock ->pool_lock#2 FD: 166 BD: 1 +.+.: (wq_completion)wg-kex-wg0#18 ->(work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) FD: 166 BD: 1 +.+.: (wq_completion)wg-kex-wg2#18 ->(work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) FD: 130 BD: 15 +.+.: &data->mtx ->fs_reclaim ->pool_lock#2 ->&rfkill->lock ->&c->lock ->&____s->seqcount ->&____s->seqcount#2 ->&rq->__lock ->&n->list_lock FD: 167 BD: 1 +.+.: (wq_completion)wg-crypt-wg0#9 ->(work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) ->(work_completion)(&peer->transmit_packet_work) FD: 167 BD: 1 +.+.: (wq_completion)wg-crypt-wg2#9 ->(work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); 
(typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) ->(work_completion)(&peer->transmit_packet_work) FD: 28 BD: 19 +.+.: (work_completion)(&peer->transmit_packet_work) ->&obj_hash[i].lock ->&peer->endpoint_lock ->&base->lock ->batched_entropy_u8.lock ->&rq->__lock ->pool_lock#2 ->&cfs_rq->removed.lock ->rcu_node_0 ->&rcu_state.expedited_wq FD: 166 BD: 1 +.+.: (wq_completion)wg-kex-wg1#18 ->(work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) FD: 1 BD: 1 +.-.: &keypair->receiving_counter.lock FD: 132 BD: 1 +.+.: (wq_completion)wg-kex-wg1#17 ->(work_completion)(&peer->transmit_handshake_work) ->(work_completion)(&peer->clear_peer_work) FD: 884 BD: 12 +.+.: &devlink->lock_key#9 ->crngs.lock ->fs_reclaim ->devlinks.xa_lock ->&c->lock ->&____s->seqcount#2 ->&n->list_lock ->&obj_hash[i].lock ->nl_table_lock ->nl_table_wait.lock ->&xa->xa_lock#14 ->&data->lock ->pcpu_alloc_mutex ->&rq->__lock ->&____s->seqcount ->rcu_node_0 ->&base->lock ->pin_fs_lock ->&sb->s_type->i_mutex_key#3 ->batched_entropy_u32.lock ->rtnl_mutex ->&(&fn_net->fib_chain)->lock ->&devlink_port->type_lock ->stack_depot_init_mutex ->&nsim_trap_data->trap_lock ->&rcu_state.expedited_wq ->&cfs_rq->removed.lock ->&dentry->d_lock ->&fsnotify_mark_srcu ->&sb->s_type->i_lock_key#7 ->&s->s_inode_list_lock ->&xa->xa_lock#6 ->mount_lock ->rtnl_mutex.wait_lock ->&p->pi_lock ->rcu_state.barrier_mutex ->dev_base_lock ->lweventlist_lock ->netdev_unregistering_wq.lock ->krc.lock ->&dir->lock#2 ->(work_completion)(&(&devlink_port->type_warn_dw)->work) ->(work_completion)(&(&hwstats->traffic_dw)->work) ->&hwstats->hwsdev_list_lock ->(work_completion)(&data->fib_flush_work) ->(work_completion)(&data->fib_event_work) ->(work_completion)(&ht->run_work) ->&ht->mutex ->(work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) ->&rnp->exp_lock ->rcu_state.exp_mutex ->pcpu_lock ->&region->snapshot_lock ->pcpu_alloc_mutex.wait_lock ->stock_lock ->pool_lock#2 ->rcu_state.barrier_mutex.wait_lock ->batched_entropy_u8.lock ->kfence_freelist_lock ->&meta->lock ->&x->wait#10 ->rcu_state.exp_mutex.wait_lock ->quarantine_lock ->remove_cache_srcu ->&lock->wait_lock FD: 1 BD: 79 ....: &wdev->event_lock FD: 28 BD: 75 +.+.: (work_completion)(&(&sdata->dec_tailroom_needed_wk)->work) ->&rq->__lock FD: 37 BD: 80 +.+.: &local->key_mtx ->&obj_hash[i].lock ->&rq->__lock ->&rnp->exp_lock ->&rnp->exp_wq[3] FD: 1 BD: 81 ..-.: &rdev->wiphy_work_lock FD: 1 BD: 75 ....: (&dwork->timer) FD: 1 BD: 75 +.+.: (work_completion)(&(&link->color_collision_detect_work)->work) FD: 237 BD: 1 +.+.: (wq_completion)hci2 ->(work_completion)(&hdev->power_on) ->(work_completion)(&hdev->cmd_sync_work) FD: 598 BD: 6 +.+.: (work_completion)(&rdev->wiphy_work) ->&rdev->wiphy.mtx ->&lock->wait_lock ->&p->pi_lock ->&rq->__lock FD: 1 BD: 80 ..-.: &list->lock#18 FD: 1 BD: 79 +.-.: &ifibss->incomplete_lock FD: 142 BD: 79 +.+.: &local->mtx ->&local->chanctx_mtx ->&____s->seqcount ->pool_lock#2 ->fs_reclaim ->&local->ack_status_lock ->&rq->__lock ->&c->lock ->&n->list_lock FD: 874 BD: 1 +.+.: (wq_completion)cfg80211 ->(work_completion)(&rdev->event_work) ->(work_completion)(&(&rdev->dfs_update_channels_wk)->work) FD: 598 BD: 6 +.+.: (work_completion)(&rdev->event_work) ->&rdev->wiphy.mtx
->&lock->wait_lock ->&p->pi_lock FD: 151 BD: 2 +.+.: wireless_nlevent_work ->net_rwsem FD: 9 BD: 77 +...: &bat_priv->tt.last_changeset_lock ->pool_lock#2 ->&obj_hash[i].lock ->&c->lock FD: 1 BD: 3451 +.-.: &local->active_txq_lock[i] FD: 40 BD: 3445 +.-.: &local->handle_wake_tx_queue_lock ->&local->active_txq_lock[i] ->&local->queue_stop_reason_lock ->&fq->lock ->tk_core.seq.seqcount ->hwsim_radio_lock ->&list->lock#19 ->pool_lock#2 ->&c->lock ->&____s->seqcount#2 ->&____s->seqcount ->&list->lock#26 FD: 1 BD: 3453 ..-.: &local->queue_stop_reason_lock FD: 1 BD: 3455 ..-.: &list->lock#19 FD: 230 BD: 1 +.+.: (wq_completion)hci2#2 ->(work_completion)(&hdev->cmd_work) ->(work_completion)(&hdev->rx_work) ->(work_completion)(&hdev->tx_work) ->(work_completion)(&conn->pending_rx_work) ->(work_completion)(&(&hdev->cmd_timer)->work) ->(work_completion)(&(&conn->disc_work)->work) FD: 145 BD: 1 +.+.: &type->s_umount_key#46/1 ->fs_reclaim ->pcpu_alloc_mutex ->shrinker_rwsem ->list_lrus_mutex ->sb_lock ->pool_lock#2 ->mmu_notifier_invalidate_range_start ->&sb->s_type->i_lock_key#32 ->&s->s_inode_list_lock ->tk_core.seq.seqcount ->binderfs_minors_mutex ->&dentry->d_lock ->&sb->s_type->i_mutex_key#20 ->&c->lock ->&____s->seqcount#2 ->&____s->seqcount ->&n->list_lock FD: 41 BD: 4 +.+.: &sb->s_type->i_lock_key#32 ->&dentry->d_lock FD: 2 BD: 3 +.+.: binderfs_minors_mutex ->binderfs_minors.xa_lock FD: 1 BD: 4 ....: binderfs_minors.xa_lock FD: 132 BD: 2 +.+.: &sb->s_type->i_mutex_key#20 ->&sb->s_type->i_lock_key#32 ->rename_lock.seqcount ->fs_reclaim ->pool_lock#2 ->&dentry->d_lock ->mmu_notifier_invalidate_range_start ->&s->s_inode_list_lock ->tk_core.seq.seqcount ->&c->lock ->&____s->seqcount#2 ->&____s->seqcount FD: 34 BD: 1 +.-.: &local->rx_path_lock ->&list->lock#18 ->&rdev->wiphy_work_lock ->&obj_hash[i].lock ->pool_lock#2 ->&data->lock FD: 19 BD: 83 +...: &sta->lock ->pool_lock#2 ->&obj_hash[i].lock ->krc.lock ->&c->lock ->&____s->seqcount#2 ->&____s->seqcount FD: 19 BD: 79 +.-.: &sta->rate_ctrl_lock ->pool_lock#2 ->&c->lock ->&____s->seqcount#2 ->&____s->seqcount ->&obj_hash[i].lock ->krc.lock FD: 170 BD: 79 +.+.: &local->sta_mtx ->fs_reclaim ->pool_lock#2 ->&local->chanctx_mtx ->pin_fs_lock ->&sb->s_type->i_mutex_key#3 ->&obj_hash[i].lock ->nl_table_lock ->nl_table_wait.lock ->&c->lock ->&rq->__lock ->&____s->seqcount#2 ->&____s->seqcount ->&sta->ampdu_mlme.mtx ->(work_completion)(&sta->ampdu_mlme.work) ->&sta->lock ->krc.lock ->&local->key_mtx ->&fq->lock ->&dentry->d_lock ->&fsnotify_mark_srcu ->&sb->s_type->i_lock_key#7 ->&s->s_inode_list_lock ->&xa->xa_lock#6 ->mount_lock ->&local->active_txq_lock[i] ->(work_completion)(&sta->drv_deliver_wk) ->&rnp->exp_lock ->rcu_state.exp_mutex ->rcu_state.exp_mutex.wait_lock ->&p->pi_lock ->&n->list_lock ->remove_cache_srcu FD: 1 BD: 3 +.+.: iunique_lock FD: 850 BD: 3 +.+.: &type->i_mutex_dir_key#6/1 ->rename_lock.seqcount ->fs_reclaim ->&dentry->d_lock ->&root->kernfs_rwsem ->tomoyo_ss ->&root->kernfs_iattr_rwsem ->cgroup_mutex ->&c->lock ->&xa->xa_lock#12 ->&obj_hash[i].lock ->stock_lock FD: 1 BD: 18 ....: task_group_lock FD: 145 BD: 1 .+.+: kn->active#53 ->fs_reclaim ->&kernfs_locks->open_file_mutex[count] ->&c->lock ->&____s->seqcount ->pool_lock#2 FD: 145 BD: 1 ++++: kn->active#54 ->fs_reclaim ->&kernfs_locks->open_file_mutex[count] ->&c->lock ->stock_lock ->&n->list_lock ->&rq->__lock ->&____s->seqcount#2 ->&____s->seqcount FD: 3 BD: 138 ..-.: cgroup_threadgroup_rwsem.rss.gp_wait.lock ->&obj_hash[i].lock FD: 29 BD: 138 ....: 
cgroup_threadgroup_rwsem.waiters.lock ->&p->pi_lock FD: 1 BD: 18 +.+.: (wq_completion)cpuset_migrate_mm FD: 850 BD: 3 +.+.: &type->i_mutex_dir_key#7/1 ->rename_lock.seqcount ->fs_reclaim ->&dentry->d_lock ->&root->kernfs_rwsem ->tomoyo_ss ->&root->kernfs_iattr_rwsem ->cgroup_mutex ->pool_lock#2 ->&xa->xa_lock#12 ->&obj_hash[i].lock ->stock_lock ->cgroup_mutex.wait_lock ->&p->pi_lock ->&c->lock ->&____s->seqcount#2 ->&____s->seqcount ->&n->list_lock ->&rq->__lock ->&sb->s_type->i_lock_key#31 FD: 149 BD: 1 ++++: kn->active#55 ->fs_reclaim ->&kernfs_locks->open_file_mutex[count] ->stock_lock ->&c->lock ->&n->list_lock ->&____s->seqcount#2 ->&____s->seqcount ->&cgrp->pidlist_mutex FD: 1 BD: 142 ....: cpuset_attach_wq.lock FD: 2 BD: 4132 -.-.: stock_lock ->per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) FD: 145 BD: 1 .+.+: kn->active#56 ->fs_reclaim ->stock_lock ->pool_lock#2 ->&kernfs_locks->open_file_mutex[count] ->&c->lock ->&____s->seqcount#2 ->&n->list_lock ->&rq->__lock ->&____s->seqcount ->per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) FD: 146 BD: 1 .+.+: kn->active#57 ->fs_reclaim ->stock_lock ->&kernfs_locks->open_file_mutex[count] ->memcg_max_mutex ->&c->lock FD: 1 BD: 8 +.+.: memcg_max_mutex FD: 1 BD: 5 +...: &bat_priv->gw.list_lock FD: 1 BD: 6 ....: &per_cpu(xt_recseq, i) FD: 303 BD: 5 +.+.: nf_nat_proto_mutex ->fs_reclaim ->pool_lock#2 ->nf_hook_mutex ->cpu_hotplug_lock ->&obj_hash[i].lock ->stock_lock ->&c->lock ->&____s->seqcount#2 ->&____s->seqcount ->&n->list_lock ->nf_hook_mutex.wait_lock ->&p->pi_lock ->krc.lock ->&rq->__lock FD: 1 BD: 103 ..-.: elock-AF_INET6 FD: 31 BD: 1 +.+.: loop_validate_mutex ->&lo->lo_mutex ->&rq->__lock ->loop_validate_mutex.wait_lock ->&cfs_rq->removed.lock ->&obj_hash[i].lock ->pool_lock#2 FD: 278 BD: 73 +.+.: team->team_lock_key#7 ->fs_reclaim ->netpoll_srcu ->net_rwsem ->&tn->lock ->_xmit_ETHER ->&dir->lock#2 ->input_pool.lock ->&ndev->lock ->&c->lock ->&obj_hash[i].lock ->nl_table_lock ->nl_table_wait.lock ->&in_dev->mc_tomb_lock ->&im->lock ->cbs_list_lock ->sysfs_symlink_target_lock ->lock ->&root->kernfs_rwsem ->&____s->seqcount#2 ->&____s->seqcount ->&n->list_lock ->lweventlist_lock ->(console_sem).lock ->&rq->__lock ->quarantine_lock ->remove_cache_srcu ->pool_lock#2 ->&pn->hash_lock ->&ul->lock#2 ->&tbl->lock ->class ->(&tbl->proxy_timer) ->&base->lock ->proc_subdir_lock ->&ent->pde_unload_lock ->proc_inum_ida.xa_lock ->&net->ipv6.addrconf_hash_lock ->&idev->mc_lock ->&idev->mc_query_lock ->(work_completion)(&(&idev->mc_report_work)->work) ->&idev->mc_report_lock ->krc.lock ->sysctl_lock ->stock_lock ->&bond->mode_lock ->&dev_addr_list_lock_key ->batched_entropy_u32.lock ->pcpu_alloc_mutex FD: 4 BD: 5 +.+.: &bat_priv->bat_v.ogm_buff_mutex ->&obj_hash[i].lock ->pool_lock#2 FD: 1 BD: 72 +...: &bpq_netdev_addr_lock_key FD: 1 BD: 72 +...: nr_neigh_list_lock FD: 1 BD: 5 +.+.: (work_completion)(&(&bat_priv->bat_v.ogm_wq)->work) FD: 884 BD: 12 +.+.: &devlink->lock_key#7 ->crngs.lock ->fs_reclaim ->devlinks.xa_lock ->&obj_hash[i].lock ->nl_table_lock ->nl_table_wait.lock ->&c->lock ->&xa->xa_lock#14 ->&n->list_lock ->&____s->seqcount#2 ->&____s->seqcount ->pcpu_alloc_mutex ->&rq->__lock ->&base->lock ->pin_fs_lock ->&sb->s_type->i_mutex_key#3 ->batched_entropy_u32.lock ->rtnl_mutex ->rcu_node_0 ->&(&fn_net->fib_chain)->lock ->&devlink_port->type_lock ->stack_depot_init_mutex ->pool_lock#2 ->&nsim_trap_data->trap_lock ->&rcu_state.expedited_wq ->&dentry->d_lock ->&fsnotify_mark_srcu ->&sb->s_type->i_lock_key#7 ->&s->s_inode_list_lock ->&xa->xa_lock#6 
->mount_lock ->rcu_state.barrier_mutex ->dev_base_lock ->lweventlist_lock ->netdev_unregistering_wq.lock ->krc.lock ->&dir->lock#2 ->(work_completion)(&(&devlink_port->type_warn_dw)->work) ->rtnl_mutex.wait_lock ->&p->pi_lock ->quarantine_lock ->(work_completion)(&(&hwstats->traffic_dw)->work) ->&hwstats->hwsdev_list_lock ->(work_completion)(&data->fib_flush_work) ->(work_completion)(&data->fib_event_work) ->(work_completion)(&ht->run_work) ->&ht->mutex ->(work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) ->pcpu_lock ->&region->snapshot_lock ->remove_cache_srcu ->stock_lock FD: 131 BD: 1 +.+.: (wq_completion)wg-kex-wg1#13 ->(work_completion)(&peer->transmit_handshake_work) FD: 35 BD: 2 +.+.: (work_completion)(&(&devlink->rwork)->work) ->&obj_hash[i].lock ->&rq->__lock FD: 37 BD: 73 +.-.: (&peer->timer_persistent_keepalive) ->&list->lock#17 ->tk_core.seq.seqcount ->pool_lock#2 ->&c->lock ->&____s->seqcount#2 ->&____s->seqcount ->batched_entropy_u8.lock ->kfence_freelist_lock ->&n->list_lock ->init_task.mems_allowed_seq.seqcount FD: 1 BD: 10 +...: &tn->node_list_lock FD: 1 BD: 3 +...: clock-AF_ROSE FD: 8 BD: 3 +.+.: sk_lock-AF_ROSE ->slock-AF_ROSE ->rose_list_lock ->&obj_hash[i].lock ->wlock-AF_ROSE ->&list->lock#20 ->rlock-AF_ROSE FD: 1 BD: 4 +...: slock-AF_ROSE FD: 1 BD: 4 ....: wlock-AF_ROSE FD: 1 BD: 4 ....: &list->lock#20 FD: 1 BD: 4 +...: rose_list_lock FD: 1 BD: 4 ....: rlock-AF_ROSE FD: 1 BD: 72 +...: &bat_priv->forw_bcast_list_lock FD: 163 BD: 1 +.+.: &net->xfrm.xfrm_cfg_mutex ->&net->xfrm.xfrm_policy_lock ->&____s->seqcount#2 ->&____s->seqcount ->pool_lock#2 ->&c->lock ->&obj_hash[i].lock ->crypto_alg_sem ->fs_reclaim ->&net->xfrm.xfrm_state_lock ->xfrm_state_gc_lock ->&rq->__lock ->rlock-AF_NETLINK ->(kmod_concurrent_max).lock ->&n->list_lock ->&x->wait#17 ->running_helpers_waitq.lock ->(crypto_chain).rwsem ->&x->wait#21 ->&base->lock ->(&timer.timer) ->crypto_default_null_skcipher_lock ->(console_sem).lock ->&policy->lock ->&list->lock#33 ->rlock-AF_KEY ->&data->lock ->&pfk->dump_lock ->&(&net->xfrm.policy_hthresh.lock)->lock FD: 36 BD: 107 +...: &net->xfrm.xfrm_policy_lock ->pool_lock#2 ->&obj_hash[i].lock ->&base->lock ->&____s->seqcount#14 ->krc.lock ->&c->lock ->&n->list_lock ->&____s->seqcount#12 FD: 1 BD: 3505 +.-.: &nf_nat_locks[i] FD: 1 BD: 72 +.+.: (work_completion)(&(&priv->scan_result)->work) FD: 877 BD: 12 +.+.: &devlink->lock_key#11 ->crngs.lock ->fs_reclaim ->&c->lock ->devlinks.xa_lock ->&obj_hash[i].lock ->nl_table_lock ->nl_table_wait.lock ->&xa->xa_lock#14 ->&data->lock ->&____s->seqcount#2 ->&____s->seqcount ->&n->list_lock ->&rq->__lock ->quarantine_lock ->remove_cache_srcu ->pcpu_alloc_mutex ->&base->lock ->pin_fs_lock ->&sb->s_type->i_mutex_key#3 ->batched_entropy_u32.lock ->rtnl_mutex ->rcu_node_0 ->&(&fn_net->fib_chain)->lock ->&devlink_port->type_lock ->stack_depot_init_mutex ->&nsim_trap_data->trap_lock ->&rcu_state.expedited_wq ->&cfs_rq->removed.lock FD: 131 BD: 1 +.+.: (wq_completion)wg-kex-wg0#6 ->(work_completion)(&peer->transmit_handshake_work) FD: 166 BD: 1 +.+.: (wq_completion)wg-kex-wg1#22 ->(work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) FD: 131 BD: 1 +.+.: (wq_completion)wg-kex-wg1#21 ->(work_completion)(&peer->transmit_handshake_work) FD: 167 BD: 1 +.+.: 
(wq_completion)wg-crypt-wg1#11 ->(work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) ->(work_completion)(&peer->transmit_packet_work) FD: 166 BD: 1 +.+.: (wq_completion)wg-kex-wg0#22 ->(work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) FD: 193 BD: 73 +.+.: team->team_lock_key#11 ->fs_reclaim ->&c->lock ->netpoll_srcu ->net_rwsem ->&tn->lock ->_xmit_ETHER ->&dir->lock#2 ->&n->list_lock ->input_pool.lock ->&ndev->lock ->&obj_hash[i].lock ->nl_table_lock ->nl_table_wait.lock ->&rq->__lock ->&in_dev->mc_tomb_lock ->&im->lock ->cbs_list_lock ->sysfs_symlink_target_lock ->lock ->&root->kernfs_rwsem ->lweventlist_lock ->(console_sem).lock ->&____s->seqcount#2 ->remove_cache_srcu FD: 1 BD: 4 +.+.: &q->instances_lock FD: 12 BD: 5 +...: &log->instances_lock ->pool_lock#2 ->&obj_hash[i].lock ->&dir->lock ->&inst->lock ->&c->lock FD: 166 BD: 1 +.+.: (wq_completion)wg-kex-wg2#22 ->(work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) FD: 131 BD: 1 +.+.: (wq_completion)wg-kex-wg2#21 ->(work_completion)(&peer->transmit_handshake_work) ->&rq->__lock FD: 131 BD: 1 +.+.: (wq_completion)wg-kex-wg1#7 ->(work_completion)(&peer->transmit_handshake_work) FD: 167 BD: 1 +.+.: (wq_completion)wg-crypt-wg0#11 ->(work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) ->(work_completion)(&peer->transmit_packet_work) FD: 166 BD: 1 +.+.: (wq_completion)wg-kex-wg0#8 ->(work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) FD: 166 BD: 1 +.+.: (wq_completion)wg-kex-wg1#8 ->(work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) FD: 167 BD: 1 +.+.: (wq_completion)wg-crypt-wg1#4 ->(work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) ->(work_completion)(&peer->transmit_packet_work) ->&rq->__lock FD: 167 BD: 1 +.+.: 
(wq_completion)wg-crypt-wg0#4 ->(work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) ->(work_completion)(&peer->transmit_packet_work) ->&rq->__lock FD: 131 BD: 1 +.+.: (wq_completion)wg-kex-wg2#7 ->(work_completion)(&peer->transmit_handshake_work) FD: 166 BD: 1 +.+.: (wq_completion)wg-kex-wg2#8 ->(work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) FD: 167 BD: 1 +.+.: (wq_completion)wg-crypt-wg2#4 ->(work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) ->(work_completion)(&peer->transmit_packet_work) FD: 460 BD: 73 +.+.: team->team_lock_key#10 ->fs_reclaim ->netpoll_srcu ->net_rwsem ->&tn->lock ->_xmit_ETHER ->&dir->lock#2 ->input_pool.lock ->&c->lock ->&ndev->lock ->&obj_hash[i].lock ->nl_table_lock ->nl_table_wait.lock ->&rq->__lock ->&in_dev->mc_tomb_lock ->&im->lock ->cbs_list_lock ->sysfs_symlink_target_lock ->lock ->&root->kernfs_rwsem ->remove_cache_srcu ->&n->list_lock ->lweventlist_lock ->(console_sem).lock ->&____s->seqcount#2 ->&____s->seqcount ->&dev_addr_list_lock_key#3/1 ->pool_lock#2 ->&pn->hash_lock ->&dev->tx_global_lock ->&sch->q.lock ->__ip_vs_mutex ->krc.lock ->&tbl->lock ->class ->(&tbl->proxy_timer) ->&base->lock ->flowtable_lock ->&dir->lock ->&net->ipv6.addrconf_hash_lock ->&cfs_rq->removed.lock ->pcpu_alloc_mutex ->&dev_addr_list_lock_key ->&vlan_netdev_addr_lock_key/1 ->&app->lock FD: 166 BD: 1 +.+.: (wq_completion)wg-kex-wg0#20 ->(work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) FD: 131 BD: 1 +.+.: (wq_completion)wg-kex-wg1#19 ->(work_completion)(&peer->transmit_handshake_work) FD: 167 BD: 1 +.+.: (wq_completion)wg-crypt-wg1#10 ->(work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) ->(work_completion)(&peer->transmit_packet_work) FD: 166 BD: 1 +.+.: (wq_completion)wg-kex-wg1#20 ->(work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) FD: 884 BD: 12 +.+.: &devlink->lock_key#10 ->crngs.lock ->fs_reclaim ->devlinks.xa_lock ->&c->lock ->&n->list_lock ->&obj_hash[i].lock ->nl_table_lock ->nl_table_wait.lock ->&rq->__lock 
->&xa->xa_lock#14 ->&data->lock ->pcpu_alloc_mutex ->&____s->seqcount#2 ->&____s->seqcount ->&base->lock ->pin_fs_lock ->&sb->s_type->i_mutex_key#3 ->batched_entropy_u32.lock ->rtnl_mutex ->rcu_node_0 ->&(&fn_net->fib_chain)->lock ->&devlink_port->type_lock ->stack_depot_init_mutex ->&nsim_trap_data->trap_lock ->&rcu_state.expedited_wq ->&cfs_rq->removed.lock ->&dentry->d_lock ->&fsnotify_mark_srcu ->&sb->s_type->i_lock_key#7 ->&s->s_inode_list_lock ->&xa->xa_lock#6 ->mount_lock ->rtnl_mutex.wait_lock ->&p->pi_lock ->rcu_state.barrier_mutex ->dev_base_lock ->lweventlist_lock ->netdev_unregistering_wq.lock ->krc.lock ->&dir->lock#2 ->(work_completion)(&(&devlink_port->type_warn_dw)->work) ->(work_completion)(&(&hwstats->traffic_dw)->work) ->&hwstats->hwsdev_list_lock ->(work_completion)(&data->fib_flush_work) ->(work_completion)(&data->fib_event_work) ->(work_completion)(&ht->run_work) ->&ht->mutex ->(work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) ->pcpu_lock ->&region->snapshot_lock ->stock_lock FD: 1 BD: 3441 .+.-: &table->lock#3 FD: 14 BD: 1 +.-.: (&tsc_sync_check_timer) ->&obj_hash[i].lock ->&base->lock FD: 167 BD: 1 +.+.: (wq_completion)wg-crypt-wg0#10 ->(work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) ->(work_completion)(&peer->transmit_packet_work) FD: 166 BD: 1 +.+.: (wq_completion)wg-kex-wg2#20 ->(work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) FD: 131 BD: 1 +.+.: (wq_completion)wg-kex-wg2#19 ->(work_completion)(&peer->transmit_handshake_work) FD: 145 BD: 1 .+.+: kn->active#63 ->fs_reclaim ->&c->lock ->&rq->__lock ->stock_lock ->&kernfs_locks->open_file_mutex[count] FD: 1 BD: 1 ....: _rs.lock#21 FD: 21 BD: 75 +.+.: (wq_completion)phy24 ->(work_completion)(&local->reconfig_filter) FD: 1 BD: 76 ....: key#27 FD: 1 BD: 4664 ....: cid_lock FD: 31 BD: 7 +.+.: &cp->cp_cm_lock ->rcu_node_0 ->&rcu_state.expedited_wq ->&rq->__lock FD: 237 BD: 1 +.+.: (wq_completion)hci5 ->(work_completion)(&hdev->power_on) ->(work_completion)(&hdev->cmd_sync_work) FD: 9 BD: 72 +.+.: mrt_lock#2 ->pool_lock#2 ->&dir->lock#2 FD: 1 BD: 72 +...: _xmit_PIMREG FD: 188 BD: 1 +.+.: (wq_completion)phy8 ->(work_completion)(&local->reconfig_filter) ->(work_completion)(&link->csa_finalize_work) FD: 237 BD: 1 +.+.: (wq_completion)hci0 ->(work_completion)(&hdev->power_on) ->(work_completion)(&hdev->cmd_sync_work) FD: 230 BD: 1 +.+.: (wq_completion)hci5#2 ->(work_completion)(&hdev->cmd_work) ->(work_completion)(&hdev->rx_work) ->(work_completion)(&hdev->tx_work) ->(work_completion)(&conn->pending_rx_work) ->(work_completion)(&(&hdev->cmd_timer)->work) ->(work_completion)(&(&conn->disc_work)->work) FD: 1 BD: 5 +.+.: &device->unregistration_lock FD: 188 BD: 1 +.+.: (wq_completion)phy12 ->&rq->__lock ->(work_completion)(&local->reconfig_filter) ->(work_completion)(&link->csa_finalize_work) FD: 230 BD: 1 +.+.: (wq_completion)hci0#2 ->(work_completion)(&hdev->cmd_work) ->(work_completion)(&hdev->rx_work) ->(work_completion)(&hdev->tx_work) ->(work_completion)(&conn->pending_rx_work) 
->(work_completion)(&(&hdev->cmd_timer)->work) ->(work_completion)(&(&conn->disc_work)->work) FD: 1 BD: 73 +.+.: &tp->lock FD: 1 BD: 18 +.+.: cgroup_mutex.wait_lock FD: 1 BD: 1 ....: _rs.lock FD: 1 BD: 89 +.+.: nf_hook_mutex.wait_lock FD: 1 BD: 3 +...: l2tp_ip6_lock FD: 192 BD: 8 +.+.: &sb->s_type->i_mutex_key#8/4 ->mapping.invalidate_lock ->&sem->wait_lock ->&p->pi_lock ->&rq->__lock ->&ei->i_data_sem ->&ei->i_data_sem/1 ->mmu_notifier_invalidate_range_start ->pool_lock#2 ->&journal->j_state_lock ->jbd2_handle ->&obj_hash[i].lock ->&c->lock ->&n->list_lock ->batched_entropy_u8.lock ->kfence_freelist_lock ->&meta->lock ->&____s->seqcount#2 ->&____s->seqcount FD: 192 BD: 129 .+.+: sb_pagefaults ->tk_core.seq.seqcount ->mmu_notifier_invalidate_range_start ->pool_lock#2 ->&journal->j_state_lock ->jbd2_handle ->&obj_hash[i].lock ->mapping.invalidate_lock ->&sem->wait_lock ->&p->pi_lock ->&rq->__lock ->&c->lock ->&____s->seqcount#2 ->&____s->seqcount ->batched_entropy_u8.lock ->kfence_freelist_lock ->&meta->lock ->&sb->s_type->i_lock_key#22 ->rcu_node_0 ->&rcu_state.expedited_wq ->remove_cache_srcu ->&n->list_lock ->&journal->j_wait_transaction_locked ->&base->lock ->&wb->list_lock FD: 95 BD: 146 +.+.: &ei->i_data_sem/1 ->mmu_notifier_invalidate_range_start ->pool_lock#2 ->&ret->b_state_lock ->&ei->i_raw_lock ->&ei->i_es_lock ->&rq->__lock ->tk_core.seq.seqcount ->batched_entropy_u32.lock ->&ei->i_prealloc_lock ->&sb->s_type->i_lock_key#22 ->&journal->j_wait_updates ->&obj_hash[i].lock ->&mapping->private_lock ->&wb->list_lock ->bit_wait_table + i ->&c->lock ->&____s->seqcount#2 ->&____s->seqcount ->&n->list_lock ->rcu_node_0 FD: 245 BD: 1 +.+.: sk_lock-AF_ALG ->slock-AF_ALG ->fs_reclaim ->pool_lock#2 ->&dir->lock ->&obj_hash[i].lock ->&mm->mmap_lock ->&c->lock ->&n->list_lock ->sk_lock-AF_ALG/1 ->&rq->__lock ->rcu_node_0 ->&ei->socket.wq.wait ->&rcu_state.expedited_wq ->quarantine_lock ->&____s->seqcount#2 ->&____s->seqcount ->(console_sem).lock ->batched_entropy_u8.lock ->kfence_freelist_lock ->&meta->lock ->&sem->wait_lock ->&p->pi_lock ->remove_cache_srcu ->&cfs_rq->removed.lock FD: 30 BD: 3 +...: slock-AF_ALG ->&sk->sk_lock.wq FD: 31 BD: 72 +.+.: &net->ipv4.ra_mutex ->rcu_node_0 ->&rcu_state.expedited_wq ->&rq->__lock FD: 240 BD: 75 +.+.: &net->xdp.lock ->&rq->__lock ->&xs->mutex FD: 239 BD: 76 +.+.: &xs->mutex ->fs_reclaim ->pool_lock#2 ->umem_ida.xa_lock ->&c->lock ->&mm->mmap_lock ->&sem->wait_lock ->&p->pi_lock ->&rq->__lock ->free_vmap_area_lock ->vmap_area_lock ->&____s->seqcount ->init_mm.page_table_lock ->&zone->lock ->rcu_node_0 ->&obj_hash[i].lock ->&n->list_lock ->&rcu_state.expedited_wq ->&lock->wait_lock ->&rnp->exp_wq[2] ->&cfs_rq->removed.lock ->remove_cache_srcu ->&____s->seqcount#2 ->&rnp->exp_lock ->rcu_state.exp_mutex ->batched_entropy_u8.lock ->kfence_freelist_lock ->purge_vmap_area_lock ->rcu_state.exp_mutex.wait_lock ->&pool->xsk_tx_list_lock FD: 1 BD: 78 ....: umem_ida.xa_lock FD: 12 BD: 238 +...: link_idr_lock ->pool_lock#2 ->&obj_hash[i].lock ->batched_entropy_u8.lock ->kfence_freelist_lock ->&c->lock ->&n->list_lock ->&____s->seqcount#2 ->&____s->seqcount FD: 307 BD: 10 +.+.: tracepoints_mutex ->fs_reclaim ->pool_lock#2 ->cpu_hotplug_lock ->tracepoint_srcu_srcu_usage.lock ->&ACCESS_PRIVATE(sdp, lock) ->&rq->__lock ->&obj_hash[i].lock ->&c->lock ->&rnp->exp_lock ->rcu_state.exp_mutex ->tracepoint_srcu ->&x->wait#3 ->tracepoints_mutex.wait_lock ->rcu_state.exp_mutex.wait_lock ->&p->pi_lock ->&rnp->exp_wq[0] ->reg_lock ->tasklist_lock ->&n->list_lock FD: 1 BD: 3 
+...: &xs->map_list_lock FD: 1 BD: 3 +...: clock-AF_XDP FD: 15 BD: 15 ....: tracepoint_srcu_srcu_usage.lock ->&obj_hash[i].lock ->&ACCESS_PRIVATE(sdp, lock) ->&base->lock FD: 31 BD: 1 +.-.: (&sdp->delay_work) FD: 872 BD: 2 +.+.: (work_completion)(&pool->work) ->&rq->__lock ->rtnl_mutex ->vmap_area_lock ->&obj_hash[i].lock ->purge_vmap_area_lock ->pool_lock#2 ->umem_ida.xa_lock ->&lruvec->lru_lock ->&zone->lock ->rcu_node_0 ->rtnl_mutex.wait_lock ->&p->pi_lock ->&rcu_state.expedited_wq ->&cfs_rq->removed.lock ->&meta->lock ->kfence_freelist_lock FD: 250 BD: 3 +.+.: sk_lock-AF_NETROM ->&rq->__lock ->slock-AF_NETROM ->&obj_hash[i].lock ->wlock-AF_NETROM ->&list->lock#22 ->nr_list_lock ->rlock-AF_NETROM ->ax25_uid_lock ->pool_lock#2 ->&list->lock#31 ->&base->lock ->&ei->socket.wq.wait ->clock-AF_NETROM ->&c->lock ->&n->list_lock ->rcu_node_0 ->&data->lock ->&mm->mmap_lock ->&____s->seqcount#2 ->&____s->seqcount ->&rcu_state.expedited_wq ->nr_node_list_lock FD: 39 BD: 6 +.-.: slock-AF_NETROM ->pool_lock#2 ->&list->lock#31 ->&obj_hash[i].lock ->&base->lock ->rlock-AF_NETROM ->wlock-AF_NETROM ->&list->lock#22 ->&c->lock ->&n->list_lock ->&____s->seqcount#2 ->&____s->seqcount ->&data->lock FD: 33 BD: 1 ..-.: &(&bat_priv->dat.work)->timer FD: 33 BD: 1 ..-.: &(&bat_priv->bla.work)->timer FD: 32 BD: 6 +.+.: (work_completion)(&(&bat_priv->dat.work)->work) ->&hash->list_locks[i] ->&obj_hash[i].lock ->&base->lock ->&rq->__lock ->&cfs_rq->removed.lock ->pool_lock#2 ->rcu_node_0 ->&rcu_state.expedited_wq FD: 1 BD: 7 +...: &hash->list_locks[i] FD: 41 BD: 6 +.+.: (work_completion)(&(&bat_priv->bla.work)->work) ->key#21 ->&obj_hash[i].lock ->&base->lock ->&rq->__lock ->rcu_node_0 ->crngs.lock ->&rcu_state.expedited_wq FD: 1 BD: 155 ..-.: key#22 FD: 311 BD: 11 +.+.: ipvs->sync_mutex ->&rq->__lock ->fs_reclaim ->pool_lock#2 ->stock_lock ->mmu_notifier_invalidate_range_start ->&sb->s_type->i_lock_key#8 ->&dir->lock ->&obj_hash[i].lock ->k-sk_lock-AF_INET ->k-slock-AF_INET ->kthread_create_lock ->&p->pi_lock ->&x->wait ->&ipvs->sync_buff_lock ->&zone->lock ->&____s->seqcount ->&c->lock ->(console_sem).lock ->k-sk_lock-AF_INET6 ->k-slock-AF_INET6 ->&cfs_rq->removed.lock ->&n->list_lock ->rtnl_mutex.wait_lock ->&mm->mmap_lock FD: 258 BD: 3 +.+.: sk_lock-AF_TIPC ->slock-AF_TIPC ->fs_reclaim ->pool_lock#2 ->&rq->__lock ->&mm->mmap_lock ->&list->lock#21 ->&ei->socket.wq.wait ->rcu_node_0 ->&obj_hash[i].lock ->&base->lock ->clock-AF_TIPC ->&c->lock ->&srv->idr_lock ->&tn->nametbl_lock ->&con->sub_lock ->&tipc_net(net)->bclock ->&____s->seqcount#2 ->&____s->seqcount ->&con->outqueue_lock ->&n->list_lock ->quarantine_lock ->&zone->lock ->&sem->wait_lock ->&p->pi_lock ->remove_cache_srcu ->tk_core.seq.seqcount ->&list->lock#5 ->pcpu_lock ->batched_entropy_u8.lock ->kfence_freelist_lock ->&meta->lock ->&data->lock ->&list->lock#45 ->&cfs_rq->removed.lock FD: 39 BD: 4 +...: slock-AF_TIPC ->&list->lock#21 ->&list->lock#32 ->pool_lock#2 ->&c->lock ->&obj_hash[i].lock ->&____s->seqcount ->&n->list_lock ->&____s->seqcount#2 ->&sk->sk_lock.wq ->&data->lock ->&meta->lock ->kfence_freelist_lock FD: 1 BD: 22 +...: &list->lock#21 FD: 1 BD: 4 +...: clock-AF_TIPC FD: 1 BD: 4126 +.+.: &pa->pa_lock#2 FD: 1 BD: 4 +...: clock-AF_NETROM FD: 1 BD: 7 ..-.: wlock-AF_NETROM FD: 1 BD: 7 ..-.: &list->lock#22 FD: 1 BD: 75 +.-.: nr_list_lock FD: 1 BD: 7 ..-.: rlock-AF_NETROM FD: 1 BD: 144 +.+.: jump_label_mutex.wait_lock FD: 1088 BD: 1 +.+.: (wq_completion)netns ->net_cleanup_work FD: 1087 BD: 2 +.+.: net_cleanup_work ->pernet_ops_rwsem 
->rcu_state.barrier_mutex ->&obj_hash[i].lock ->pool_lock#2 ->krc.lock ->&dir->lock ->stock_lock ->&base->lock ->rcu_state.barrier_mutex.wait_lock ->&p->pi_lock ->per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) FD: 1 BD: 5 +...: &net->nsid_lock FD: 28 BD: 5 +.+.: netns_bpf_mutex ->&rq->__lock FD: 1 BD: 5 ....: (&net->fs_probe_timer) FD: 1 BD: 7 ++++: &net->cells_lock FD: 1 BD: 5 ....: (&net->cells_timer) FD: 34 BD: 1 +.+.: (wq_completion)afs ->(work_completion)(&net->cells_manager) ->(work_completion)(&net->fs_manager) FD: 31 BD: 2 +.+.: (work_completion)(&net->cells_manager) ->&net->cells_lock ->bit_wait_table + i ->&rq->__lock FD: 1 BD: 5 ....: (&net->fs_timer) FD: 31 BD: 2 +.+.: (work_completion)(&net->fs_manager) ->&(&net->fs_lock)->lock ->&rq->__lock ->bit_wait_table + i FD: 1 BD: 3 +.+.: &(&net->fs_lock)->lock FD: 1 BD: 6 +.+.: &rx->incoming_lock FD: 1 BD: 6 +.+.: &call->notify_lock FD: 1 BD: 6 ....: (rxrpc_call_limiter).lock FD: 1 BD: 6 +.+.: &rx->recvmsg_lock FD: 1 BD: 6 ....: (&call->timer) FD: 1 BD: 6 ....: &list->lock#23 FD: 1 BD: 5 +.+.: (wq_completion)kafsd FD: 1 BD: 5 +...: k-clock-AF_RXRPC FD: 1 BD: 7 ..-.: rlock-AF_RXRPC FD: 1 BD: 1 ....: (&local->client_conn_reap_timer) FD: 1 BD: 1 ....: &list->lock#24 FD: 1 BD: 9 +.+.: (work_completion)(&data->gc_work) FD: 1 BD: 5 +.+.: (work_completion)(&ovs_net->dp_notify_work) FD: 9 BD: 25 +.-.: &srv->idr_lock ->pool_lock#2 ->&obj_hash[i].lock ->&c->lock FD: 1 BD: 5 ....: (&rxnet->service_conn_reap_timer) FD: 1 BD: 5 +.+.: (work_completion)(&(&c->work)->work) FD: 1 BD: 22 +...: &nt->cluster_scope_lock FD: 1 BD: 5 +.+.: (work_completion)(&tn->work) FD: 216 BD: 5 +.+.: (wq_completion)krdsd ->(work_completion)(&(&cp->cp_conn_w)->work) ->(work_completion)(&(&cp->cp_send_w)->work) ->(work_completion)(&(&cp->cp_recv_w)->work) ->(work_completion)(&cp->cp_down_w) ->(work_completion)(&barr->work) ->(work_completion)(&rtn->rds_tcp_accept_w) FD: 210 BD: 6 +.+.: (work_completion)(&rtn->rds_tcp_accept_w) ->fs_reclaim ->pool_lock#2 ->mmu_notifier_invalidate_range_start ->&sb->s_type->i_lock_key#8 ->k-sk_lock-AF_INET6 ->k-slock-AF_INET6 ->&obj_hash[i].lock ->&c->lock ->&n->list_lock ->&____s->seqcount ->rds_cong_lock ->rds_trans_sem ->&tc->t_conn_path_lock ->&xa->xa_lock#6 ->&rq->__lock ->&fsnotify_mark_srcu FD: 1 BD: 7 ....: rds_tcp_conn_lock FD: 1 BD: 8 ....: loop_conns_lock FD: 276 BD: 5 +.+.: (wq_completion)l2tp ->(work_completion)(&tunnel->del_work) FD: 2 BD: 8 +.+.: (work_completion)(&rxnet->service_conn_reaper) ->&rxnet->conn_lock FD: 1 BD: 75 +.+.: mirred_list_lock FD: 4 BD: 77 +...: &idev->mc_query_lock ->&obj_hash[i].lock FD: 28 BD: 77 +.+.: (work_completion)(&(&idev->mc_report_work)->work) ->&rq->__lock FD: 1 BD: 77 +...: &idev->mc_report_lock FD: 29 BD: 75 +.+.: &pnn->pndevs.lock ->&rq->__lock FD: 28 BD: 75 +.+.: &pnn->routes.lock ->&rq->__lock FD: 1 BD: 23 ....: netdev_unregistering_wq.lock FD: 65 BD: 73 +.-.: (&peer->timer_retransmit_handshake) ->&peer->endpoint_lock ->&obj_hash[i].lock ->&list->lock#17 FD: 2 BD: 137 +.+.: (work_completion)(flush) ->&list->lock#5 FD: 1 BD: 10 +.+.: &fn->fou_lock FD: 880 BD: 11 ++++: rdma_nets_rwsem ->rdma_nets.xa_lock ->&device->compat_devs_mutex ->&lock->wait_lock ->&p->pi_lock ->&rq->__lock ->rdma_nets_rwsem.wait_lock FD: 1 BD: 5 +...: k-clock-AF_NETLINK FD: 1 BD: 5 +.+.: &hn->hn_lock FD: 51 BD: 72 +.+.: &caifn->caifdevs.lock ->&obj_hash[i].lock ->&rnp->exp_wq[2] ->&rq->__lock ->pool_lock#2 ->&this->info_list_lock ->&rnp->exp_wq[0] ->rcu_state.exp_mutex.wait_lock ->&p->pi_lock ->&rnp->exp_lock 
->rcu_state.exp_mutex ->quarantine_lock FD: 136 BD: 1 +.+.: (wq_completion)inet_frag_wq ->(work_completion)(&fqdir->destroy_work) FD: 135 BD: 2 +.+.: (work_completion)(&fqdir->destroy_work) ->(work_completion)(&ht->run_work) ->&ht->mutex FD: 45 BD: 2 +.+.: fqdir_free_work ->rcu_state.barrier_mutex ->&obj_hash[i].lock ->pool_lock#2 ->rcu_state.barrier_mutex.wait_lock ->&p->pi_lock ->quarantine_lock ->&base->lock FD: 1 BD: 74 +...: &this->info_list_lock FD: 1 BD: 5 +.+.: &pnetids_ndev->lock FD: 169 BD: 76 +.+.: k-sk_lock-AF_INET6/1 ->k-slock-AF_INET6 ->rlock-AF_INET6 ->&list->lock#25 ->pool_lock#2 ->&dir->lock ->fs_reclaim ->k-clock-AF_INET6 ->&rq->__lock ->&c->lock ->&obj_hash[i].lock FD: 1 BD: 78 +.-.: rlock-AF_INET6 FD: 1 BD: 80 ....: &list->lock#25 FD: 93 BD: 79 +.-.: k-slock-AF_INET6/1 ->&sctp_ep_hashtable[i].lock ->&obj_hash[i].lock ->pool_lock#2 ->k-clock-AF_INET6 ->tk_core.seq.seqcount ->clock-AF_INET6 ->&base->lock ->slock-AF_INET6 ->&tcp_hashinfo.bhash[i].lock ->&hashinfo->ehash_locks[i] FD: 1 BD: 90 ++.-: &sctp_ep_hashtable[i].lock FD: 29 BD: 138 ....: &bdi->wb_waitq ->&p->pi_lock FD: 29 BD: 119 ....: &sk->sk_lock.wq ->&p->pi_lock FD: 45 BD: 73 +.+.: (work_completion)(&br->mcast_gc_work) ->&br->multicast_lock ->(&p->rexmit_timer) ->&obj_hash[i].lock ->&base->lock ->(&p->timer) ->pool_lock#2 ->krc.lock ->(&mp->timer) ->&rq->__lock FD: 1 BD: 74 ....: (&p->rexmit_timer) FD: 41 BD: 74 +.-.: (&p->timer) ->&br->multicast_lock FD: 55 BD: 72 +.-.: (&br->hello_timer) ->&br->lock FD: 1 BD: 72 ....: (&br->topology_change_timer) FD: 1 BD: 72 ....: (&br->tcn_timer) FD: 1 BD: 72 ....: (&brmctx->ip4_mc_router_timer) FD: 1 BD: 72 ....: (&brmctx->ip4_other_query.timer) FD: 1 BD: 72 ....: (&brmctx->ip6_mc_router_timer) FD: 1 BD: 72 ....: (&brmctx->ip6_other_query.timer) FD: 382 BD: 74 +.+.: __ip_vs_mutex ->&rq->__lock ->&ipvs->dest_trash_lock ->__ip_vs_mutex.wait_lock ->ip_vs_sched_mutex ->nf_hook_mutex ->cpu_hotplug_lock ->&obj_hash[i].lock ->pool_lock#2 ->fs_reclaim ->pcpu_alloc_mutex ->&____s->seqcount ->&base->lock ->ipvs->est_mutex ->(console_sem).lock ->console_owner_lock ->console_owner ->rcu_node_0 ->&rcu_state.expedited_wq ->&mm->mmap_lock ->(&tbl->periodic_timer) ->&svc->sched_lock ->krc.lock ->&s->lock ->&cfs_rq->removed.lock ->&c->lock FD: 1 BD: 75 +...: &ipvs->dest_trash_lock FD: 41 BD: 82 +.+.: flowtable_lock ->&rq->__lock ->&ht->lock ->&(&flowtable->gc_work)->timer ->&obj_hash[i].lock ->&base->lock ->(work_completion)(&(&flowtable->gc_work)->work) ->(wq_completion)nf_ft_offload_add ->&wq->mutex ->(wq_completion)nf_ft_offload_del ->(wq_completion)nf_ft_offload_stats ->&x->wait#10 FD: 1 BD: 11 ....: tracepoint_srcu FD: 1 BD: 82 +.+.: rcu_state.barrier_mutex.wait_lock FD: 74 BD: 1 .+.+: sb_writers#13 ->mount_lock ->tk_core.seq.seqcount ->&sb->s_type->i_lock_key#27 ->&wb->list_lock FD: 35 BD: 73 +.-.: (&peer->timer_send_keepalive) ->pool_lock#2 ->&list->lock#17 ->tk_core.seq.seqcount ->&c->lock ->&n->list_lock ->&____s->seqcount#2 ->&____s->seqcount FD: 1 BD: 5 +.+.: &sn->gssp_lock FD: 1 BD: 8 +.+.: &cd->hash_lock FD: 53 BD: 6 +.+.: xfrm_state_gc_work ->xfrm_state_gc_lock ->&obj_hash[i].lock ->&pool->lock ->&rq->__lock ->(&x->rtimer) ->&base->lock ->pool_lock#2 ->&rnp->exp_lock ->rcu_state.exp_mutex ->rcu_state.exp_mutex.wait_lock ->&p->pi_lock ->&rnp->exp_wq[3] FD: 18 BD: 76 +...: &net->xfrm.xfrm_state_lock ->hrtimer_bases.lock ->&obj_hash[i].lock ->&base->lock FD: 15 BD: 76 +.-.: ip6_fl_lock ->&obj_hash[i].lock ->&base->lock ->pool_lock#2 ->batched_entropy_u32.lock FD: 1 BD: 5 ....: 
(&net->ipv6.ip6_fib_timer) FD: 1 BD: 72 ....: (&mrt->ipmr_expire_timer) FD: 1 BD: 5 ....: (&ipvs->dest_trash_timer) FD: 1 BD: 5 +.+.: (work_completion)(&(&ipvs->expire_nodest_conn_work)->work) FD: 144 BD: 6 +.+.: (work_completion)(&(&ipvs->est_reload_work)->work) ->ipvs->est_mutex FD: 1 BD: 5 +...: recent_lock FD: 135 BD: 5 +.+.: hashlimit_mutex ->fs_reclaim ->&c->lock ->pool_lock#2 ->free_vmap_area_lock ->vmap_area_lock ->&____s->seqcount ->init_mm.page_table_lock ->proc_subdir_lock ->proc_inum_ida.xa_lock ->&obj_hash[i].lock ->&base->lock ->&ent->pde_unload_lock ->&rq->__lock FD: 1 BD: 5 +.+.: (work_completion)(&(&cnet->ecache.dwork)->work) FD: 1 BD: 5 +.+.: (work_completion)(&net->xfrm.policy_hash_work) FD: 1 BD: 5 +.+.: (work_completion)(&net->xfrm.state_hash_work) FD: 1 BD: 3 +.+.: dev_map_lock FD: 1 BD: 72 +.+.: bcm_notifier_lock FD: 55 BD: 1 +.-.: (&p->forward_delay_timer) ->&br->lock FD: 1 BD: 1 ....: _rs.lock#2 FD: 31 BD: 1 ..-.: &(&net->ipv6.addr_chk_work)->timer FD: 250 BD: 72 +.+.: sk_lock-AF_CAN ->slock-AF_CAN ->clock-AF_CAN ->proc_subdir_lock ->fs_reclaim ->&rq->__lock ->pool_lock#2 ->proc_inum_ida.xa_lock ->&obj_hash[i].lock ->&rnp->exp_wq[0] ->&ent->pde_unload_lock ->&c->lock ->&____s->seqcount#2 ->&____s->seqcount ->&rnp->exp_lock ->rcu_state.exp_mutex ->rcu_state.exp_mutex.wait_lock ->&p->pi_lock ->&rnp->exp_wq[1] ->&mm->mmap_lock ->tk_core.seq.seqcount ->&list->lock#5 ->hrtimer_bases.lock ->&n->list_lock ->batched_entropy_u8.lock ->kfence_freelist_lock ->j1939_netdev_lock ->&priv->lock ->&priv->j1939_socks_lock ->pcpu_lock ->&net->can.rcvlists_lock ->free_vmap_area_lock ->vmap_area_lock ->stock_lock ->pcpu_alloc_mutex ->pcpu_alloc_mutex.wait_lock ->&data->lock FD: 1 BD: 73 +...: slock-AF_CAN FD: 1 BD: 73 ++..: clock-AF_CAN FD: 1 BD: 6 +...: smc_v4_hashinfo.lock FD: 883 BD: 5 +.+.: sk_lock-AF_SMC ->slock-AF_SMC ->k-sk_lock-AF_INET ->k-slock-AF_INET ->fs_reclaim ->&c->lock ->pool_lock#2 ->rtnl_mutex ->rtnl_mutex.wait_lock ->&p->pi_lock ->&rq->__lock ->&pnettable->lock ->smc_ib_devices.mutex ->&smc_clc_eid_table.lock ->&obj_hash[i].lock ->&smc->clcsock_release_lock ->k-clock-AF_INET ->smc_v4_hashinfo.lock ->clock-AF_SMC ->&n->list_lock FD: 1 BD: 7 +...: slock-AF_SMC FD: 1 BD: 1 +.-.: k-slock-AF_INET/1 FD: 1 BD: 6 .+.+: &smc_clc_eid_table.lock FD: 871 BD: 6 +.+.: &smc->clcsock_release_lock ->&net->smc.mutex_fback_rsn ->k-clock-AF_INET ->&rq->__lock ->k-sk_lock-AF_INET ->k-slock-AF_INET ->&obj_hash[i].lock ->pool_lock#2 ->&dir->lock ->stock_lock ->&sb->s_type->i_lock_key#8 ->&xa->xa_lock#6 ->&fsnotify_mark_srcu ->nf_sockopt_mutex ->&mm->mmap_lock ->rtnl_mutex ->per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) ->__ip_vs_mutex ->__ip_vs_mutex.wait_lock ->&p->pi_lock ->&cfs_rq->removed.lock ->rtnl_mutex.wait_lock ->ipvs->sync_mutex ->ebt_mutex FD: 1 BD: 7 +.+.: &net->smc.mutex_fback_rsn FD: 1 BD: 3 ..-.: rlock-AF_CAN FD: 1 BD: 3 ..-.: elock-AF_CAN FD: 884 BD: 4 +.+.: (work_completion)(&smc->connect_work) ->k-sk_lock-AF_INET ->k-slock-AF_INET ->sk_lock-AF_SMC ->slock-AF_SMC FD: 1 BD: 6 +...: clock-AF_SMC FD: 1 BD: 11 +.+.: tracepoints_mutex.wait_lock FD: 20 BD: 80 +.+.: &sta->ampdu_mlme.mtx ->&sta->lock FD: 1 BD: 80 +.+.: (work_completion)(&sta->ampdu_mlme.work) FD: 1 BD: 3446 ..-.: &list->lock#26 FD: 1 BD: 80 +.+.: (work_completion)(&sta->drv_deliver_wk) FD: 32 BD: 79 +.-.: (&ifibss->timer) ->&rdev->wiphy_work_lock FD: 872 BD: 6 +.+.: (work_completion)(&(&rdev->dfs_update_channels_wk)->work) ->rtnl_mutex ->rtnl_mutex.wait_lock ->&p->pi_lock FD: 1 BD: 72 +.+.: 
(work_completion)(&wdev->disconnect_wk) FD: 1 BD: 72 +.+.: (work_completion)(&wdev->pmsr_free_wk) FD: 1 BD: 72 +.+.: (work_completion)(&sdata->activate_links_work) FD: 1 BD: 75 ....: (&local->dynamic_ps_timer) FD: 28 BD: 75 +.+.: (work_completion)(&local->dynamic_ps_enable_work) ->&rq->__lock FD: 1 BD: 75 +.+.: (work_completion)(&sdata->recalc_smps) FD: 185 BD: 77 +.+.: (work_completion)(&link->csa_finalize_work) ->&wdev->mtx FD: 28 BD: 75 +.+.: (work_completion)(&link->color_change_finalize_work) ->&rq->__lock FD: 1 BD: 75 +.+.: (work_completion)(&(&link->dfs_cac_timer_work)->work) FD: 1 BD: 72 ....: &rdev->dev_wait FD: 1 BD: 76 +...: &qdisc_xmit_lock_key FD: 12 BD: 85 +.-.: &sctp_port_hashtable[i].lock ->&____s->seqcount#2 ->&____s->seqcount ->pool_lock#2 ->&c->lock ->&obj_hash[i].lock ->batched_entropy_u8.lock ->kfence_freelist_lock ->&meta->lock FD: 29 BD: 100 ..-.: &asoc->wait ->&p->pi_lock FD: 1 BD: 4 +...: &mux->lock FD: 185 BD: 75 +.+.: sk_lock-AF_INET6/1 ->slock-AF_INET6 ->rlock-AF_INET6 ->&list->lock#25 ->pool_lock#2 ->&obj_hash[i].lock ->&base->lock ->&rq->__lock ->fs_reclaim ->tk_core.seq.seqcount ->&list->lock#27 ->&____s->seqcount ->krc.lock ->rcu_node_0 ->&c->lock ->&n->list_lock ->&rcu_state.expedited_wq ->&____s->seqcount#2 ->sctp_assocs_id_lock ->&zone->lock ->&cfs_rq->removed.lock ->quarantine_lock ->&data->lock ->batched_entropy_u8.lock ->kfence_freelist_lock ->&meta->lock FD: 13 BD: 255 +.-.: sctp_assocs_id_lock ->pool_lock#2 ->&c->lock ->&____s->seqcount#2 ->&____s->seqcount ->&obj_hash[i].lock ->&n->list_lock FD: 1 BD: 98 ..-.: &list->lock#27 FD: 52 BD: 79 +.-.: slock-AF_INET6/1 ->&sctp_ep_hashtable[i].lock ->&obj_hash[i].lock ->pool_lock#2 ->krc.lock ->&sctp_port_hashtable[i].lock ->clock-AF_INET6 ->tk_core.seq.seqcount ->&base->lock ->&____s->seqcount ->&hashinfo->ehash_locks[i] ->&tcp_hashinfo.bhash[i].lock ->elock-AF_INET6 ->key#25 FD: 1 BD: 1 ....: _rs.lock#3 FD: 1 BD: 76 +...: &qdisc_xmit_lock_key#2 FD: 1 BD: 11 +.+.: nf_conntrack_mutex.wait_lock FD: 1 BD: 3 +.+.: &knet->mutex FD: 2 BD: 3 +...: &mux->rx_lock ->rlock-AF_KCM FD: 239 BD: 3 +.+.: sk_lock-AF_KCM ->slock-AF_KCM ->fs_reclaim ->pool_lock#2 ->&____s->seqcount ->&mm->mmap_lock ->clock-AF_KCM ->&obj_hash[i].lock ->&c->lock ->&mux->lock ->&n->list_lock ->&rq->__lock ->&data->lock FD: 1 BD: 4 +...: slock-AF_KCM FD: 1 BD: 4 +...: clock-AF_KCM FD: 28 BD: 3 +.+.: (work_completion)(&kcm->tx_work) ->&rq->__lock FD: 1 BD: 4 ....: rlock-AF_KCM FD: 1 BD: 1 ....: _rs.lock#4 FD: 1 BD: 3 +...: dgram_lock FD: 35 BD: 1 +.+.: sk_lock-AF_IEEE802154 ->slock-AF_IEEE802154 ->(console_sem).lock ->&rq->__lock FD: 1 BD: 2 +...: slock-AF_IEEE802154 FD: 1 BD: 3 +...: clock-AF_IEEE802154 FD: 1 BD: 3 ....: rlock-AF_IEEE802154 FD: 31 BD: 2 +.+.: sk_lock-AF_ALG/1 ->slock-AF_ALG FD: 438 BD: 1 +.+.: sk_lock-AF_RXRPC ->&rq->__lock ->slock-AF_RXRPC ->&rxnet->local_mutex ->&local->services_lock FD: 1 BD: 2 +...: slock-AF_RXRPC FD: 1 BD: 7 +...: smc_lgr_list.lock FD: 1 BD: 3 +...: clock-AF_RXRPC FD: 123 BD: 2 +.+.: free_ipc_work ->&obj_hash[i].lock ->&pool->lock ->&rq->__lock ->mount_lock ->&fsnotify_mark_srcu ->&dentry->d_lock ->&type->s_umount_key#47 ->unnamed_dev_ida.xa_lock ->list_lrus_mutex ->&xa->xa_lock#12 ->pool_lock#2 ->sb_lock ->mnt_id_ida.xa_lock ->&ids->rwsem ->(work_completion)(&ht->run_work) ->&ht->mutex ->percpu_counters_lock ->pcpu_lock ->sysctl_lock ->proc_inum_ida.xa_lock ->stock_lock ->&rnp->exp_lock ->rcu_state.exp_mutex ->rcu_state.exp_mutex.wait_lock ->&p->pi_lock ->&rnp->exp_wq[1] ->quarantine_lock 
->&sb->s_type->i_lock_key#23 ->rename_lock.seqcount ->&s->s_inode_list_lock ->&xa->xa_lock#6 ->&rnp->exp_wq[2] ->pool_lock ->per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) FD: 1 BD: 1 +.+.: &mq_lock FD: 129 BD: 3 +.+.: &type->s_umount_key#47 ->shrinker_rwsem ->rename_lock.seqcount ->&dentry->d_lock ->&sb->s_type->i_lock_key#20 ->&s->s_inode_list_lock ->&xa->xa_lock#6 ->&obj_hash[i].lock ->pool_lock#2 ->&fsnotify_mark_srcu ->sb_lock ->rcu_node_0 ->&rq->__lock ->&rcu_state.expedited_wq FD: 1 BD: 3 +.+.: &ids->rwsem FD: 1 BD: 141 +.+.: wq_pool_mutex.wait_lock FD: 1 BD: 143 +.+.: wq_pool_attach_mutex.wait_lock FD: 1 BD: 72 +.+.: isotp_notifier_lock FD: 129 BD: 72 +.+.: &pn->all_ppp_mutex ->fs_reclaim ->pool_lock#2 ->&obj_hash[i].lock ->&c->lock FD: 1 BD: 73 +...: &ppp->rlock FD: 6 BD: 72 +...: &ppp->wlock ->&ppp->rlock ->&list->lock#28 ->&obj_hash[i].lock ->pool_lock#2 FD: 1 BD: 72 +...: &dev_addr_list_lock_key#4 FD: 1 BD: 73 ....: &pf->rwait FD: 1 BD: 74 ....: &list->lock#28 FD: 4 BD: 7 +...: vsock_table_lock ->batched_entropy_u32.lock FD: 1 BD: 72 +.+.: raw_notifier_lock FD: 129 BD: 72 +.+.: &tn->idrinfo->lock ->fs_reclaim ->pool_lock#2 ->&obj_hash[i].lock FD: 872 BD: 1 +.+.: ppp_mutex ->&mm->mmap_lock ->fs_reclaim ->stock_lock ->&c->lock ->pool_lock#2 ->stack_depot_init_mutex ->rtnl_mutex ->rtnl_mutex.wait_lock ->&p->pi_lock ->&rq->__lock ->&n->list_lock ->&pn->all_ppp_mutex ->pcpu_alloc_mutex ->net_rwsem ->&tn->lock ->&x->wait#9 ->&obj_hash[i].lock ->remove_cache_srcu ->&k->list_lock ->gdp_mutex ->lock ->&root->kernfs_rwsem ->bus_type_sem ->sysfs_symlink_target_lock ->&dev->power.lock ->dpm_list_mtx ->uevent_sock_mutex ->subsys mutex#17 ->&dir->lock#2 ->dev_hotplug_mutex ->dev_base_lock ->input_pool.lock ->batched_entropy_u32.lock ->&tbl->lock ->sysctl_lock ->nl_table_lock ->nl_table_wait.lock ->proc_subdir_lock ->proc_inum_ida.xa_lock ->&idev->mc_lock ->&pnettable->lock ->smc_ib_devices.mutex ->&____s->seqcount#2 ->&____s->seqcount FD: 251 BD: 5 +.+.: sk_lock-AF_VSOCK ->slock-AF_VSOCK ->&rq->__lock ->vsock_table_lock ->clock-AF_VSOCK ->rlock-AF_VSOCK ->&mm->mmap_lock ->fs_reclaim ->pool_lock#2 ->&c->lock ->&vvs->rx_lock ->&list->lock#38 ->&pool->lock ->&dir->lock ->&obj_hash[i].lock ->sk_lock-AF_VSOCK/1 ->&ei->socket.wq.wait ->&vvs->tx_lock ->&zone->lock ->&____s->seqcount ->&base->lock ->&data->lock ->&____s->seqcount#2 FD: 30 BD: 7 +...: slock-AF_VSOCK ->&sk->sk_lock.wq FD: 1 BD: 7 +...: clock-AF_VSOCK FD: 1 BD: 7 ....: rlock-AF_VSOCK FD: 18 BD: 1 +...: &nr_netdev_xmit_lock_key ->nr_node_list_lock ->&obj_hash[i].lock ->pool_lock#2 ->&data->lock FD: 1 BD: 5 +...: nr_node_list_lock FD: 1 BD: 1 +...: clock-AF_LLC FD: 248 BD: 3 +.+.: sk_lock-AF_LLC ->slock-AF_LLC ->&mm->mmap_lock ->&rq->__lock ->llc_sap_list_lock ->fs_reclaim ->pool_lock#2 ->&dir->lock#2 ->&sap->sk_lock ->&c->lock ->wlock-AF_LLC ->&data->lock ->&obj_hash[i].lock ->&base->lock ->&ei->socket.wq.wait ->&cfs_rq->removed.lock FD: 30 BD: 4 +...: slock-AF_LLC ->&sk->sk_lock.wq FD: 1 BD: 3 +...: rds_sock_lock FD: 246 BD: 1 +.+.: sk_lock-AF_RDS ->slock-AF_RDS ->&mm->mmap_lock ->rds_trans_sem ->once_lock ->&rq->__lock ->pool_lock#2 ->&obj_hash[i].lock ->batched_entropy_u16.lock FD: 1 BD: 2 +...: slock-AF_RDS FD: 33 BD: 12 ....: rds_cong_lock FD: 35 BD: 7 ....: rds_conn_lock ->rds_cong_lock ->loop_conns_lock ->&obj_hash[i].lock ->pool_lock#2 FD: 33 BD: 7 +.+.: (work_completion)(&(&cp->cp_conn_w)->work) FD: 44 BD: 6 +.+.: (work_completion)(&(&cp->cp_send_w)->work) ->pool_lock#2 ->&obj_hash[i].lock ->&cp->cp_lock ->&base->lock ->&map->m_waitq 
->&rs->rs_recv_lock ->&rm->m_rs_lock ->&c->lock FD: 1 BD: 15 ....: &cp->cp_lock FD: 1 BD: 6 +.+.: (work_completion)(&(&cp->cp_recv_w)->work) FD: 2 BD: 11 ....: &rs->rs_lock ->&cp->cp_lock FD: 42 BD: 9 ....: &rs->rs_recv_lock ->rds_cong_lock ->&ei->socket.wq.wait ->&rm->m_rs_lock ->&obj_hash[i].lock ->pool_lock#2 FD: 1 BD: 8 +...: xfrm_state_gc_lock FD: 3 BD: 10 ....: &rm->m_rs_lock ->&rs->rs_lock FD: 29 BD: 7 ....: &map->m_waitq ->&p->pi_lock FD: 20 BD: 7 +.-.: (&x->rtimer) ->&x->lock FD: 33 BD: 1 ..-.: &(&cp->cp_send_w)->timer FD: 97 BD: 73 +.+.: sk_lock-AF_INET/1 ->slock-AF_INET ->&asoc->wait ->&rq->__lock ->rlock-AF_INET ->&list->lock#25 ->&obj_hash[i].lock ->&base->lock ->pool_lock#2 ->&____s->seqcount ->krc.lock ->quarantine_lock ->&cfs_rq->removed.lock ->sctp_assocs_id_lock FD: 16 BD: 77 +.-.: _xmit_NONE#2 ->pool_lock#2 ->&obj_hash[i].lock ->&meta->lock ->kfence_freelist_lock ->quarantine_lock FD: 1 BD: 74 +.-.: rlock-AF_INET FD: 9 BD: 72 +.+.: mrt_lock ->pool_lock#2 ->&dir->lock#2 FD: 134 BD: 1 +.+.: mem_id_lock ->&rq->__lock ->fs_reclaim ->&c->lock ->pool_lock#2 ->batched_entropy_u32.lock ->&obj_hash[i].lock ->mem_id_pool.xa_lock ->&ht->lock FD: 1 BD: 2 ..-.: mem_id_pool.xa_lock FD: 1 BD: 1 +...: &r->producer_lock#3 FD: 1 BD: 1 +...: &r->consumer_lock#3 FD: 1 BD: 128 ....: key#23 FD: 1 BD: 3 +...: clock-AF_RDS FD: 1 BD: 3 ....: rds_cong_monitor_lock FD: 1 BD: 3 ....: &rs->rs_rdma_lock FD: 1 BD: 3 ....: &q->lock FD: 1 BD: 3 ....: (&llc->pf_cycle_timer.timer) FD: 1 BD: 3 ....: (&llc->ack_timer.timer) FD: 1 BD: 3 ....: (&llc->rej_sent_timer.timer) FD: 1 BD: 3 ....: (&llc->busy_state_timer.timer) FD: 1 BD: 3 ....: rlock-AF_LLC FD: 1 BD: 4 ....: wlock-AF_LLC FD: 1 BD: 3 ....: &list->lock#29 FD: 31 BD: 1 ..-.: &(&hctx->run_work)->timer FD: 1 BD: 139 +.+.: freezer_mutex.wait_lock FD: 134 BD: 9 +.+.: nlk_cb_mutex-NETFILTER ->fs_reclaim ->pool_lock#2 ->&nf_conntrack_locks[i] ->&rq->__lock ->&obj_hash[i].lock ->rlock-AF_NETLINK ->&c->lock ->ip_set_ref_lock ->&cfs_rq->removed.lock ->&____s->seqcount#2 ->&____s->seqcount FD: 1 BD: 8 +.+.: nf_sockopt_mutex.wait_lock FD: 1 BD: 1 ....: net_ratelimit_state.lock FD: 1 BD: 73 +...: &ipvs->sync_buff_lock FD: 1 BD: 3 +.+.: pfkey_mutex FD: 1 BD: 5 ....: rlock-AF_KEY FD: 1 BD: 3 +...: clock-AF_KEY FD: 1 BD: 3 ....: wlock-AF_KEY FD: 156 BD: 3 +.+.: sk_lock-AF_PPPOX ->slock-AF_PPPOX ->&pn->hash_lock ->clock-AF_PPPOX ->rlock-AF_PPPOX ->fs_reclaim ->&c->lock ->pool_lock#2 ->&ps->sk_lock ->&pch->chan_sem ->&pch->upl ->&pn->all_channels_lock ->&pf->rwait ->&dir->lock ->&obj_hash[i].lock ->&list->lock#28 ->&tunnel->hlist_lock ->&rq->__lock ->&pn->l2tp_session_hlist_lock ->&list->lock#37 ->chan_lock ->rcu_state.exp_mutex.wait_lock ->&p->pi_lock ->&rnp->exp_lock ->rcu_state.exp_mutex FD: 1 BD: 4 +...: slock-AF_PPPOX FD: 1 BD: 4 +...: clock-AF_PPPOX FD: 1 BD: 4 ..-.: rlock-AF_PPPOX FD: 9 BD: 1 ....: &dtab->index_lock ->stock_lock ->pool_lock#2 ->&c->lock FD: 3 BD: 241 ....: kernfs_pr_cont_lock ->kernfs_rename_lock ->(console_sem).lock FD: 1 BD: 1 ....: _rs.lock#5 FD: 1 BD: 82 +...: l2tp_ip_lock FD: 1 BD: 3514 +...: &net->can.rcvlists_lock FD: 9 BD: 3514 +...: &priv->lock ->pool_lock#2 ->&obj_hash[i].lock ->&c->lock FD: 1 BD: 73 +...: &priv->j1939_socks_lock FD: 9 BD: 80 ..-.: &local->ack_status_lock ->pool_lock#2 ->&obj_hash[i].lock ->&c->lock FD: 1 BD: 10 +...: ip_set_ref_lock FD: 1 BD: 2 +.+.: vlan_ioctl_mutex.wait_lock FD: 1 BD: 5 +.+.: &pnsocks.lock FD: 1 BD: 4 +.+.: resource_mutex FD: 1 BD: 3 +...: clock-AF_PHONET FD: 1 BD: 3 ....: rlock-AF_PHONET FD: 132 
BD: 11 +.+.: reg_lock ->fs_reclaim ->pool_lock#2 ->&obj_hash[i].lock ->&rq->__lock ->rcu_state.exp_mutex.wait_lock ->&p->pi_lock FD: 2 BD: 130 +.+.: lock#10 FD: 4 BD: 2 +.-.: icmp_global.lock ->batched_entropy_u8.lock FD: 246 BD: 1 +.+.: &tfile->napi_mutex ->&____s->seqcount ->pool_lock#2 ->&mm->mmap_lock ->pcpu_lock ->&obj_hash[i].lock ->&rq->__lock ->&c->lock ->&____s->seqcount#2 FD: 1 BD: 77 +...: &msk->pm.lock FD: 495 BD: 8 +.+.: (work_completion)(&msk->work) ->sk_lock-AF_INET6 ->slock-AF_INET6 ->pool_lock#2 ->&dir->lock ->&obj_hash[i].lock ->stock_lock ->&rq->__lock FD: 1 BD: 72 +...: _xmit_NETROM#2 FD: 1 BD: 72 ....: wlock-AF_UNSPEC FD: 1 BD: 72 ....: elock-AF_UNSPEC FD: 33 BD: 2 +.+.: &ilan->xlat.locks FD: 1 BD: 76 +...: &vlan_netdev_xmit_lock_key FD: 1 BD: 79 +.+.: (work_completion)(&(&priv->gc_work)->work) FD: 41 BD: 74 +.-.: (&mp->timer) ->&br->multicast_lock FD: 35 BD: 3 +.+.: &chan->lock/1 ->sk_lock-AF_BLUETOOTH-BTPROTO_L2CAP/1 ->slock-AF_BLUETOOTH-BTPROTO_L2CAP ->clock-AF_BLUETOOTH ->rlock-AF_BLUETOOTH ->wlock-AF_BLUETOOTH ->&rq->__lock ->pool_lock#2 ->&dir->lock ->&obj_hash[i].lock FD: 29 BD: 4 +.+.: sk_lock-AF_BLUETOOTH-BTPROTO_L2CAP/1 ->&rq->__lock ->slock-AF_BLUETOOTH-BTPROTO_L2CAP FD: 1 BD: 4 +...: unix_dgram_prot_lock FD: 5 BD: 74 +...: &stab->lock ->&psock->link_lock FD: 4 BD: 75 +...: &psock->link_lock ->&obj_hash[i].lock ->pool_lock#2 FD: 1 BD: 74 +...: &psock->ingress_lock FD: 1 BD: 5 +.+.: (work_completion)(&(&psock->work)->work) FD: 11 BD: 2 +.+.: (work_completion)(&(&psock->rwork)->work) ->&obj_hash[i].lock ->(work_completion)(&(&psock->work)->work) ->&list->lock#30 ->rlock-AF_UNIX ->pool_lock#2 ->&dir->lock ->stock_lock FD: 1 BD: 3 ....: &list->lock#30 FD: 1 BD: 4 .+.+: ax25_uid_lock FD: 1 BD: 7 ..-.: &list->lock#31 FD: 43 BD: 1 +.-.: net/netrom/nr_loopback.c:18 ->&list->lock#31 ->nr_list_lock ->&c->lock ->pool_lock#2 ->&dir->lock ->&obj_hash[i].lock ->slock-AF_NETROM ->&base->lock ->&data->lock ->&n->list_lock ->batched_entropy_u8.lock ->kfence_freelist_lock FD: 1 BD: 17 +.+.: (work_completion)(&data->fib_flush_work) FD: 1 BD: 17 +.+.: &region->snapshot_lock FD: 1 BD: 4706 .-.-: init_task.mems_allowed_seq.seqcount FD: 154 BD: 2 +.+.: (work_completion)(&con->swork) ->&con->outqueue_lock ->pool_lock#2 ->&list->lock#32 ->&c->lock ->&list->lock#21 ->&n->list_lock ->&con->sub_lock ->k-sk_lock-AF_TIPC ->k-slock-AF_TIPC FD: 41 BD: 1 +.-.: (&sk->sk_timer)#2 ->slock-AF_NETROM ->nr_list_lock ->&obj_hash[i].lock ->wlock-AF_NETROM ->&list->lock#22 ->rlock-AF_NETROM ->&base->lock ->&data->lock ->pool_lock#2 FD: 1 BD: 72 +.+.: ifalias_mutex FD: 1 BD: 109 ....: key#24 FD: 1 BD: 104 ....: &list->lock#33 FD: 1 BD: 81 ....: key#25 FD: 1 BD: 1 ....: &head->lock FD: 1 BD: 295 +.+.: rcu_state.exp_wake_mutex.wait_lock FD: 41 BD: 22 +...: &con->sub_lock ->&tn->nametbl_lock ->&obj_hash[i].lock ->pool_lock#2 ->(&sub->timer) ->&base->lock FD: 35 BD: 24 +.-.: &sub->lock ->&srv->idr_lock ->pool_lock#2 ->&con->outqueue_lock ->&c->lock ->&n->list_lock FD: 1 BD: 73 +...: &tipc_net(net)->bclock FD: 145 BD: 1 ++++: kn->active#58 ->&rq->__lock ->fs_reclaim ->stock_lock ->&kernfs_locks->open_file_mutex[count] ->&c->lock ->&n->list_lock ->&____s->seqcount#2 ->&____s->seqcount FD: 911 BD: 1 .+.+: &rdma_nl_types[idx].sem ->devices_rwsem ->fs_reclaim ->pool_lock#2 ->clients_rwsem ->&obj_hash[i].lock ->rlock-AF_NETLINK ->link_ops_rwsem ->&c->lock ->&rq->__lock ->&____s->seqcount#2 ->&____s->seqcount FD: 4 BD: 25 +.-.: &con->outqueue_lock ->&obj_hash[i].lock ->pool_lock#2 FD: 155 BD: 1 +.+.: 
(wq_completion)tipc_send ->(work_completion)(&con->swork) FD: 1 BD: 7 +...: &list->lock#32 FD: 1 BD: 104 +...: &policy->lock FD: 1 BD: 82 +.-.: &r->producer_lock#4 FD: 1 BD: 1 +.+.: (work_completion)(&hdev->reenable_adv_work) FD: 131 BD: 1 +.+.: &type->s_umount_key#48 ->shrinker_rwsem ->rename_lock.seqcount ->&dentry->d_lock ->&dentry->d_lock/1 ->&sb->s_type->i_lock_key#32 ->&s->s_inode_list_lock ->&xa->xa_lock#6 ->&obj_hash[i].lock ->pool_lock#2 ->&fsnotify_mark_srcu ->binderfs_minors_mutex ->sb_lock ->&rq->__lock FD: 28 BD: 1 +.+.: (work_completion)(&data->suspend_work) ->&rq->__lock FD: 1 BD: 10 +.+.: (work_completion)(&(&hdev->interleave_scan)->work) FD: 1 BD: 23 +.+.: (work_completion)(&(&conn->id_addr_timer)->work) FD: 34 BD: 22 +.+.: (work_completion)(&(&conn->disc_work)->work) ->pool_lock#2 ->&c->lock ->&list->lock#9 ->&n->list_lock FD: 1 BD: 22 +.+.: (work_completion)(&(&conn->auto_accept_work)->work) FD: 1 BD: 22 +.+.: (work_completion)(&(&conn->idle_work)->work) FD: 1 BD: 5 +.+.: (work_completion)(&rfkill->uevent_work) FD: 1 BD: 76 +...: _xmit_IPGRE#2 FD: 1 BD: 76 +...: _xmit_SIT#2 FD: 1 BD: 76 +...: _xmit_TUNNEL#2 FD: 1 BD: 72 +...: &pmc->lock FD: 1 BD: 5 +.+.: (work_completion)(&(&local->roc_work)->work) FD: 1 BD: 5 +.+.: (work_completion)(&local->restart_work) FD: 1 BD: 5 +.+.: (work_completion)(&local->sched_scan_stopped_work) FD: 1 BD: 5 +.+.: (work_completion)(&local->radar_detected_work) FD: 1 BD: 5 +.+.: (work_completion)(&rdev->conn_work) FD: 1 BD: 5 +.+.: (work_completion)(&(&rdev->background_cac_done_wk)->work) FD: 872 BD: 6 +.+.: (work_completion)(&rdev->destroy_work) ->rtnl_mutex ->rtnl_mutex.wait_lock ->&p->pi_lock ->&rq->__lock FD: 1 BD: 5 +.+.: (work_completion)(&rdev->propagate_radar_detect_wk) FD: 1 BD: 5 +.+.: (work_completion)(&rdev->propagate_cac_done_wk) FD: 1 BD: 5 +.+.: (work_completion)(&rdev->mgmt_registrations_update_wk) FD: 1 BD: 5 +.+.: (work_completion)(&rdev->background_cac_abort_wk) FD: 1 BD: 5 ....: (&local->sta_cleanup) FD: 1 BD: 72 +.+.: (work_completion)(&(&priv->connect)->work) FD: 1 BD: 72 ....: (&pmctx->ip6_mc_router_timer) FD: 1 BD: 72 ....: (&pmctx->ip4_mc_router_timer) FD: 1 BD: 76 +...: &batadv_netdev_xmit_lock_key FD: 1 BD: 76 +...: &qdisc_xmit_lock_key#3 FD: 1 BD: 76 +...: &qdisc_xmit_lock_key#4 FD: 1 BD: 76 +...: _xmit_LOOPBACK#2 FD: 64 BD: 73 +.-.: (&peer->timer_new_handshake) ->&peer->endpoint_lock FD: 33 BD: 73 +.-.: (&peer->timer_zero_key_material) FD: 133 BD: 76 +.+.: (work_completion)(&peer->clear_peer_work) ->&handshake->lock ->&peer->keypairs.keypair_update_lock ->&rq->__lock FD: 31 BD: 77 +.+.: (work_completion)(&(&bond->mii_work)->work) ->&obj_hash[i].lock ->&base->lock ->&rq->__lock ->rcu_node_0 ->&rcu_state.expedited_wq FD: 1 BD: 72 +.+.: (work_completion)(&(&bond->arp_work)->work) FD: 28 BD: 73 +.+.: (work_completion)(&(&bond->alb_work)->work) ->&obj_hash[i].lock ->&base->lock ->&rq->__lock FD: 32 BD: 76 +.+.: (work_completion)(&(&bond->ad_work)->work) ->&bond->mode_lock ->&obj_hash[i].lock ->&base->lock ->&rq->__lock ->rcu_node_0 ->&rcu_state.expedited_wq FD: 228 BD: 3412 +.+.: (work_completion)(&(&bond->mcast_work)->work) ->&obj_hash[i].lock ->&base->lock ->&rq->__lock ->pool_lock#2 ->rcu_node_0 ->rtnl_mutex.wait_lock ->&p->pi_lock ->&cfs_rq->removed.lock ->&rcu_state.expedited_wq ->stock_lock FD: 1 BD: 72 +.+.: (work_completion)(&(&bond->slave_arr_work)->work) FD: 32 BD: 72 +.+.: (work_completion)(&port->wq) ->&list->lock#46 ->&rq->__lock FD: 1 BD: 72 +...: &bond->ipsec_lock FD: 1 BD: 1 +.+.: 
(work_completion)(&(&team->mcast_rejoin.dw)->work) FD: 1 BD: 1 +.+.: (work_completion)(&(&team->notify_peers.dw)->work) FD: 230 BD: 1 +.+.: (wq_completion)hci1#2 ->(work_completion)(&hdev->cmd_work) ->(work_completion)(&hdev->rx_work) ->(work_completion)(&hdev->tx_work) ->(work_completion)(&conn->pending_rx_work) ->(work_completion)(&(&hdev->cmd_timer)->work) ->(work_completion)(&(&conn->disc_work)->work) FD: 166 BD: 1 +.+.: (wq_completion)wg-kex-wg0#14 ->(work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) FD: 166 BD: 1 +.+.: (wq_completion)wg-kex-wg1#14 ->(work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) FD: 167 BD: 1 +.+.: (wq_completion)wg-crypt-wg1#7 ->(work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) ->(work_completion)(&peer->transmit_packet_work) FD: 167 BD: 1 +.+.: (wq_completion)wg-crypt-wg0#7 ->(work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) ->(work_completion)(&peer->transmit_packet_work) FD: 131 BD: 1 +.+.: (wq_completion)wg-kex-wg2#13 ->(work_completion)(&peer->transmit_handshake_work) FD: 166 BD: 1 +.+.: (wq_completion)wg-kex-wg2#14 ->(work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) FD: 167 BD: 1 +.+.: (wq_completion)wg-crypt-wg2#7 ->(work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) ->(work_completion)(&peer->transmit_packet_work) FD: 21 BD: 75 +.+.: (wq_completion)phy15 ->(work_completion)(&local->reconfig_filter) FD: 21 BD: 75 +.+.: (wq_completion)phy16 ->(work_completion)(&local->reconfig_filter) FD: 1 BD: 3426 ..-.: key#26 FD: 20 BD: 3425 +...: &vlan_netdev_addr_lock_key/2 ->&obj_hash[i].lock ->krc.lock ->&batadv_netdev_addr_lock_key/1 FD: 91 BD: 1 +.-.: (&asoc->timers[i]) ->slock-AF_INET6 FD: 33 BD: 1 ..-.: &(&conn->disc_work)->timer FD: 4 BD: 1 +...: noop_qdisc.busylock ->noop_qdisc.q.lock FD: 131 BD: 73 +.+.: &block->lock ->fs_reclaim ->pool_lock#2 ->&c->lock ->&n->list_lock ->&rq->__lock ->&obj_hash[i].lock ->nl_table_lock ->nl_table_wait.lock ->&____s->seqcount#2 ->&____s->seqcount ->remove_cache_srcu 
->batched_entropy_u8.lock ->kfence_freelist_lock ->&meta->lock ->quarantine_lock FD: 131 BD: 72 ++++: &block->cb_lock ->flow_indr_block_lock ->&tp->lock ->&rq->__lock FD: 129 BD: 73 +.+.: flow_indr_block_lock ->fs_reclaim ->pool_lock#2 ->&obj_hash[i].lock ->&c->lock ->&n->list_lock ->&rq->__lock FD: 1 BD: 72 +.+.: (work_completion)(&q->work) FD: 1 BD: 82 +.+.: nf_ct_proto_mutex.wait_lock FD: 134 BD: 3 +.+.: nlk_cb_mutex-SOCK_DIAG ->fs_reclaim ->pool_lock#2 ->inet_diag_table_mutex ->rlock-AF_NETLINK ->&obj_hash[i].lock ->&c->lock ->&n->list_lock ->&rq->__lock FD: 138 BD: 1 +.+.: sock_diag_mutex ->sock_diag_table_mutex ->sock_diag_mutex.wait_lock ->&rq->__lock ->fs_reclaim ->pool_lock#2 ->rlock-AF_NETLINK ->&c->lock ->&n->list_lock ->&____s->seqcount FD: 1 BD: 3 +...: ip6_ra_lock FD: 64 BD: 3 +.+.: sk_lock-AF_CAIF ->slock-AF_CAIF ->&obj_hash[i].lock ->&this->info_list_lock ->(console_sem).lock ->&rq->__lock ->&ei->socket.wq.wait ->clock-AF_CAIF ->elock-AF_CAIF ->&rnp->exp_wq[2] ->&rnp->exp_wq[3] ->&rnp->exp_lock ->rcu_state.exp_mutex ->rcu_state.exp_mutex.wait_lock ->&p->pi_lock ->console_owner_lock ->console_owner FD: 1 BD: 4 +...: slock-AF_CAIF FD: 1 BD: 3 +...: rlock-AF_CAIF FD: 1 BD: 4 +...: clock-AF_CAIF FD: 1 BD: 4 ....: elock-AF_CAIF FD: 1 BD: 2 +.+.: sock_diag_mutex.wait_lock FD: 33 BD: 1 +.-.: (&pool->idle_timer) ->&pool->lock/1 ->&pool->lock FD: 9 BD: 3 +...: &____s->seqcount#13 ->&____s->seqcount#2 ->&____s->seqcount ->pool_lock#2 ->&c->lock FD: 10 BD: 2 +...: &(&bp->lock)->lock ->&____s->seqcount#13 FD: 33 BD: 3 +.-.: &q->lock#2 ->pool_lock#2 ->&obj_hash[i].lock ->&base->lock ->&c->lock FD: 1 BD: 75 +.+.: __ip_vs_mutex.wait_lock FD: 3 BD: 3 +.+.: unix_gc_lock ->unix_gc_wait.lock ->rlock-AF_UNIX FD: 1 BD: 1 ....: &wq#4 FD: 1 BD: 75 +.+.: &s->lock FD: 1 BD: 1 ....: _rs.lock#6 FD: 137 BD: 3 +.+.: sk_lock-AF_NFC ->slock-AF_NFC ->&k->list_lock ->&k->k_lock ->llcp_devices_lock ->fs_reclaim ->pool_lock#2 ->&rq->__lock ->&local->sdp_lock ->&local->sockets.lock ->&obj_hash[i].lock ->&local->raw_sockets.lock FD: 1 BD: 4 +...: slock-AF_NFC FD: 1 BD: 4 +.+.: llcp_devices_lock FD: 2 BD: 4 +.+.: &local->sdp_lock ->&local->sockets.lock FD: 1 BD: 5 ++++: &local->sockets.lock FD: 1 BD: 3 +...: clock-AF_NFC FD: 1 BD: 3 ....: rlock-AF_NFC FD: 1 BD: 3 ....: &list->lock#34 FD: 1 BD: 4 ....: unix_gc_wait.lock FD: 134 BD: 8 +.+.: crypto_default_null_skcipher_lock ->crypto_alg_sem ->fs_reclaim ->pool_lock#2 ->&obj_hash[i].lock FD: 19 BD: 8 +.-.: &x->lock ->&net->xfrm.xfrm_state_lock FD: 172 BD: 6 +.+.: msk_lock-AF_INET ->mlock-AF_INET ->fs_reclaim ->stock_lock ->pool_lock#2 ->mmu_notifier_invalidate_range_start ->&sb->s_type->i_lock_key#8 ->&dir->lock ->&obj_hash[i].lock ->k-sk_lock-AF_INET6/1 ->k-slock-AF_INET6 ->k-clock-AF_INET6 ->&xa->xa_lock#6 ->&rq->__lock ->&fsnotify_mark_srcu ->&msk->pm.lock ->elock-AF_INET6 ->&c->lock FD: 1 BD: 7 +...: mlock-AF_INET FD: 929 BD: 2 +.+.: (work_completion)(&nlk->work) ->&obj_hash[i].lock ->pool_lock#2 ->rlock-AF_NETLINK ->&dir->lock ->quarantine_lock ->genl_mutex ->&data->lock ->&rq->__lock ->vmap_area_lock ->purge_vmap_area_lock FD: 2 BD: 1 ....: &loc_l->lock ->&l->lock FD: 1 BD: 2 ....: &l->lock FD: 14 BD: 1 +.-.: (t) ->&obj_hash[i].lock ->&base->lock FD: 1 BD: 4 +...: clock-AF_ISDN FD: 1 BD: 3 +...: base_sockets.lock FD: 150 BD: 139 +.+.: &journal->j_barrier ->rcu_node_0 ->&rq->__lock ->&journal->j_state_lock ->&journal->j_wait_commit ->&journal->j_wait_done_commit ->&journal->j_list_lock ->&journal->j_checkpoint_mutex ->jbd2_handle ->&lock->wait_lock 
->&cfs_rq->removed.lock ->&obj_hash[i].lock ->pool_lock#2 FD: 1 BD: 108 +...: &____s->seqcount#14 FD: 1 BD: 4 ....: rlock-AF_X25 FD: 33 BD: 3 +.+.: sk_lock-AF_BLUETOOTH-BTPROTO_RFCOMM ->slock-AF_BLUETOOTH-BTPROTO_RFCOMM ->rlock-AF_BLUETOOTH ->&rq->__lock ->rcu_node_0 ->&rcu_state.expedited_wq FD: 31 BD: 1 ..-.: security/integrity/ima/ima_queue_keys.c:35 FD: 5 BD: 2 +.+.: (ima_keys_delayed_work).work ->ima_keys_lock ->&obj_hash[i].lock ->pool_lock#2 FD: 1 BD: 4 ....: &list->lock#35 FD: 34 BD: 1 +.-.: (&q->timer) ->pool_lock#2 ->&obj_hash[i].lock FD: 303 BD: 1 +.+.: bpf_stats_enabled_mutex ->&newf->file_lock ->fs_reclaim ->stock_lock ->&rq->__lock ->pool_lock#2 ->&sb->s_type->i_lock_key#15 ->cpu_hotplug_lock ->&c->lock ->&n->list_lock ->key#5 FD: 1 BD: 4 +.+.: rfcomm_sk_list.lock FD: 9 BD: 3 +.+.: sk_lock-AF_X25 ->slock-AF_X25 ->wlock-AF_X25 ->&list->lock#35 ->&obj_hash[i].lock ->x25_list_lock ->rlock-AF_X25 ->x25_route_list_lock FD: 1 BD: 4 +...: slock-AF_X25 FD: 1 BD: 4 ....: wlock-AF_X25 FD: 1 BD: 4 +...: slock-AF_BLUETOOTH-BTPROTO_RFCOMM FD: 1 BD: 3 +...: &htab->buckets[i].lock FD: 1 BD: 3 +.+.: &d->lock FD: 1 BD: 3 ....: &list->lock#36 FD: 1 BD: 1 ....: _rs.lock#7 FD: 29 BD: 3781 ....: &ep->poll_wait/1 ->&p->pi_lock FD: 1 BD: 6 +...: &inst->lock FD: 149 BD: 73 +.+.: k-sk_lock-AF_INET/1 ->k-slock-AF_INET ->&dir->lock ->fs_reclaim ->k-clock-AF_INET ->&c->lock ->&rq->__lock ->pool_lock#2 FD: 32 BD: 3 ....: &ep->poll_wait ->&ep->lock FD: 1 BD: 75 +.-.: ip6_sk_fl_lock FD: 133 BD: 72 +.+.: &chain->filter_chain_lock ->&rq->__lock ->&block->proto_destroy_lock ->&block->lock FD: 28 BD: 73 +.+.: &block->proto_destroy_lock ->&rq->__lock FD: 31 BD: 3 +.+.: sk_lock-AF_QIPCRTR ->slock-AF_QIPCRTR ->clock-AF_QIPCRTR ->rlock-AF_QIPCRTR ->&rq->__lock FD: 1 BD: 4 +...: slock-AF_QIPCRTR FD: 1 BD: 4 +...: clock-AF_QIPCRTR FD: 1 BD: 4 ....: rlock-AF_QIPCRTR FD: 1 BD: 3 ..-.: wlock-AF_PPPOX FD: 308 BD: 3 +.+.: sched_register_mutex ->tracepoints_mutex FD: 15 BD: 6 +.-.: (&map->gc) ->&set->lock ->&obj_hash[i].lock ->&base->lock FD: 1 BD: 7 +.-.: &set->lock FD: 1 BD: 1 ....: _rs.lock#8 FD: 145 BD: 1 ++++: kn->active#59 ->fs_reclaim ->stock_lock ->&kernfs_locks->open_file_mutex[count] ->&c->lock ->pool_lock#2 ->&group->rtpoll_trigger_lock ->&rq->__lock FD: 131 BD: 73 +.+.: &group->rtpoll_trigger_lock ->fs_reclaim ->&c->lock ->&n->list_lock ->pool_lock#2 ->kthread_create_lock ->&p->pi_lock ->&rq->__lock ->&x->wait ->&obj_hash[i].lock ->&per_cpu_ptr(group->pcpu, cpu)->seq ->&base->lock FD: 29 BD: 2 ..-.: &group->rtpoll_wait ->&p->pi_lock FD: 30 BD: 1 +.-.: (&group->rtpoll_timer) ->&group->rtpoll_wait FD: 1 BD: 4 +.+.: sco_sk_list.lock FD: 29 BD: 3 +.+.: sk_lock-AF_BLUETOOTH-BTPROTO_SCO ->slock-AF_BLUETOOTH-BTPROTO_SCO ->&rq->__lock FD: 1 BD: 4 +...: slock-AF_BLUETOOTH-BTPROTO_SCO FD: 1 BD: 88 ...-: &f->f_owner.lock FD: 1 BD: 72 ....: (&q->adapt_timer) FD: 1 BD: 76 ....: fastopen_seqlock.seqcount FD: 1 BD: 135 ....: &sem->waiters FD: 34 BD: 1 +.+.: (wq_completion)bond1 ->(work_completion)(&(&bond->mii_work)->work) ->(work_completion)(&(&bond->ad_work)->work) ->&rq->__lock FD: 4 BD: 79 +...: &bond->mode_lock ->&obj_hash[i].lock ->pool_lock#2 FD: 33 BD: 1 ..-.: &(&bond->ad_work)->timer FD: 33 BD: 1 ..-.: &(&bond->mii_work)->timer FD: 15 BD: 75 +.-.: (&tbl->periodic_timer) ->&obj_hash[i].lock ->&base->lock ->&svc->sched_lock FD: 34 BD: 1 +.+.: (wq_completion)bond2 ->(work_completion)(&(&bond->mii_work)->work) ->(work_completion)(&(&bond->ad_work)->work) FD: 34 BD: 1 +.+.: (wq_completion)bond3 
->(work_completion)(&(&bond->mii_work)->work) ->(work_completion)(&(&bond->ad_work)->work) FD: 10 BD: 12 +...: &pn->l2tp_tunnel_idr_lock ->&c->lock ->pool_lock#2 ->&obj_hash[i].lock ->&n->list_lock FD: 133 BD: 4 +.+.: &ps->sk_lock ->&tunnel->hlist_lock ->&rq->__lock ->fs_reclaim ->&c->lock ->&n->list_lock ->pool_lock#2 ->&dir->lock ->&pn->all_channels_lock FD: 2 BD: 11 +...: &tunnel->hlist_lock ->&pn->l2tp_session_hlist_lock FD: 1 BD: 12 +...: &pn->l2tp_session_hlist_lock FD: 1 BD: 5 +...: &pn->all_channels_lock FD: 29 BD: 4 +.+.: &pch->chan_sem ->&rq->__lock ->&pch->downl FD: 1 BD: 5 +...: &pch->downl FD: 1 BD: 4 +...: &pch->upl FD: 1 BD: 10 ....: &list->lock#37 FD: 275 BD: 6 +.+.: (work_completion)(&tunnel->del_work) ->&tunnel->hlist_lock ->&pn->l2tp_tunnel_idr_lock ->k-sk_lock-AF_INET ->k-slock-AF_INET ->l2tp_ip_lock ->k-clock-AF_INET ->&sb->s_type->i_lock_key#8 ->&xa->xa_lock#6 ->&obj_hash[i].lock ->pool_lock#2 ->&fsnotify_mark_srcu ->krc.lock ->&dir->lock ->stock_lock ->&pn->l2tp_session_hlist_lock ->rcu_state.exp_mutex.wait_lock ->&p->pi_lock ->&list->lock#37 ->&rnp->exp_lock ->rcu_state.exp_mutex ->clock-AF_INET6 FD: 34 BD: 1 +.+.: (wq_completion)bond4 ->(work_completion)(&(&bond->mii_work)->work) ->(work_completion)(&(&bond->ad_work)->work) FD: 1 BD: 3 +.+.: &ping_table.lock FD: 133 BD: 129 +.+.: &po->pg_vec_lock ->rlock-AF_PACKET ->&rq->__lock ->&vma->vm_lock->lock ->ptlock_ptr(page)#2 ->wlock-AF_PACKET ->&data->lock ->&obj_hash[i].lock ->pool_lock#2 ->fs_reclaim ->&____s->seqcount ->stock_lock ->ptlock_ptr(page) ->rcu_node_0 ->&rcu_state.expedited_wq FD: 1 BD: 1 ....: _rs.lock#9 FD: 2 BD: 75 +.+.: &match->lock ->ptype_lock FD: 1 BD: 7 +...: &vvs->rx_lock FD: 1 BD: 7 +...: &list->lock#38 FD: 253 BD: 1 +.+.: (wq_completion)vsock-loopback ->(work_completion)(&vsock->pkt_work) FD: 252 BD: 2 +.+.: (work_completion)(&vsock->pkt_work) ->&list->lock#38 ->vsock_table_lock ->sk_lock-AF_VSOCK ->slock-AF_VSOCK ->&obj_hash[i].lock ->pool_lock#2 FD: 138 BD: 6 +.+.: sk_lock-AF_VSOCK/1 ->slock-AF_VSOCK ->fs_reclaim ->pool_lock#2 ->&vvs->tx_lock ->vsock_table_lock ->&vvs->rx_lock ->&list->lock#38 ->&c->lock ->&rq->__lock ->&obj_hash[i].lock ->&base->lock ->clock-AF_VSOCK ->rlock-AF_VSOCK FD: 1 BD: 7 +...: &vvs->tx_lock FD: 155 BD: 1 +.+.: (wq_completion)tipc_rcv ->(work_completion)(&srv->awork) ->(work_completion)(&con->rwork) FD: 153 BD: 2 +.+.: (work_completion)(&srv->awork) ->&srv->idr_lock ->fs_reclaim ->pool_lock#2 ->mmu_notifier_invalidate_range_start ->&sb->s_type->i_lock_key#8 ->k-sk_lock-AF_TIPC ->k-slock-AF_TIPC ->&obj_hash[i].lock ->k-clock-AF_TIPC ->&xa->xa_lock#6 ->&fsnotify_mark_srcu FD: 135 BD: 16 +.+.: k-sk_lock-AF_TIPC/1 ->k-slock-AF_TIPC ->&obj_hash[i].lock ->&base->lock ->fs_reclaim ->pool_lock#2 ->&list->lock#21 FD: 144 BD: 2 +.+.: (work_completion)(&con->rwork) ->k-sk_lock-AF_TIPC ->k-slock-AF_TIPC ->k-clock-AF_TIPC ->&srv->idr_lock ->&obj_hash[i].lock ->pool_lock#2 ->&sb->s_type->i_lock_key#8 ->&xa->xa_lock#6 ->&fsnotify_mark_srcu ->&con->outqueue_lock FD: 36 BD: 23 +.-.: (&sub->timer) ->&sub->lock FD: 35 BD: 3 +.+.: &ep->mtx/1 ->&rq->__lock ->&f->f_lock ->&ep->lock FD: 146 BD: 1 .+.+: kn->active#60 ->fs_reclaim ->stock_lock ->&kernfs_locks->open_file_mutex[count] ->devcgroup_mutex FD: 1 BD: 10 +.+.: ima_extend_list_mutex.wait_lock FD: 1 BD: 1 ....: _rs.lock#10 FD: 1 BD: 4 +.+.: raw_sk_list.lock FD: 1 BD: 4 +.+.: &local->raw_sockets.lock FD: 40 BD: 2 +.+.: (work_completion)(&umem->work) ->umem_ida.xa_lock ->vmap_area_lock ->&obj_hash[i].lock ->purge_vmap_area_lock 
->pool_lock#2 ->&lruvec->lru_lock ->&rq->__lock ->rcu_node_0 ->&cfs_rq->removed.lock ->&rcu_state.expedited_wq ->&meta->lock ->kfence_freelist_lock ->quarantine_lock ->per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) FD: 1 BD: 4 +...: clock-AF_AX25 FD: 21 BD: 1 +.+.: (wq_completion)t ->(work_completion)(&local->reconfig_filter) FD: 1 BD: 4 ....: &list->lock#39 FD: 1 BD: 4 ....: rlock-AF_AX25 FD: 34 BD: 3 +.+.: sk_lock-AF_AX25 ->slock-AF_AX25 ->clock-AF_AX25 ->ax25_list_lock ->&obj_hash[i].lock ->&list->lock#39 ->rlock-AF_AX25 ->wlock-AF_AX25 ->&rq->__lock FD: 1 BD: 4 +...: slock-AF_AX25 FD: 1 BD: 4 +...: ax25_list_lock FD: 1 BD: 4 ....: wlock-AF_AX25 FD: 145 BD: 1 .+.+: kn->active#61 ->fs_reclaim ->stock_lock ->&rq->__lock ->&kernfs_locks->open_file_mutex[count] ->&c->lock ->&n->list_lock FD: 1 BD: 1 ....: _rs.lock#11 FD: 21 BD: 1 +.+.: (wq_completion)phy18 ->(work_completion)(&local->reconfig_filter) FD: 145 BD: 1 .+.+: kn->active#62 ->&rq->__lock ->fs_reclaim ->stock_lock ->&kernfs_locks->open_file_mutex[count] ->&c->lock FD: 21 BD: 1 +.+.: (wq_completion)phy19 ->(work_completion)(&local->reconfig_filter) FD: 1 BD: 98 ..-.: &list->lock#40 FD: 91 BD: 1 +.-.: (&peer->T3_rtx_timer) ->slock-AF_INET6 FD: 1 BD: 72 ....: &____s->seqcount#15 FD: 1 BD: 72 ....: &____s->seqcount#16 FD: 1 BD: 3 +.+.: nfnl_grp_active_lock FD: 21 BD: 1 +.+.: (wq_completion)phy20 ->(work_completion)(&local->reconfig_filter) FD: 305 BD: 1 +.+.: bpf_dispatcher_xdp.mutex ->pack_mutex ->fs_reclaim ->pool_lock#2 ->free_vmap_area_lock ->vmap_area_lock ->&____s->seqcount ->init_mm.page_table_lock ->bpf_lock ->text_mutex ->cpu_hotplug_lock ->&obj_hash[i].lock ->&rnp->exp_wq[3] ->&rq->__lock ->rcu_state.exp_mutex.wait_lock ->&p->pi_lock ->&rnp->exp_lock ->rcu_state.exp_mutex FD: 1 BD: 4 +.+.: delayed_uprobe_lock.wait_lock FD: 31 BD: 1 ..-.: net/ipv6/ip6_flowlabel.c:57 FD: 1 BD: 72 ....: (&q->perturb_timer) FD: 16 BD: 1 +.-.: net/ipv6/ip6_flowlabel.c:47 ->ip6_fl_lock FD: 301 BD: 2 +.+.: ((ipv6_flowlabel_exclusive).work).work ->cpu_hotplug_lock FD: 1 BD: 1 +.+.: bpf_module_mutex FD: 31 BD: 1 ..-.: &(&hinfo->gc_work)->timer FD: 29 BD: 2 +.+.: (work_completion)(&(&hinfo->gc_work)->work) ->&hinfo->lock ->&rq->__lock ->&obj_hash[i].lock ->&base->lock FD: 1 BD: 3 +...: &hinfo->lock FD: 1 BD: 1 ....: _rs.lock#12 FD: 1 BD: 1 ....: _rs.lock#13 FD: 134 BD: 73 +.+.: tcp_md5sig_mutex ->crypto_alg_sem ->fs_reclaim ->pool_lock#2 ->&c->lock ->&n->list_lock ->&rq->__lock FD: 1 BD: 1 ....: _rs.lock#14 FD: 31 BD: 1 ..-.: net/ipv4/tcp_ipv4.c:1061 FD: 301 BD: 2 +.+.: ((tcp_md5_needed).work).work ->cpu_hotplug_lock FD: 30 BD: 83 +.+.: (work_completion)(&(&flowtable->gc_work)->work) ->&rq->__lock ->&ht->lock ->&obj_hash[i].lock ->&base->lock FD: 1 BD: 12 +.+.: ebt_mutex.wait_lock FD: 31 BD: 1 ..-.: net/ipv4/devinet.c:474 FD: 1 BD: 1 ....: &qs->lock FD: 25 BD: 73 +.-.: (&tw->tw_timer) ->&hashinfo->ehash_locks[i] ->&tcp_hashinfo.bhash[i].lock ->stock_lock ->&obj_hash[i].lock ->&dccp_hashinfo.bhash[i].lock FD: 1 BD: 1 ....: nopage_rs.lock FD: 1 BD: 85 ....: &new->fa_lock FD: 132 BD: 1 +.+.: &audit_cmd_mutex.lock ->fs_reclaim ->&c->lock ->pool_lock#2 ->rlock-AF_NETLINK ->&rq->__lock ->&n->list_lock ->tk_core.seq.seqcount ->&____s->seqcount ->&obj_hash[i].lock ->&list->lock ->kauditd_wait.lock FD: 1 BD: 78 ....: &rdev->wpan_phy.sync_txq FD: 1 BD: 78 ....: &rdev->wpan_phy.queue_lock FD: 1 BD: 78 ..-.: &list->lock#41 FD: 1 BD: 3 ++.-: raw_lock FD: 145 BD: 3 +.+.: sk_lock-AF_PHONET ->slock-AF_PHONET ->port_mutex#2 ->fs_reclaim ->pool_lock#2 ->&c->lock 
->&obj_hash[i].lock ->&pnsocks.lock ->resource_mutex ->&rq->__lock ->&rnp->exp_lock ->rcu_state.exp_mutex ->rcu_state.exp_mutex.wait_lock ->&p->pi_lock FD: 1 BD: 4 +...: slock-AF_PHONET FD: 3 BD: 4 +.+.: port_mutex#2 ->local_port_range_lock.seqcount ->&pnsocks.lock FD: 1 BD: 5 ....: local_port_range_lock.seqcount FD: 1 BD: 3 ....: &list->lock#42 FD: 246 BD: 72 +.+.: sk_lock-AF_UNSPEC ->&rq->__lock ->slock-AF_UNSPEC ->fs_reclaim ->pool_lock#2 ->free_vmap_area_lock ->vmap_area_lock ->&c->lock ->&____s->seqcount ->stock_lock ->&n->list_lock ->pcpu_alloc_mutex ->&mm->mmap_lock ->&obj_hash[i].lock ->pack_mutex ->batched_entropy_u32.lock ->text_mutex ->&fp->aux->used_maps_mutex ->remove_cache_srcu ->init_mm.page_table_lock FD: 1 BD: 73 +...: slock-AF_UNSPEC FD: 1 BD: 4 +.+.: oom_adj_mutex.wait_lock FD: 38 BD: 2 +.+.: (work_completion)(&pool->idle_cull_work) ->wq_pool_attach_mutex ->wq_pool_attach_mutex.wait_lock ->&p->pi_lock ->&rq->__lock ->&cfs_rq->removed.lock ->&obj_hash[i].lock ->pool_lock#2 FD: 1 BD: 1 ....: &so->wait FD: 1 BD: 1 +.+.: &type->s_umount_key#49 FD: 130 BD: 1 +.+.: crypto_cfg_mutex ->fs_reclaim ->pool_lock#2 ->rlock-AF_NETLINK ->&c->lock ->&rq->__lock FD: 1 BD: 1 +...: &list->lock#43 FD: 133 BD: 127 +.+.: &sb->s_type->i_mutex_key#21 ->tk_core.seq.seqcount ->fs_reclaim ->pool_lock#2 ->&rq->__lock FD: 12 BD: 128 +.+.: &hugetlbfs_i_mmap_rwsem_key ->&obj_hash[i].lock ->pool_lock#2 ->ptlock_ptr(page) FD: 13 BD: 127 +.+.: &vma_lock->rw_sema ->&hugetlbfs_i_mmap_rwsem_key FD: 1 BD: 1 +.+.: &resv_map->lock FD: 74 BD: 1 .+.+: sb_writers#14 ->mount_lock ->tk_core.seq.seqcount ->&sb->s_type->i_lock_key#16 ->&rq->__lock ->&wb->list_lock FD: 1 BD: 4 +...: reuseport_lock FD: 1 BD: 1 ....: _rs.lock#15 FD: 3 BD: 72 +...: mfc_unres_lock ->&obj_hash[i].lock FD: 311 BD: 6 +.+.: net_dm_mutex ->&obj_hash[i].lock ->fs_reclaim ->pool_lock#2 ->&data->lock ->&rq->__lock ->tracepoints_mutex ->&ACCESS_PRIVATE(sdp, lock) ->tracepoint_srcu ->&x->wait#3 ->(&data->send_timer) ->&base->lock ->(work_completion)(&data->dm_alert_work) ->&c->lock FD: 14 BD: 3724 ..-.: &data->lock ->&obj_hash[i].lock ->&base->lock FD: 31 BD: 7 +.-.: (&data->send_timer) FD: 132 BD: 7 +.+.: (work_completion)(&data->dm_alert_work) ->fs_reclaim ->pool_lock#2 ->&data->lock ->&obj_hash[i].lock ->nl_table_lock ->nl_table_wait.lock ->&c->lock ->batched_entropy_u8.lock ->kfence_freelist_lock ->&n->list_lock ->&rq->__lock ->&meta->lock ->quarantine_lock ->&____s->seqcount#2 ->&____s->seqcount ->remove_cache_srcu ->rcu_node_0 ->&rcu_state.expedited_wq FD: 1 BD: 1 ....: _rs.lock#16 FD: 14 BD: 18 +.+.: &pdata->netdev_lock ->pool_lock#2 ->&dir->lock#2 ->&c->lock ->ndev_hash_lock ->&obj_hash[i].lock FD: 1 BD: 19 ....: ndev_hash_lock FD: 2 BD: 11 +.+.: devices.xa_lock ->pool_lock#2 FD: 874 BD: 17 +.+.: &rxe->usdev_lock ->&pdata->netdev_lock ->rtnl_mutex ->&rq->__lock ->(console_sem).lock ->&lock->wait_lock ->&pool->lock ->rtnl_mutex.wait_lock ->&p->pi_lock ->rcu_node_0 ->&cfs_rq->removed.lock ->&obj_hash[i].lock ->pool_lock#2 ->&rcu_state.expedited_wq FD: 131 BD: 3527 +.+.: &table->lock#4 ->fs_reclaim ->pool_lock#2 ->&obj_hash[i].lock ->&table->rwlock ->&device->event_handler_rwsem ->&c->lock ->&n->list_lock ->&rq->__lock ->rcu_node_0 ->&rcu_state.expedited_wq FD: 1 BD: 3528 ....: &table->rwlock FD: 31 BD: 3530 ++++: &device->event_handler_rwsem ->&rq->__lock FD: 1 BD: 5 ....: &device->cache_lock FD: 1 BD: 3 +.+.: rdmacg_mutex FD: 3 BD: 13 +.+.: subsys mutex#83 ->&k->k_lock FD: 877 BD: 1 +.+.: (wq_completion)infiniband 
->(work_completion)(&work->work)#2 FD: 876 BD: 2 +.+.: (work_completion)(&work->work)#2 ->fs_reclaim ->pool_lock#2 ->&rxe->usdev_lock ->&device->cache_lock ->&obj_hash[i].lock ->&device->event_handler_rwsem ->&c->lock ->&n->list_lock FD: 335 BD: 12 ++++: &device->client_data_rwsem ->&xa->xa_lock#15 ->fs_reclaim ->&c->lock ->pool_lock#2 ->&xa->xa_lock#16 ->&xa->xa_lock#17 ->crngs.lock ->&n->list_lock ->&rq->__lock ->free_vmap_area_lock ->vmap_area_lock ->&____s->seqcount ->init_mm.page_table_lock ->&obj_hash[i].lock ->&cq->cq_lock ->stock_lock ->mmu_notifier_invalidate_range_start ->&sb->s_type->i_lock_key#8 ->&dir->lock ->rcu_node_0 ->&qp->state_lock ->cpu_hotplug_lock ->kthread_create_lock ->&p->pi_lock ->&x->wait ->wq_pool_mutex ->ib_mad_port_list_lock ->&mad_queue->lock ->&qp->rq.producer_lock ->ib_mad_clients.xa_lock ->&port_priv->reg_lock ->ib_agent_port_list_lock ->lock ->&root->kernfs_rwsem ->&____s->seqcount#2 ->&cm.device_lock ->lock#7 ->umad_ida.xa_lock ->&x->wait#9 ->chrdevs_lock ->&k->list_lock ->gdp_mutex ->bus_type_sem ->sysfs_symlink_target_lock ->&dev->power.lock ->dpm_list_mtx ->req_lock ->&x->wait#11 ->uevent_sock_mutex ->subsys mutex#84 ->pcpu_alloc_mutex ->uverbs_ida.xa_lock ->subsys mutex#85 ->subsys mutex#86 ->(console_sem).lock ->rds_ib_devices_lock ->ib_nodev_conns_lock ->smc_ib_devices.mutex ->&device->event_handler_rwsem ->&pnettable->lock ->per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) ->&cfs_rq->removed.lock ->purge_vmap_area_lock ->&x->wait#28 ->&x->wait#29 ->&rcu_state.expedited_wq ->krc.lock ->&sem->wait_lock ->kernfs_idr_lock ->remove_cache_srcu FD: 11 BD: 15 +.+.: &xa->xa_lock#15 ->pool_lock#2 ->&obj_hash[i].lock ->&____s->seqcount ->&c->lock ->&n->list_lock FD: 9 BD: 13 +.+.: &xa->xa_lock#16 ->pool_lock#2 ->&c->lock ->&obj_hash[i].lock FD: 10 BD: 75 +.+.: &xa->xa_lock#17 ->pool_lock#2 ->&c->lock ->&____s->seqcount ->&obj_hash[i].lock FD: 1 BD: 13 ....: &cq->cq_lock FD: 1 BD: 13 ....: &qp->state_lock FD: 1 BD: 13 ....: ib_mad_port_list_lock FD: 1 BD: 13 ....: &mad_queue->lock FD: 1 BD: 13 ....: &qp->rq.producer_lock FD: 7 BD: 13 +.+.: ib_mad_clients.xa_lock ->&c->lock ->pool_lock#2 FD: 7 BD: 13 ....: &port_priv->reg_lock ->pool_lock#2 ->&c->lock FD: 1 BD: 13 ....: ib_agent_port_list_lock FD: 1 BD: 13 ....: &cm.device_lock FD: 1 BD: 75 +.+.: &id_priv->qp_mutex FD: 2 BD: 75 +.+.: &xa->xa_lock#18 ->pool_lock#2 FD: 2 BD: 75 ....: &cm_id_priv->lock ->&cm.lock FD: 1 BD: 76 ....: &cm.lock FD: 1 BD: 13 ....: umad_ida.xa_lock FD: 3 BD: 13 +.+.: subsys mutex#84 ->&k->k_lock FD: 1 BD: 13 ....: uverbs_ida.xa_lock FD: 3 BD: 13 +.+.: subsys mutex#85 ->&k->k_lock FD: 3 BD: 13 +.+.: subsys mutex#86 ->&k->k_lock FD: 2 BD: 15 ++++: rds_ib_devices_lock ->&pool->flush_lock FD: 1 BD: 13 +.+.: ib_nodev_conns_lock FD: 1 BD: 1 ....: _rs.lock#17 FD: 1 BD: 1 ....: _rs.lock#18 FD: 876 BD: 2 +.+.: (work_completion)(&smcibdev->port_event_work) ->&rxe->usdev_lock ->&table->rwlock ->smc_lgr_list.lock ->&lock->wait_lock ->&p->pi_lock FD: 877 BD: 12 +.+.: &device->compat_devs_mutex ->fs_reclaim ->&xa->xa_lock#15 ->&rq->__lock ->&c->lock ->&n->list_lock ->pool_lock#2 ->&x->wait#9 ->&obj_hash[i].lock ->&k->list_lock ->gdp_mutex ->lock ->&root->kernfs_rwsem ->bus_type_sem ->sysfs_symlink_target_lock ->&____s->seqcount#2 ->&____s->seqcount ->&dev->power.lock ->dpm_list_mtx ->uevent_sock_mutex ->subsys mutex#83 ->&rxe->usdev_lock ->&zone->lock ->rcu_node_0 ->&rcu_state.expedited_wq ->batched_entropy_u8.lock ->kfence_freelist_lock ->&lock->wait_lock ->&p->pi_lock ->&sem->wait_lock ->remove_cache_srcu 
->&cfs_rq->removed.lock ->pgd_lock ->key ->pcpu_lock ->percpu_counters_lock ->stock_lock ->uevent_sock_mutex.wait_lock ->quarantine_lock ->&pgdat->kswapd_wait FD: 1 BD: 1 ....: _rs.lock#19 FD: 1 BD: 1 ....: _rs.lock#20 FD: 1 BD: 6 +.+.: calipso_doi_list_lock FD: 129 BD: 72 +.+.: &tn->idrinfo->lock#3 ->fs_reclaim ->&c->lock ->&n->list_lock ->pool_lock#2 ->&obj_hash[i].lock ->&rq->__lock FD: 1 BD: 4 +.+.: chan_lock FD: 1 BD: 1 +...: &list->lock#44 FD: 1 BD: 21 +...: &list->lock#45 FD: 16 BD: 79 +.-.: &dccp_hashinfo.bhash[i].lock ->&dccp_hashinfo.bhash2[i].lock ->stock_lock ->&obj_hash[i].lock ->pool_lock#2 FD: 15 BD: 80 +.-.: &dccp_hashinfo.bhash2[i].lock ->stock_lock ->&____s->seqcount#2 ->&____s->seqcount ->pool_lock#2 ->&c->lock ->clock-AF_INET ->&obj_hash[i].lock ->batched_entropy_u8.lock ->&hashinfo->ehash_locks[i] ->per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) FD: 1 BD: 2 +.+.: loop_validate_mutex.wait_lock FD: 157 BD: 1 +.+.: (wq_completion)bond0#8 ->(work_completion)(&(&slave->notify_work)->work) FD: 200 BD: 73 +.+.: team->team_lock_key#8 ->fs_reclaim ->netpoll_srcu ->net_rwsem ->&tn->lock ->_xmit_ETHER ->&dir->lock#2 ->input_pool.lock ->&ndev->lock ->&c->lock ->&n->list_lock ->&obj_hash[i].lock ->nl_table_lock ->nl_table_wait.lock ->&rq->__lock ->&in_dev->mc_tomb_lock ->&im->lock ->cbs_list_lock ->sysfs_symlink_target_lock ->lock ->&root->kernfs_rwsem ->&____s->seqcount#2 ->&____s->seqcount ->pool_lock#2 ->lweventlist_lock ->(console_sem).lock ->&tbl->lock ->&pn->hash_lock FD: 884 BD: 12 +.+.: &devlink->lock_key#8 ->crngs.lock ->fs_reclaim ->devlinks.xa_lock ->&c->lock ->&n->list_lock ->&obj_hash[i].lock ->nl_table_lock ->nl_table_wait.lock ->&xa->xa_lock#14 ->&data->lock ->pcpu_alloc_mutex ->&____s->seqcount#2 ->&____s->seqcount ->&base->lock ->pin_fs_lock ->&sb->s_type->i_mutex_key#3 ->&rq->__lock ->batched_entropy_u32.lock ->rtnl_mutex ->rcu_node_0 ->&(&fn_net->fib_chain)->lock ->&devlink_port->type_lock ->stack_depot_init_mutex ->&nsim_trap_data->trap_lock ->&rcu_state.expedited_wq ->&cfs_rq->removed.lock ->&dentry->d_lock ->&fsnotify_mark_srcu ->&sb->s_type->i_lock_key#7 ->&s->s_inode_list_lock ->&xa->xa_lock#6 ->mount_lock ->rtnl_mutex.wait_lock ->&p->pi_lock ->rcu_state.barrier_mutex ->dev_base_lock ->lweventlist_lock ->netdev_unregistering_wq.lock ->krc.lock ->&dir->lock#2 ->(work_completion)(&(&devlink_port->type_warn_dw)->work) ->rcu_state.barrier_mutex.wait_lock ->(work_completion)(&(&hwstats->traffic_dw)->work) ->&hwstats->hwsdev_list_lock ->(work_completion)(&data->fib_flush_work) ->(work_completion)(&data->fib_event_work) ->(work_completion)(&ht->run_work) ->&ht->mutex ->(work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) ->&x->wait#10 ->pcpu_lock ->&region->snapshot_lock ->stock_lock FD: 131 BD: 1 +.+.: (wq_completion)wg-kex-wg1#15 ->(work_completion)(&peer->transmit_handshake_work) FD: 166 BD: 1 +.+.: (wq_completion)wg-kex-wg0#16 ->(work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) FD: 166 BD: 1 +.+.: (wq_completion)wg-kex-wg1#16 ->(work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr +
(((__per_cpu_offset[(cpu)])))); }); })->work) FD: 167 BD: 1 +.+.: (wq_completion)wg-crypt-wg1#8 ->(work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) ->(work_completion)(&peer->transmit_packet_work) FD: 167 BD: 1 +.+.: (wq_completion)wg-crypt-wg0#8 ->(work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) ->(work_completion)(&peer->transmit_packet_work) ->&rq->__lock FD: 131 BD: 1 +.+.: (wq_completion)wg-kex-wg2#15 ->(work_completion)(&peer->transmit_handshake_work) FD: 166 BD: 1 +.+.: (wq_completion)wg-kex-wg2#16 ->&rq->__lock ->(work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) FD: 167 BD: 1 +.+.: (wq_completion)wg-crypt-wg2#8 ->(work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) ->(work_completion)(&peer->transmit_packet_work) FD: 31 BD: 75 +.+.: (wq_completion)phy21 ->&rq->__lock ->(work_completion)(&local->reconfig_filter) FD: 21 BD: 75 +.+.: (wq_completion)phy22 ->(work_completion)(&local->reconfig_filter) FD: 1 BD: 76 +...: _xmit_PIMREG#2 FD: 1 BD: 75 +...: &token_hash[i].lock FD: 1 BD: 72 +...: &c->lock#2 FD: 1 BD: 16 +.+.: &pool->flush_lock FD: 1 BD: 1 ....: _rs.lock#22 FD: 1 BD: 4 +.+.: cmtp_sk_list.lock FD: 29 BD: 1 +.+.: sk_lock-AF_KEY ->&rq->__lock ->slock-AF_KEY FD: 1 BD: 2 +...: slock-AF_KEY FD: 1 BD: 22 +.+.: (work_completion)(&(&hdev->rpa_expired)->work) FD: 1 BD: 22 +.+.: (work_completion)(&(&hdev->discov_off)->work) FD: 1 BD: 22 +.+.: (work_completion)(&(&hdev->service_cache)->work) FD: 29 BD: 9 +.+.: &id_priv->handler_mutex ->&rq->__lock ->&id_priv->lock FD: 35 BD: 2 +.+.: (work_completion)(&aux->work)#2 ->&aux->poke_mutex ->&rq->__lock ->&cfs_rq->removed.lock ->&obj_hash[i].lock ->map_idr_lock FD: 28 BD: 3 +.+.: &aux->poke_mutex ->&rq->__lock FD: 1 BD: 9 ....: &x->wait#27 FD: 37 BD: 7 +.+.: &tc->t_conn_path_lock ->clock-AF_INET6 ->&cp->cp_lock ->&rq->__lock ->&c->lock ->&____s->seqcount ->pool_lock#2 ->&obj_hash[i].lock FD: 1 BD: 120 +...: rds_tcp_tc_list_lock FD: 39 BD: 2 +.+.: &pfk->dump_lock ->&net->xfrm.xfrm_policy_lock ->&rq->__lock ->&c->lock ->pool_lock#2 ->rlock-AF_KEY ->&data->lock ->&obj_hash[i].lock FD: 1 BD: 3 +.+.: (work_completion)(&smc->tcp_listen_work) FD: 1 BD: 12 ....: rdma_nets_rwsem.wait_lock FD: 279 BD: 3 +.+.: sk_lock-AF_SMC/1 ->slock-AF_SMC ->k-clock-AF_INET ->k-sk_lock-AF_INET ->k-slock-AF_INET FD: 145 BD: 1 .+.+: kn->active#64 ->fs_reclaim ->&c->lock ->stock_lock ->&kernfs_locks->open_file_mutex[count] FD: 1 BD: 178 +.+.: gdp_mutex.wait_lock FD: 2 BD: 238 +.+.: &tn->idr_lock ->pool_lock#2 FD: 33 BD: 1 ..-.: 
&(&bond->mcast_work)->timer FD: 21 BD: 1 +.+.: (wq_completion)phy28 ->(work_completion)(&local->reconfig_filter) FD: 91 BD: 1 +.-.: (&peer->hb_timer) ->slock-AF_INET6 FD: 233 BD: 1 +.+.: (wq_completion)bond1#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 1 BD: 148 ....: crypto_alg_sem.wait_lock FD: 136 BD: 2 +.+.: (work_completion)(&crct10dif_rehash_work) ->crc_t10dif_mutex ->&obj_hash[i].lock ->pool_lock#2 FD: 1 BD: 26 ....: namespace_sem.wait_lock FD: 6 BD: 1 ....: &trie->lock ->&zone->lock ->&____s->seqcount ->pool_lock#2 ->stock_lock FD: 1 BD: 11 ....: devices_rwsem.wait_lock FD: 1 BD: 1 +...: &ipvs->sync_lock FD: 1 BD: 13 ....: &x->wait#29 FD: 1 BD: 13 ....: &x->wait#28 FD: 1 BD: 1 ....: _rs.lock#23 FD: 1 BD: 4 +...: &sap->sk_lock FD: 1 BD: 4 .+..: x25_route_list_lock FD: 46 BD: 3425 +...: &dev_addr_list_lock_key#3/2 ->&dev_addr_list_lock_key/1 ->&c->lock ->&obj_hash[i].lock ->krc.lock FD: 31 BD: 1 ..-.: &(&l->destroy_dwork)->timer FD: 1 BD: 2 +.+.: epnested_mutex.wait_lock FD: 1 BD: 3427 +.-.: &list->lock#46 FD: 1 BD: 76 +...: &qdisc_xmit_lock_key#5 FD: 129 BD: 72 +.+.: &tn->idrinfo->lock#4 ->fs_reclaim ->pool_lock#2 FD: 140 BD: 72 +.+.: zones_mutex ->fs_reclaim ->&____s->seqcount ->pool_lock#2 ->&obj_hash[i].lock ->batched_entropy_u32.lock ->&base->lock ->&rq->__lock ->flowtable_lock FD: 31 BD: 83 ..-.: &(&flowtable->gc_work)->timer FD: 1 BD: 83 +.+.: (wq_completion)nf_ft_offload_add FD: 1 BD: 83 +.+.: (wq_completion)nf_ft_offload_del FD: 28 BD: 83 +.+.: (wq_completion)nf_ft_offload_stats ->&rq->__lock ->&cfs_rq->removed.lock ->&obj_hash[i].lock FD: 233 BD: 1 +.+.: (wq_completion)bond1#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond2#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond1#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond3#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond4#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond2#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond3#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond14 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 885 BD: 1 +.+.: (wq_completion)smc_hs_wq ->(work_completion)(&smc->connect_work) FD: 233 BD: 1 +.+.: (wq_completion)bond4#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond13 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond15 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond16 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 
304 BD: 1 .+.+: kn->active#65 ->fs_reclaim ->stock_lock ->&kernfs_locks->open_file_mutex[count] ->cpu_hotplug_lock ->&c->lock ->&n->list_lock FD: 233 BD: 1 +.+.: (wq_completion)bond17 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 1 BD: 143 ....: &dl_b->lock FD: 233 BD: 1 +.+.: (wq_completion)bond12 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond18 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond13#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond19 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond10 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond7 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond14#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond20 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond11 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond15#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond8 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond21 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond12#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond16#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond9 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond22 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond17#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond10#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond23 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond18#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond24 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond19#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond20#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 
233 BD: 1 +.+.: (wq_completion)bond21#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond23#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond22#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond24#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond25 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond17#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond19#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond20#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond18#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond21#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond19#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond22#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond20#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond3#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond23#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond21#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond4#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond24#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond5#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond6 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond7#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 1 BD: 77 ....: &pool->xsk_tx_list_lock FD: 157 BD: 1 +.+.: (wq_completion)bond34 ->(work_completion)(&(&slave->notify_work)->work) FD: 157 BD: 1 +.+.: (wq_completion)bond35 ->(work_completion)(&(&slave->notify_work)->work) FD: 157 BD: 1 +.+.: (wq_completion)bond36 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) FD: 157 BD: 1 +.+.: (wq_completion)bond37 ->(work_completion)(&(&slave->notify_work)->work) FD: 157 BD: 1 +.+.: (wq_completion)bond39 ->(work_completion)(&(&slave->notify_work)->work) FD: 157 BD: 1 +.+.: 
(wq_completion)bond40 ->(work_completion)(&(&slave->notify_work)->work) FD: 157 BD: 1 +.+.: (wq_completion)bond41 ->(work_completion)(&(&slave->notify_work)->work) FD: 157 BD: 1 +.+.: (wq_completion)bond42 ->(work_completion)(&(&slave->notify_work)->work) FD: 157 BD: 1 +.+.: (wq_completion)bond43 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) FD: 157 BD: 1 +.+.: (wq_completion)bond35#2 ->(work_completion)(&(&slave->notify_work)->work) FD: 157 BD: 1 +.+.: (wq_completion)bond44 ->(work_completion)(&(&slave->notify_work)->work) FD: 157 BD: 1 +.+.: (wq_completion)bond36#2 ->(work_completion)(&(&slave->notify_work)->work) FD: 157 BD: 1 +.+.: (wq_completion)bond37#2 ->(work_completion)(&(&slave->notify_work)->work) FD: 157 BD: 1 +.+.: (wq_completion)bond38 ->(work_completion)(&(&slave->notify_work)->work) FD: 157 BD: 1 +.+.: (wq_completion)bond39#2 ->(work_completion)(&(&slave->notify_work)->work) FD: 157 BD: 1 +.+.: (wq_completion)bond40#2 ->(work_completion)(&(&slave->notify_work)->work) FD: 157 BD: 1 +.+.: (wq_completion)bond41#2 ->(work_completion)(&(&slave->notify_work)->work) FD: 157 BD: 1 +.+.: (wq_completion)bond42#2 ->(work_completion)(&(&slave->notify_work)->work) FD: 157 BD: 1 +.+.: (wq_completion)bond45 ->(work_completion)(&(&slave->notify_work)->work) FD: 157 BD: 1 +.+.: (wq_completion)bond43#2 ->(work_completion)(&(&slave->notify_work)->work) FD: 157 BD: 1 +.+.: (wq_completion)bond44#2 ->(work_completion)(&(&slave->notify_work)->work) FD: 157 BD: 1 +.+.: (wq_completion)bond46 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) FD: 157 BD: 1 +.+.: (wq_completion)bond34#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) FD: 157 BD: 1 +.+.: (wq_completion)bond32 ->(work_completion)(&(&slave->notify_work)->work) FD: 157 BD: 1 +.+.: (wq_completion)bond35#3 ->(work_completion)(&(&slave->notify_work)->work) FD: 157 BD: 1 +.+.: (wq_completion)bond33 ->(work_completion)(&(&slave->notify_work)->work) FD: 157 BD: 1 +.+.: (wq_completion)bond36#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) FD: 157 BD: 1 +.+.: (wq_completion)bond35#4 ->(work_completion)(&(&slave->notify_work)->work) FD: 157 BD: 1 +.+.: (wq_completion)bond37#3 ->(work_completion)(&(&slave->notify_work)->work) FD: 157 BD: 1 +.+.: (wq_completion)bond36#4 ->(work_completion)(&(&slave->notify_work)->work) FD: 157 BD: 1 +.+.: (wq_completion)bond38#2 ->(work_completion)(&(&slave->notify_work)->work) FD: 157 BD: 1 +.+.: (wq_completion)bond37#4 ->(work_completion)(&(&slave->notify_work)->work) FD: 157 BD: 1 +.+.: (wq_completion)bond39#3 ->(work_completion)(&(&slave->notify_work)->work) FD: 157 BD: 1 +.+.: (wq_completion)bond38#3 ->(work_completion)(&(&slave->notify_work)->work) FD: 157 BD: 1 +.+.: (wq_completion)bond40#3 ->(work_completion)(&(&slave->notify_work)->work) FD: 157 BD: 1 +.+.: (wq_completion)bond39#4 ->(work_completion)(&(&slave->notify_work)->work) FD: 157 BD: 1 +.+.: (wq_completion)bond41#3 ->(work_completion)(&(&slave->notify_work)->work) FD: 157 BD: 1 +.+.: (wq_completion)bond40#4 ->(work_completion)(&(&slave->notify_work)->work) FD: 157 BD: 1 +.+.: (wq_completion)bond42#3 ->(work_completion)(&(&slave->notify_work)->work) FD: 157 BD: 1 +.+.: (wq_completion)bond41#4 ->(work_completion)(&(&slave->notify_work)->work) FD: 157 BD: 1 +.+.: (wq_completion)bond43#3 ->(work_completion)(&(&slave->notify_work)->work) FD: 157 BD: 1 +.+.: (wq_completion)bond42#4 ->(work_completion)(&(&slave->notify_work)->work) FD: 157 BD: 1 +.+.: (wq_completion)bond44#3 
->(work_completion)(&(&slave->notify_work)->work) FD: 157 BD: 1 +.+.: (wq_completion)bond45#2 ->(work_completion)(&(&slave->notify_work)->work) FD: 157 BD: 1 +.+.: (wq_completion)bond23#4 ->(work_completion)(&(&slave->notify_work)->work) FD: 157 BD: 1 +.+.: (wq_completion)bond24#4 ->(work_completion)(&(&slave->notify_work)->work) FD: 157 BD: 1 +.+.: (wq_completion)bond25#2 ->(work_completion)(&(&slave->notify_work)->work) FD: 157 BD: 1 +.+.: (wq_completion)bond26 ->(work_completion)(&(&slave->notify_work)->work) FD: 157 BD: 1 +.+.: (wq_completion)bond27 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) FD: 157 BD: 1 +.+.: (wq_completion)bond28 ->(work_completion)(&(&slave->notify_work)->work) FD: 157 BD: 1 +.+.: (wq_completion)bond29 ->(work_completion)(&(&slave->notify_work)->work) FD: 157 BD: 1 +.+.: (wq_completion)bond30 ->(work_completion)(&(&slave->notify_work)->work) FD: 157 BD: 1 +.+.: (wq_completion)bond31 ->(work_completion)(&(&slave->notify_work)->work) FD: 157 BD: 1 +.+.: (wq_completion)bond32#2 ->(work_completion)(&(&slave->notify_work)->work) FD: 157 BD: 1 +.+.: (wq_completion)bond33#2 ->(work_completion)(&(&slave->notify_work)->work) FD: 157 BD: 1 +.+.: (wq_completion)bond34#3 ->(work_completion)(&(&slave->notify_work)->work) FD: 157 BD: 1 +.+.: (wq_completion)bond72 ->(work_completion)(&(&slave->notify_work)->work) FD: 157 BD: 1 +.+.: (wq_completion)bond74 ->(work_completion)(&(&slave->notify_work)->work) FD: 157 BD: 1 +.+.: (wq_completion)bond80 ->(work_completion)(&(&slave->notify_work)->work) FD: 157 BD: 1 +.+.: (wq_completion)bond76 ->(work_completion)(&(&slave->notify_work)->work) FD: 157 BD: 1 +.+.: (wq_completion)bond68 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) FD: 157 BD: 1 +.+.: (wq_completion)bond77 ->(work_completion)(&(&slave->notify_work)->work) FD: 157 BD: 1 +.+.: (wq_completion)bond70 ->(work_completion)(&(&slave->notify_work)->work) FD: 157 BD: 1 +.+.: (wq_completion)bond69 ->(work_completion)(&(&slave->notify_work)->work) FD: 157 BD: 1 +.+.: (wq_completion)bond71 ->(work_completion)(&(&slave->notify_work)->work) FD: 157 BD: 1 +.+.: (wq_completion)bond83 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) FD: 157 BD: 1 +.+.: (wq_completion)bond78 ->(work_completion)(&(&slave->notify_work)->work) FD: 157 BD: 1 +.+.: (wq_completion)bond79 ->(work_completion)(&(&slave->notify_work)->work) FD: 157 BD: 1 +.+.: (wq_completion)bond73 ->(work_completion)(&(&slave->notify_work)->work) FD: 157 BD: 1 +.+.: (wq_completion)bond80#2 ->(work_completion)(&(&slave->notify_work)->work) FD: 157 BD: 1 +.+.: (wq_completion)bond74#2 ->(work_completion)(&(&slave->notify_work)->work) FD: 157 BD: 1 +.+.: (wq_completion)bond71#2 ->(work_completion)(&(&slave->notify_work)->work) FD: 157 BD: 1 +.+.: (wq_completion)bond84 ->(work_completion)(&(&slave->notify_work)->work) FD: 157 BD: 1 +.+.: (wq_completion)bond76#2 ->(work_completion)(&(&slave->notify_work)->work) FD: 157 BD: 1 +.+.: (wq_completion)bond72#2 ->(work_completion)(&(&slave->notify_work)->work) FD: 157 BD: 1 +.+.: (wq_completion)bond85 ->(work_completion)(&(&slave->notify_work)->work) FD: 157 BD: 1 +.+.: (wq_completion)bond86 ->(work_completion)(&(&slave->notify_work)->work) FD: 157 BD: 1 +.+.: (wq_completion)bond77#2 ->(work_completion)(&(&slave->notify_work)->work) FD: 157 BD: 1 +.+.: (wq_completion)bond87 ->(work_completion)(&(&slave->notify_work)->work) FD: 157 BD: 1 +.+.: (wq_completion)bond88 ->(work_completion)(&(&slave->notify_work)->work) FD: 157 BD: 1 +.+.: 
(wq_completion)bond78#2 ->(work_completion)(&(&slave->notify_work)->work) FD: 157 BD: 1 +.+.: (wq_completion)bond79#2 ->(work_completion)(&(&slave->notify_work)->work) FD: 157 BD: 1 +.+.: (wq_completion)bond80#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) FD: 157 BD: 1 +.+.: (wq_completion)bond75 ->(work_completion)(&(&slave->notify_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond59 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond91 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond78#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond85#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond92 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond79#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond60 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond86#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond93 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond80#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond84#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond61 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond63 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond62 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond87#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond88#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond85#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond84#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond89 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond7#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond86#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond96 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) 
->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond85#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond8#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond87#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond97 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond86#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond91#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond9#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond88#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond98 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond87#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond92#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond10#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond89#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond99 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 1 BD: 145 ....: key#28 FD: 233 BD: 1 +.+.: (wq_completion)bond88#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond93#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond11#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond90 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond100 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond89#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond94 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond12#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond91#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond101 
->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond90#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond95 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond13#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond92#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond102 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond91#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond96#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond14#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond93#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond103 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond92#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond97#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond15#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond94#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond104 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond93#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond98#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond16#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond95#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond105 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond94#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond99#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond17#4 ->(work_completion)(&(&slave->notify_work)->work) 
->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond96#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond106 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond95#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond100#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond18#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond97#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond107 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond96#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond101#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond19#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond98#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond108 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond97#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond102#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond20#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond99#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond109 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond98#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond103#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond21#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond100#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond110 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond99#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: 
(wq_completion)bond104#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond22#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond101#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond111 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond100#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond105#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond23#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond102#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond112 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond101#4 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond24#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond106#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond103#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond113 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond102#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond25#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond107#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond104#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond114 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond103#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond26#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond108#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond105#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond115 
->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond104#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond27#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond109#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond105#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond28#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond110#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond107#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond106#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond29#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond111#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond108#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond107#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond30#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond112#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond109#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond108#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond31#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond113#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond110#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond109#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond32#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond114#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond111#3 
->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond110#4 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond33#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond115#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond112#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond111#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond34#4 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond116 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond113#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond112#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond35#5 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond117 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond125 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond114#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond113#4 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond36#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond118 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond126 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond115#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond114#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond37#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond119 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond127 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond116#2 
->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond38#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond115#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond120 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond128 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond117#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond39#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond116#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond121 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond129 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond118#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond40#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond117#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond122 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond130 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond119#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond41#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond118#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond123 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond131 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond120#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond42#5 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond124 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond119#3 ->(work_completion)(&(&slave->notify_work)->work) 
->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond132 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond121#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond43#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond125#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond120#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond133 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond122#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond44#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond126#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond121#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond134 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond123#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond45#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond127#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond122#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond135 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond124#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond46#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond128#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond123#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond136 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond125#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond47 ->(work_completion)(&(&slave->notify_work)->work) 
->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond129#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond124#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond137 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond126#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond48 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond130#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond125#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond138 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond127#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond49 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond131#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond139 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond50 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond132#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond127#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond140 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond51 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond133#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond128#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond141 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond52 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond129#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond142 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: 
(wq_completion)bond53 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond130#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond143 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond54 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond131#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond144 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond55 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond132#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond145 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond56 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond133#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond146 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond57 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond137#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond134#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond147 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond58 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond138#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond135#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond148 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond59#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond139#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond136#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond60#2 
->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond137#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond61#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond141#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond140#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond138#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond142#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond149 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond62#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond141#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond139#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond150 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond63#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond142#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond140#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond143#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond151 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond64 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond143#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond141#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond144#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond152 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond65 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond153 
->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond145#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond66 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond144#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond142#4 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond154 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond146#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond145#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond67 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond147#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond146#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond68#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond143#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond156 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond148#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond147#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond69#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond144#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond157 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond149#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond148#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond70#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond158 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond150#2 
->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond149#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond71#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond145#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond159 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond150#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond72#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond146#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond160 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond151#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond73#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond147#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond161 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond152#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond74#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond148#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond162 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond151#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond75#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond163 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond152#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond153#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond76#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond164 ->(work_completion)(&(&slave->notify_work)->work) 
->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond149#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond153#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond154#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond165 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond150#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond154#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond155 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond77#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond166 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond151#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond155#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond156#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond78#4 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond167 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond156#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond152#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond157#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond168 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond157#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond153#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond158#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond79#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond154#4 ->(work_completion)(&(&slave->notify_work)->work) 
->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond159#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond80#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond169 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond158#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond155#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond170 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond159#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond156#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond171 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond160#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond157#4 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond160#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond81 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond161#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond158#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 1 BD: 3 +...: data_sockets.lock FD: 31 BD: 3 +.+.: sk_lock-AF_ISDN ->slock-AF_ISDN ->clock-AF_ISDN ->rlock-AF_ISDN ->&rq->__lock FD: 1 BD: 4 +...: slock-AF_ISDN FD: 1 BD: 4 ....: rlock-AF_ISDN FD: 233 BD: 1 +.+.: (wq_completion)bond82 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond161#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond173 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond83#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond159#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond162#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: 
(wq_completion)bond160#4 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond162#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond84#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond163#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond174 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond161#4 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond163#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond85#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond175 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond164#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond162#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond164#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond86#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond176 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond165#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond163#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond165#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond87#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond177 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond166#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond164#4 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond165#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond166#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 
233 BD: 1 +.+.: (wq_completion)bond88#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond178 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond168#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond166#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock ->&cfs_rq->removed.lock ->&obj_hash[i].lock FD: 233 BD: 1 +.+.: (wq_completion)bond167#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond169#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond167#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond168#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond90#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond170#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond168#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond169#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond180 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond91#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond171#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond169#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond170#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond181 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond92#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond172 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond170#4 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond171#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond182 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) 
->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond93#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond173#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond171#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond172#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond94#4 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond174#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond172#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond173#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond95#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond175#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond173#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond174#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond176#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond96#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond174#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond175#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond187 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond97#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond177#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond175#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond176#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond188 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond98#5 ->(work_completion)(&(&slave->notify_work)->work) 
->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond178#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond177#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond176#4 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond99#5 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond179 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond177#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond190 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond100#5 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond180#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond178#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond178#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond101#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond181#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond179#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond179#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond102#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond182#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond180#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond192 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond103#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond183 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond181#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond180#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) 
FD: 233 BD: 1 +.+.: (wq_completion)bond193 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond184 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond104#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond185 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond105#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond182#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond181#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond194 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond186 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond183#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond182#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond195 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond187#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond106#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond184#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond183#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond196 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond188#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond107#5 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond185#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond184#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond197 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond189 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 
233 BD: 1 +.+.: (wq_completion)bond108#5 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond185#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond186#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond198 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond190#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond109#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond187#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond191 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond200 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond110#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond186#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond192#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond187#4 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond188#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond188#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond193#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond189#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond201 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond111#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond194#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond202 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond112#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond189#3 ->(work_completion)(&(&slave->notify_work)->work) 
->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond195#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond190#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond203 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond113#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond190#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond196#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond191#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond204 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond114#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond191#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond197#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock ->&cfs_rq->removed.lock ->&obj_hash[i].lock FD: 233 BD: 1 +.+.: (wq_completion)bond192#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond205 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond115#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond192#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond198#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond206 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond199 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond207 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond193#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond116#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond193#4 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: 
(wq_completion)bond208 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond194#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond117#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond194#4 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond200#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond209 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond195#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond118#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond195#4 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond201#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond196#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond119#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond202#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond197#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond120#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond196#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond203#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond198#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond197#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond204#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond199#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond198#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond205#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 
BD: 1 +.+.: (wq_completion)bond200#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond121#4 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond199#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond200#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond201#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock ->rcu_node_0 ->&rcu_state.expedited_wq FD: 233 BD: 1 +.+.: (wq_completion)bond206#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond207#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond202#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond122#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond201#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond208#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond213 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond203#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond123#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond202#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond209#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond214 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond204#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond124#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond203#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond205#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond125#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond204#4 
->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond210 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond206#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond126#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond205#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond216 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond127#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond206#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond211 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond217 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond207#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond128#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond207#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond212 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond218 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond208#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond129#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond208#4 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond213#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond219 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond209#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond130#4 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond209#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 
+.+.: (wq_completion)bond220 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond210#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond210#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond214#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond221 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond211#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock ->rcu_node_0 ->&rcu_state.expedited_wq FD: 233 BD: 1 +.+.: (wq_completion)bond131#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond211#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond215 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond222 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond212#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond132#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond212#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond216#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond213#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond133#4 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond223 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond214#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond134#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond213#4 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond217#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond224 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond215#2 ->(work_completion)(&(&slave->notify_work)->work) 
->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond135#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond218#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond225 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond216#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond136#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond214#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond219#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond226 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond217#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond137#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond215#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond220#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond218#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond221#2 ->(work_completion)(&(&slave->notify_work)->work) ->&rq->__lock ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond227 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond138#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond219#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond222#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond216#4 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond139#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond220#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond223#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond217#4 
->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond228 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond221#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond218#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond140#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond229 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond222#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond225#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond141#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond223#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond226#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond227#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond219#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond142#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond230 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond224#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond220#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond143#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond231 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond225#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond228#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond232 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond229#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 
1 +.+.: (wq_completion)bond144#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond233 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond230#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond234 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond226#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond231#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond221#4 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond145#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond235 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond227#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond232#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond146#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond236 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond228#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond147#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond237 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond229#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond238 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond230#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond233#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond231#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond234#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond222#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 
BD: 1 +.+.: (wq_completion)bond148#5 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond232#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond235#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond233#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond236#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond223#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond234#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond237#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond224#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond149#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond235#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond238#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond225#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond150#5 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond236#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond239 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond237#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond226#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond227#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond240 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond151#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond238#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond228#4 ->(work_completion)(&(&slave->notify_work)->work) 
->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond152#5 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond239#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond242 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond229#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond153#5 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond230#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond154#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond240#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond243 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond155#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond241 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond244 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond156#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond231#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond245 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond242#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond157#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond232#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond246 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond243#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond158#5 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond233#4 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond247 
->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond244#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond159#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond234#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond248 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond245#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond160#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond235#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond249 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond246#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond250 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond247#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond161#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond236#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond251 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond162#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond237#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond248#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond163#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond238#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond253 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond164#5 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond239#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: 
(wq_completion)bond254 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond249#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond165#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond240#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond255 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond250#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond256 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond241#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond256#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond251#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond257 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond166#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond257#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond258 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond242#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond258#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond252 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond259 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond253#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond259#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond243#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond260 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond254#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) 
->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond260#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond244#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond167#4 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond255#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond261 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond168#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond261#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond256#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond262 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond245#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond169#5 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond262#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond257#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond263 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond246#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond170#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond263#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond264 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond171#5 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond264#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond265 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond259#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond265#2 ->(work_completion)(&(&slave->notify_work)->work) 
->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond260#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond266 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond247#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond266#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond267 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond248#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond172#4 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond267#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond262#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond268 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond249#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond173#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond268#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond263#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&cfs_rq->removed.lock ->&obj_hash[i].lock FD: 233 BD: 1 +.+.: (wq_completion)bond174#5 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond269 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond264#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond269#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond250#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond270 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond175#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond271 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond270#2 
->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond265#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond251#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond176#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond272 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond273 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond177#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond271#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond252#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond266#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond272#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond253#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 1 BD: 75 +.+.: tcpv6_prot_mutex FD: 1 BD: 75 +...: device_spinlock FD: 233 BD: 1 +.+.: (wq_completion)bond178#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond274 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond179#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond275 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond273#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond254#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 2 BD: 2 +.+.: &(&net->xfrm.policy_hthresh.lock)->lock ->&____s->seqcount#17 FD: 1 BD: 6 +.+.: &____s->seqcount#17 FD: 39 BD: 2 +.+.: (work_completion)(&net->xfrm.policy_hthresh.work) ->hash_resize_mutex FD: 38 BD: 3 +.+.: hash_resize_mutex ->&____s->seqcount#17 ->&net->xfrm.xfrm_policy_lock FD: 233 BD: 1 +.+.: (wq_completion)bond268#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond180#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond255#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) 
->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond181#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond277 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond256#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond275#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond269#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond257#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond258#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond271#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond183#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond279 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond259#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond272#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond184#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond273#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond185#4 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond281 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond186#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond282 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond274#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond275#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond276 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond260#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond188#5 ->&rq->__lock 
->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond284 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond277#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond261#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond281#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond262#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond189#4 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond285 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond278 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond263#4 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond282#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond190#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond279#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond264#4 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond283 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond191#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond286 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond280 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond265#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond284#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond192#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond281#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond285#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: 
(wq_completion)bond193#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond287 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond194#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond288 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond284#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond289 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond266#4 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond286#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond195#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond196#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond197#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond286#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond290 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond267#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 157 BD: 1 +.+.: (wq_completion)bond288#2 ->(work_completion)(&(&slave->notify_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond198#5 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond268#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond199#4 ->&rq->__lock ->&cfs_rq->removed.lock ->&obj_hash[i].lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond269#4 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond200#5 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond288#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond292 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond270#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 
233 BD: 1 +.+.: (wq_completion)bond201#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond289#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond293 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond294 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond271#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond202#5 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond295 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond291 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond292#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond296 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond272#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond203#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond293#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond297 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond273#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond274#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond204#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond298 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond289#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond275#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond205#5 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond299 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond276#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) 
->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond206#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond294#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond277#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond208#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond295#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond278#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond209#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond296#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond279#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond300 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond210#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond297#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond280#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond301 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond211#4 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond298#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond281#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond302 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond212#4 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond213#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond282#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond214#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond283#2 ->(work_completion)(&(&slave->notify_work)->work) 
->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond299#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond215#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond284#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond303 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond216#5 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond300#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond285#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond304 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond291#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond217#5 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond301#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond286#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond218#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond302#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond287#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond305 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond219#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond303#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond220#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond288#4 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond306 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond305#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond289#4 
->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond307 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond306#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond221#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond290#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond294#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond307#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond291#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond308 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond308#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond222#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond292#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond309 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond309#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond293#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond223#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond310 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond295#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond294#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond311 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond295#4 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond224#4 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond310#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond312 
->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond296#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond225#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond311#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond313 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond297#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond226#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond298#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond227#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond315 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond299#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond316 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond300#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond228#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond313#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond229#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond314 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond317 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond301#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond230#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond65#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock ->&cfs_rq->removed.lock ->&obj_hash[i].lock FD: 233 BD: 1 +.+.: (wq_completion)bond302#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond231#5 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) 
->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond318 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond303#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond232#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond315#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond233#5 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond316#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond319 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond234#5 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond317#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond66#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond305#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond235#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond67#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond320 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond236#5 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond68#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond297#4 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond321 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond306#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond69#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond307#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond70#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond322 ->(work_completion)(&(&slave->notify_work)->work) 
->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond308#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond237#5 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond323 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond309#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond238#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond71#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond239#4 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond319#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond240#4 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond72#4 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond320#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond310#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond321#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond325 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond241#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock ->&cfs_rq->removed.lock ->&obj_hash[i].lock FD: 233 BD: 1 +.+.: (wq_completion)bond322#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond299#4 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond73#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond311#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond242#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond323#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond324 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond300#4 
->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond312#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond325#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond74#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond328 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond243#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond75#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond76#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond329 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond327 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond326 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond244#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond245#4 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond77#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond313#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond328#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond246#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond331 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond314#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond329#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond332 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond78#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond315#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond330 
->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond333 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond79#5 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond316#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond331#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond332#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond334 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond80#6 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond335 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond317#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&cfs_rq->removed.lock ->&obj_hash[i].lock FD: 233 BD: 1 +.+.: (wq_completion)bond333#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond247#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond81#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond334#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond248#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond82#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond318#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond335#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond83#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond336 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond249#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond336#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond319#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 
+.+.: (wq_completion)bond337 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond250#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond320#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond338 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond251#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond337#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond339 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond338#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond84#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond321#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond340 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond252#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond339#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond322#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond253#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond340#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond85#6 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond323#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond341 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond254#4 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond341#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond342 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond342#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) 
FD: 233 BD: 1 +.+.: (wq_completion)bond86#6 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond324#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond343 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond255#4 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond343#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond87#6 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond325#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond256#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond344 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond88#6 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond257#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond345 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond89#4 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond326#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond327#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond258#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond346 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond344#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond345#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond346#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond347 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond328#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond347#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 
+.+.: (wq_completion)bond259#5 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond92#6 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond348 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond260#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond349 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond350 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond351 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond329#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond330#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond348#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond304#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond261#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond262#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond93#6 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond331#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond349#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&cfs_rq->removed.lock ->&obj_hash[i].lock FD: 233 BD: 1 +.+.: (wq_completion)bond350#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond305#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond351#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond263#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond94#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond352 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond332#3 ->(work_completion)(&(&slave->notify_work)->work) 
->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond352#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond95#5 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond264#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond353 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond333#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond265#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond354 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond334#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond353#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond266#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond267#4 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond355 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond335#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond354#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond268#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond306#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond96#6 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond356 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond336#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond355#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond337#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond307#4 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond97#6 ->(work_completion)(&(&slave->notify_work)->work) 
->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond98#6 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond357 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond338#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock ->&cfs_rq->removed.lock ->&obj_hash[i].lock FD: 233 BD: 1 +.+.: (wq_completion)bond358 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond356#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond339#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond357#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond269#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond99#6 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond341#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond100#6 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond358#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond270#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond359 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond271#5 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond343#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond360 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond344#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond309#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond361 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond272#5 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond345#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock 
->&cfs_rq->removed.lock ->&obj_hash[i].lock FD: 233 BD: 1 +.+.: (wq_completion)bond362 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond273#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond346#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond363 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond274#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond347#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond101#6 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond364 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond275#5 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond102#6 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond365 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond276#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond103#6 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond366 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond277#4 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond367 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond278#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond348#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond279#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond368 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond104#6 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond349#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond280#3 
->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond369 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond350#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond370 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond351#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond310#4 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond281#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond371 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond105#6 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond352#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond282#4 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond106#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond353#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond311#4 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond283#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond372 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond107#6 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond284#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond373 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond108#6 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond354#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond285#4 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond286#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: 
(wq_completion)bond109#6 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond110#6 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond355#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond287#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond111#6 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond356#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond288#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond112#6 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond357#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond289#5 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond113#6 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond358#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond290#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond114#6 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond291#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond115#6 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond359#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond292#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond360#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond293#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond361#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond294#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond313#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 
233 BD: 1 +.+.: (wq_completion)bond362#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond118#5 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond295#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond296#4 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond363#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock ->&cfs_rq->removed.lock ->&obj_hash[i].lock FD: 233 BD: 1 +.+.: (wq_completion)bond120#5 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond297#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond121#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond298#4 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond364#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond314#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond299#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond365#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond122#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond300#5 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond366#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond123#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond301#4 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond367#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond302#4 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond303#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond368#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond304#3 
->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond126#5 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond305#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond306#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock ->&cfs_rq->removed.lock ->&obj_hash[i].lock FD: 233 BD: 1 +.+.: (wq_completion)bond369#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond370#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond127#6 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond307#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond308#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond371#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond128#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond309#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond372#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond373#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond310#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond374 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond311#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond312#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond375 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond132#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond313#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond376 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond317#4 ->(work_completion)(&(&slave->notify_work)->work) 
->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond314#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond377 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond133#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond315#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond378 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond134#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond379 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond318#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond316#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond135#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond319#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond317#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond380 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond136#4 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond318#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond381 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond397 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond137#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond319#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond398 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond320#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond399 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond138#5 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) 
->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond400 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock ->rcu_node_0 ->&rcu_state.expedited_wq FD: 233 BD: 1 +.+.: (wq_completion)bond139#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond321#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond382 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond320#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond140#5 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond383 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond321#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond401 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond141#6 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond384 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond322#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond385 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond402 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond142#6 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond143#6 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond386 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond323#4 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond144#6 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond324#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond404 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond145#6 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond325#5 ->(work_completion)(&(&slave->notify_work)->work) 
->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond325#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond146#6 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond405 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond387 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond326#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond326#4 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond147#6 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond388 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond327#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond148#6 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond328#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond149#6 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond406 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond329#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond150#6 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond407 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond151#6 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond330#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond152#6 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond408 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond389 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond153#6 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond390 
->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond331#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond391 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond332#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond329#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond409 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond155#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond410 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond392 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond156#6 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond157#6 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond411 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond393 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond334#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond158#6 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&cfs_rq->removed.lock ->&obj_hash[i].lock FD: 233 BD: 1 +.+.: (wq_completion)bond159#6 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond394 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond395 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond406#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond412 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond396 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond335#4 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond413 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) 
->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond397#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond336#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond414 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond160#6 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond407#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond415 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock ->&cfs_rq->removed.lock ->&obj_hash[i].lock FD: 233 BD: 1 +.+.: (wq_completion)bond408#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond161#6 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond331#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond416 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond417 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond409#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond162#6 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->rcu_node_0 ->&rcu_state.expedited_wq FD: 233 BD: 1 +.+.: (wq_completion)bond410#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 1 BD: 6 +...: &pernet->lock FD: 233 BD: 1 +.+.: (wq_completion)bond398#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond337#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond399#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond338#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond163#6 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond418 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond400#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond411#2 ->(work_completion)(&(&slave->notify_work)->work) 
->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond401#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond412#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond339#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond332#5 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond164#6 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond165#6 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond419 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond166#6 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond402#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond413#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond167#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond420 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond403 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond414#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond421 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond415#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond340#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond168#6 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond422 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond423 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond416#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond424 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond169#6 ->&rq->__lock 
->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond417#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond404#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond170#6 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond425 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond171#6 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock ->&cfs_rq->removed.lock ->&obj_hash[i].lock FD: 233 BD: 1 +.+.: (wq_completion)bond426 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond172#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond405#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond341#4 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond173#6 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond427 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond174#6 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond342#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond343#4 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond406#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond335#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond344#4 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond407#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->rcu_node_0 ->&rcu_state.expedited_wq FD: 233 BD: 1 +.+.: (wq_completion)bond418#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond345#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond419#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond429 ->&rq->__lock 
->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond420#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond175#6 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond408#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond430 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond421#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond176#6 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond409#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond346#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond422#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond336#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond177#6 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond423#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond178#6 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond347#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond431 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond179#5 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond410#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond348#4 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond411#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond349#4 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond412#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond424#2 ->&rq->__lock ->&cfs_rq->removed.lock ->&obj_hash[i].lock ->(work_completion)(&(&slave->notify_work)->work) 
->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond350#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond351#4 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond433 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond180#6 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond434 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond352#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond435 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond414#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond340#4 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond415#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond425#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond426#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond417#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond427#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond418#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond428 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond353#4 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond419#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond429#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond354#4 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond420#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock ->&cfs_rq->removed.lock ->&obj_hash[i].lock FD: 233 BD: 1 +.+.: (wq_completion)bond430#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond421#3 
->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond355#4 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond422#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond356#4 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond423#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond432 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond357#4 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond440 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond424#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond343#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock ->&cfs_rq->removed.lock ->&obj_hash[i].lock FD: 233 BD: 1 +.+.: (wq_completion)bond441 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond442 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond425#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond344#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond443 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond444 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond426#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond433#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond358#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond359#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond445 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond427#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond434#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock 
FD: 233 BD: 1 +.+.: (wq_completion)bond428#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond360#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond446 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond429#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond430#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond436 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond431#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond437 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond361#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond448 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond432#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond438 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond362#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond439 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond363#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond450 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond440#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond433#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond441#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond434#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond364#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond451 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 4 BD: 3 +.+.: &rng->jent_lock ->&obj_hash[i].lock ->pool_lock#2 FD: 233 BD: 1 +.+.: (wq_completion)bond442#2 ->(work_completion)(&(&slave->notify_work)->work) 
->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond365#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond443#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond366#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond444#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond435#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond367#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond453 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond368#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond436#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond437#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond445#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond369#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond370#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond438#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond439#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond454 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond371#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond372#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond440#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond373#3 ->&rq->__lock ->&cfs_rq->removed.lock ->&obj_hash[i].lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond441#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond374#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 
1 +.+.: (wq_completion)bond442#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond375#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond376#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond455 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond443#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond348#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond456 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond444#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond349#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond377#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond350#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond378#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond181#6 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond379#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond182#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond445#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond456#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond351#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond380#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond457 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond381#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond183#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond446#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond184#5 
->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond382#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond185#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond447 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond458 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond383#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond186#5 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond187#5 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond448#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond353#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond384#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond449 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond354#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond450#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond355#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond459 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond385#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond460 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond451#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond386#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond461 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond462 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond463 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond387#2 ->&rq->__lock 
->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond452 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond464 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond388#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond356#5 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond357#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond453#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond189#5 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond454#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond190#6 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond389#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond358#5 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond455#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond390#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond191#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond391#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond456#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond359#4 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond192#6 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond457#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond392#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond458#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond360#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond194#6 
->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond193#6 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond459#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond393#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond466 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond361#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond460#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond195#6 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond460#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond362#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond196#6 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond461#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond461#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond462#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond363#4 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond394#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond364#4 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond395#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond463#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond396#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond397#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond464#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond398#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond465 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) 
->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond197#6 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond466#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond198#6 ->(work_completion)(&(&slave->notify_work)->work) ->&rq->__lock ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond199#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond467 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond365#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond471 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond200#6 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond399#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond366#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond468 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond201#6 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond368#4 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond367#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond202#6 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond369#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond469 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond400#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond472 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond370#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond203#6 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond470 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond401#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond473 
->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond371#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond204#6 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond471#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond372#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond402#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond205#6 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond472#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond463#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond403#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond373#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond473#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond206#6 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond404#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond374#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond464#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond474 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond207#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond405#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond475 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond208#6 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond406#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond465#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond476 ->(work_completion)(&(&slave->notify_work)->work) 
->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond375#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond407#4 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond477 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond376#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond210#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond408#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond478 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond211#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond377#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond409#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond479 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond378#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond212#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond410#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond480 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond213#6 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond411#4 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond481 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond379#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond214#6 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond412#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond482 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond380#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) 
->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond215#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond413#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond483 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond381#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond414#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond484 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond382#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond467#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond216#6 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond415#4 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond485 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond383#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond468#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond479#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond486 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond384#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond487 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond385#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond217#6 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond480#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond488 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond386#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond218#6 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) 
->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond417#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond489 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond219#6 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond418#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond490 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond388#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond220#6 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond419#4 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond491 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond221#6 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond420#4 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond492 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond389#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond421#4 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond493 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond390#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond222#6 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond422#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond494 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond391#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond423#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond223#6 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond495 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: 
(wq_completion)bond469#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond392#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond424#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond224#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond470#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond425#4 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond225#6 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond496 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond393#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond426#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond226#6 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond394#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond227#6 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond395#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond482#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond427#4 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond497 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->rcu_node_0 ->&rcu_state.expedited_wq FD: 233 BD: 1 +.+.: (wq_completion)bond428#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond472#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond228#6 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond396#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond429#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond229#6 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: 
(wq_completion)bond498 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond430#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond474#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond230#6 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond499 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond431#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond397#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond475#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond231#6 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond432#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond483#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond476#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond232#6 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond500 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond484#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond398#4 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond433#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond233#6 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond501 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond434#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond399#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond234#6 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond400#4 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: 
(wq_completion)bond235#6 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond502 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond485#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond401#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond435#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond236#6 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond478#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond503 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond402#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock ->&cfs_rq->removed.lock ->&obj_hash[i].lock FD: 233 BD: 1 +.+.: (wq_completion)bond436#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond486#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond504 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond437#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond505 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond438#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond238#6 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond403#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond506 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond404#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond507 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond439#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond487#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond405#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock 
FD: 233 BD: 1 +.+.: (wq_completion)bond488#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond239#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond406#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond441#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond489#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond407#5 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond442#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond408#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond409#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond443#4 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond243#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond483#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond444#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond244#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond445#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock ->&cfs_rq->removed.lock ->&obj_hash[i].lock FD: 233 BD: 1 +.+.: (wq_completion)bond245#5 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond484#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond446#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond246#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond490#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond447#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond485#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond491#2 ->(work_completion)(&(&slave->notify_work)->work) 
->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond448#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond486#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond449#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond487#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond488#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond450#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond451#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond489#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond452#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond490#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond453#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond454#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond493#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond455#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond456#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond457#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond458#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond459#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond461#4 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond493#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond462#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond494#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond463#4 ->(work_completion)(&(&slave->notify_work)->work) 
->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond464#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond465#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond466#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond495#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond467#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond468#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond469#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond470#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond471#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond472#4 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond541 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond473#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond544 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond545 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond546 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond547 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond498#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond499#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond548 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond498#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond549 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond499#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond500#2 ->(work_completion)(&(&slave->notify_work)->work) 
->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond550 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond500#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond551 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond501#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond501#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond552 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond502#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond247#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond553 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond502#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond503#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond554 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond503#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond555 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond248#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond556 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond249#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond504#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond557 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond558 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond504#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond560 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond253#5 ->(work_completion)(&(&slave->notify_work)->work) 
->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond561 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond254#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond562 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond410#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond563 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond564 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond506#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond257#6 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond565 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond412#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond505#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond566 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond413#4 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond567 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond414#5 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond569 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond570 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond415#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond571 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond506#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond508 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond258#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond572 
->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond259#6 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond416#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond573 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond417#5 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond574 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond575 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond260#6 ->&rq->__lock ->&cfs_rq->removed.lock ->&obj_hash[i].lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 157 BD: 1 +.+.: (wq_completion)bond507#2 ->(work_completion)(&(&slave->notify_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond509 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond418#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond576 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond577 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond508#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond419#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond578 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond511 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond261#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond421#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond262#6 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond579 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond263#6 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond422#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond580 
->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond424#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond423#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond264#6 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond510 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond581 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond265#6 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond425#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond511#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond266#6 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond426#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond512 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond582 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond583 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond267#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond427#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond513 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond428#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond584 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 157 BD: 1 +.+.: (wq_completion)bond514 ->(work_completion)(&(&slave->notify_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond429#5 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond268#6 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond430#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond514#2 
->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond515 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond269#6 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond432#4 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond516 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond512#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond585 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond270#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond433#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond586 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond434#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond517 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond587 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond435#4 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond518 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond517#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond588 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond519 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond436#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond589 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond437#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond520 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond521 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: 
(wq_completion)bond590 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond522 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond591 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond592 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond523 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond593 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond438#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond516#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond594 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond524 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond595 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond525 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond526 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond596 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond439#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond527 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond440#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond597 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond517#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond528 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond441#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond598 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond529 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond442#5 
->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond518#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond530 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond531 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond443#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond599 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond444#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond600 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond446#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond445#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond532 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond447#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond601 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond533 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond534 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond521#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond448#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond602 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond603 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond449#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond604 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond535 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond536 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond605 
->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond522#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond451#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond606 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond607 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond537 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond452#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond538 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond453#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond608 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond454#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond539 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond523#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 28 BD: 1 +.+.: nfnl_subsys_none ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond609 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond541#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock ->&cfs_rq->removed.lock ->&obj_hash[i].lock FD: 233 BD: 1 +.+.: (wq_completion)bond610 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond611 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond542 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond612 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond543 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond455#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond613 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond456#5 ->&rq->__lock 
->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond614 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond544#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond457#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond615 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond545#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond616 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond525#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond546#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond458#4 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond459#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond547#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond618 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock ->&cfs_rq->removed.lock ->&obj_hash[i].lock FD: 233 BD: 1 +.+.: (wq_completion)bond548#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond619 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond460#4 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond549#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond620 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond461#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond528#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond621 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond550#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond551#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) 
->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond552#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond622 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond462#4 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond463#5 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond553#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond554#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond623 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond464#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond531#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond465#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond532#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond555#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond624 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 157 BD: 1 +.+.: (wq_completion)bond276#4 ->(work_completion)(&(&slave->notify_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond466#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond556#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond625 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond467#4 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond557#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond468#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 157 BD: 1 +.+.: (wq_completion)bond277#5 ->(work_completion)(&(&slave->notify_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond530#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 157 BD: 1 +.+.: (wq_completion)bond278#4 ->(work_completion)(&(&slave->notify_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond558#2 ->(work_completion)(&(&slave->notify_work)->work) 
->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond626 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond534#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond559 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond560#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 157 BD: 1 +.+.: (wq_completion)bond279#5 ->(work_completion)(&(&slave->notify_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond533#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond627 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond628 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 157 BD: 1 +.+.: (wq_completion)bond280#4 ->(work_completion)(&(&slave->notify_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond469#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 157 BD: 1 +.+.: (wq_completion)bond281#6 ->(work_completion)(&(&slave->notify_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond629 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond630 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond470#4 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond561#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond471#4 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond631 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond536#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond472#5 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond537#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond473#4 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond534#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond632 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond562#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) 
->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond474#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond633 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond538#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond634 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond563#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond475#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond476#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond635 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond539#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond535#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond540 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond536#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond636 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond537#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond637 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond541#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond477#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond538#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond638 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond542#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond478#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond543#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond479#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: 
(wq_completion)bond564#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond480#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond539#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond540#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond565#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond541#4 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond639 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond544#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond542#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond543#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond640 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond545#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond481#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond641 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond642 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond544#4 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond566#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond482#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond643 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond545#4 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond546#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond644 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond547#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond567#2 
->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond645 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond548#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond568 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond646 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond546#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond569#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond547#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond549#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond550#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond570#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond571#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond551#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond552#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond647 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond572#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond648 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond573#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond649 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond574#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond651 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond650 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond575#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond576#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: 
(wq_completion)bond652 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond577#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond579#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond578#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond653 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond654 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond655 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond580#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond656 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond581#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond657 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond582#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond658 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond583#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond584#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond659 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond585#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond660 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond586#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond587#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond661 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond588#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond662 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond553#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond589#2 ->(work_completion)(&(&slave->notify_work)->work) 
->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond663 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond554#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond590#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond664 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond591#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond665 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond592#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond666 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 1 BD: 3 +.+.: event_mutex.wait_lock FD: 233 BD: 1 +.+.: (wq_completion)bond593#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond667 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond555#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond594#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond668 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond556#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond595#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond669 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond596#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond670 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond597#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond671 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond598#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond672 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond599#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond673 ->&rq->__lock 
->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond600#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond674 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond601#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond675 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 130 BD: 127 +.+.: &hugetlb_fault_mutex_table[i] ->&rq->__lock ->hugetlb_lock ->fs_reclaim ->stock_lock ->&c->lock ->pool_lock#2 ->&anon_vma->rwsem ->ptlock_ptr(page) ->&obj_hash[i].lock ->&____s->seqcount FD: 233 BD: 1 +.+.: (wq_completion)bond602#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond676 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond603#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond677 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond604#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond678 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond605#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond679 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond560#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond680 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond561#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond606#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond681 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond562#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond607#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond682 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond563#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond608#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond683 
->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond609#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond684 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond610#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond685 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond687 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond686 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond612#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond613#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond688 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond614#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond689 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond615#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond690 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond616#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond691 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond617 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond692 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond618#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond693 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond619#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond566#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond694 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond620#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond695 ->(work_completion)(&(&slave->notify_work)->work) 
->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond621#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond696 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond622#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond697 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond623#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond698 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond625#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond624#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond699 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond700 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond626#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond701 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond627#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond702 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond628#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond703 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond629#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond704 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond630#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond705 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 1 BD: 4158 ....: key#29 FD: 233 BD: 1 +.+.: (wq_completion)bond631#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 1 BD: 4108 +.+.: &vmpr->sr_lock FD: 233 BD: 1 +.+.: (wq_completion)bond706 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 30 BD: 4108 +.+.: &cache->alloc_lock ->&rq->__lock ->swap_avail_lock ->&p->lock#2 FD: 51 BD: 4108 +.+.: shmem_swaplist_mutex ->&rq->__lock ->&xa->xa_lock#19 ->&info->lock ->&p->lock#2 ->&xa->xa_lock#6 FD: 9 BD: 4109 ....: &xa->xa_lock#19 ->pool_lock#2 
->key#29 ->&ctrl->lock ->&c->lock FD: 1 BD: 4108 +.+.: &tree->lock FD: 1 BD: 4110 ....: &ctrl->lock FD: 233 BD: 1 +.+.: (wq_completion)bond632#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond707 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond569#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 1 BD: 4112 +.+.: f2fs_list_lock FD: 77 BD: 1 .+.+: &type->s_umount_key#50 ->&rq->__lock ->&lru->node[i].lock ->&dentry->d_lock ->&sb->s_type->i_lock_key#24 ->&s->s_inode_list_lock ->&xa->xa_lock#6 ->inode_hash_lock ->&obj_hash[i].lock ->pool_lock#2 ->&fsnotify_mark_srcu ->&wb->list_lock ->rcu_node_0 ->kernfs_idr_lock ->&rcu_state.expedited_wq ->&cfs_rq->removed.lock FD: 233 BD: 1 +.+.: (wq_completion)bond708 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond570#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond633#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond709 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond634#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond710 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond635#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond711 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond636#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 31 BD: 1 ..-.: &(&krcp->page_cache_work)->timer FD: 233 BD: 1 +.+.: (wq_completion)bond712 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond637#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond713 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond638#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond640#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond639#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond714 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond715 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond716 ->(work_completion)(&(&slave->notify_work)->work) 
->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 3 BD: 4137 ..-.: lock#11 ->&lruvec->lru_lock FD: 233 BD: 1 +.+.: (wq_completion)bond641#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond571#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond642#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond717 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond643#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond718 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond572#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond644#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond719 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond573#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond645#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond720 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond574#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond646#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond721 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond575#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond647#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond722 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond648#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond723 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond649#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond724 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond650#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond725 ->&rq->__lock 
->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond726 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond652#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond727 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond653#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond728 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond654#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond729 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 64 BD: 1 .+.+: &type->s_umount_key#51 ->&lru->node[i].lock ->&rq->__lock ->&dentry->d_lock ->&obj_hash[i].lock ->&sb->s_type->i_lock_key#30 ->&s->s_inode_list_lock ->&xa->xa_lock#6 ->inode_hash_lock ->&fsnotify_mark_srcu FD: 233 BD: 1 +.+.: (wq_completion)bond655#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond730 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond576#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond731 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond577#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond656#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond732 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond578#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond657#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond733 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 33 BD: 1 +.+.: (wq_completion)bond579#3 ->(work_completion)(&(&bond->alb_work)->work) ->(work_completion)(&(&bond->mii_work)->work) ->&rq->__lock FD: 33 BD: 1 ..-.: &(&bond->alb_work)->timer FD: 233 BD: 1 +.+.: (wq_completion)bond658#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond734 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond659#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond735 ->&rq->__lock 
->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond660#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond736 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond661#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond737 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond662#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond738 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond663#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond739 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond664#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond740 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond666#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond665#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond741 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond742 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond667#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond743 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond668#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond581#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond744 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond669#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond745 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond670#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond582#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond746 ->(work_completion)(&(&slave->notify_work)->work) 
->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond671#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond583#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond747 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond672#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond748 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond584#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond673#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond749 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond674#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond750 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond675#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond751 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond676#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond752 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond677#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond678#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond753 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond679#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond680#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond755 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond681#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond587#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond756 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond682#2 ->(work_completion)(&(&slave->notify_work)->work) 
->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond757 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond683#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond758 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond684#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond759 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond685#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond760 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond686#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond589#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond761 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond687#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond762 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond688#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond689#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond764 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond690#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond590#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond765 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond691#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond766 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond692#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond767 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond693#2 ->&rq->__lock 
->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond768 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond694#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond769 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond695#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond770 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond696#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond771 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond697#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond772 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond698#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond773 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond699#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond774 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond700#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock ->&cfs_rq->removed.lock ->&obj_hash[i].lock FD: 233 BD: 1 +.+.: (wq_completion)bond775 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond701#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond776 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond702#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond777 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond703#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond778 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond704#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: 
(wq_completion)bond779 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond780 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond781 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond707#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond593#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond708#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond782 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond594#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond709#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond595#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond710#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond783 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond596#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond711#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond784 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond597#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond712#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond785 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond713#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond786 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond714#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond787 ->(work_completion)(&(&slave->notify_work)->work) ->&rq->__lock ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond788 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: 
(wq_completion)bond715#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond789 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond716#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond790 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond717#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond791 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond718#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond792 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond719#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond793 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond794 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond720#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond795 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond721#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond796 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond722#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond797 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond723#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond798 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond724#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond799 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond725#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond800 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: 
(wq_completion)bond726#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond801 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond727#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond802 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond728#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond803 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond729#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond804 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond730#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond805 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond731#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond806 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond732#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond807 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond733#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond808 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond734#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond809 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond735#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond810 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&cfs_rq->removed.lock ->&obj_hash[i].lock FD: 233 BD: 1 +.+.: (wq_completion)bond736#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond811 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond737#2 ->(work_completion)(&(&slave->notify_work)->work) 
->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond812 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond738#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond813 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond739#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond814 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond740#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond815 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond741#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond816 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond742#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond817 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond743#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond818 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond744#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond819 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond745#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond820 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond746#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond821 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond747#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond822 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond748#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond823 ->(work_completion)(&(&slave->notify_work)->work) 
->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond749#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond824 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond750#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond751#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond826 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond752#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond827 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond753#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond828 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond754 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond829 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond755#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond830 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond756#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond831 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond757#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond832 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond758#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond833 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond759#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond834 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond760#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond835 
->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond761#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond836 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond762#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond837 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond763 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond838 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond764#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond839 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond765#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond840 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond766#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond841 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond767#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond842 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond768#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond843 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond769#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond844 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond770#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond845 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond771#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond846 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond772#2 
->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond847 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond773#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond848 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond774#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond849 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond775#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond850 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond776#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond851 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond777#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond852 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond778#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond853 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond779#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond854 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond855 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond780#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond856 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond781#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond857 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond782#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond783#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond858 
->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond784#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond785#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond786#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond859 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond860 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond787#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond861 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond788#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond862 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond789#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond863 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond790#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond864 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond791#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond865 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond792#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond866 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond793#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond867 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond794#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond868 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond795#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 1 BD: 4 +.+.: bnep_sk_list.lock 
FD: 233 BD: 1 +.+.: (wq_completion)bond869 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond870 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond796#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond871 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond797#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond872 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond798#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond873 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond799#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond874 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond800#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond801#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond875 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond876 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond803#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond877 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond878 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond804#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond879 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond880 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond805#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond881 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond806#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 
1 +.+.: (wq_completion)bond882 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond807#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond808#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond809#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond883 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond884 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond885 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond886 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond810#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond887 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond811#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond888 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond889 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond812#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond890 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond813#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond891 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond814#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond815#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond816#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond892 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond817#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond893 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 
BD: 1 +.+.: (wq_completion)bond818#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond894 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond820#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond895 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond821#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond896 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond822#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond897 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond823#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond824#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond898 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond825 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond899 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond826#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond900 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond827#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond901 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond828#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond631#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond829#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond902 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond830#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond903 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) 
->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond831#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond904 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond832#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond833#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond906 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond834#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond634#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond907 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond835#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond635#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond908 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond836#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond636#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond837#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond637#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond909 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond910 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond838#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond638#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond839#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond639#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond911 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond840#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: 
(wq_completion)bond640#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond641#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond912 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond841#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond642#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond913 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond842#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond843#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond643#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond914 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond915 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond844#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond916 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond845#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond917 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond846#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond918 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond847#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond919 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond848#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond920 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond849#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond921 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 
1 +.+.: (wq_completion)bond850#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond922 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond647#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond923 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond648#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond851#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond649#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond924 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond852#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond853#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond925 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond854#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond855#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond926 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond927 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond856#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond928 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond857#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond929 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond858#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond930 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond859#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond931 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 
+.+.: (wq_completion)bond932 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond860#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond861#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond862#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond933 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond863#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond934 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond651#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond935 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond652#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond864#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond936 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond653#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond865#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond937 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond866#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond938 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond867#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond868#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond939 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond869#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond870#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond654#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 
+.+.: (wq_completion)bond655#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond656#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond940 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond941 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond871#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond872#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond942 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond873#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond657#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond874#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond875#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond658#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond943 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond876#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond944 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond945 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond946 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond877#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond947 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond878#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond948 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond949 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond879#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) 
->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond950 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond951 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond880#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond952 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond881#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond953 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond954 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond882#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond955 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond956 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond957 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond883#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond958 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond884#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond885#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond959 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond886#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond960 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond887#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond961 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond888#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond889#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond962 ->(work_completion)(&(&slave->notify_work)->work) 
->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond890#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond963 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond891#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond964 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond892#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond965 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond893#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond966 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond894#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond895#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond967 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond896#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond897#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond968 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond898#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond969 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond970 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond899#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond971 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond900#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond972 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond901#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond973 ->(work_completion)(&(&slave->notify_work)->work) 
->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond974 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond975 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond902#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond976 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond903#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond904#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond977 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond905 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond978 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond906#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond979 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond907#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond980 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond908#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond981 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond909#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond982 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond983 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond910#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond911#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond984 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond912#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond985 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond913#2 
->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond986 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond914#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond987 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond915#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond988 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond916#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond989 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond917#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond990 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond991 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond918#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond992 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond919#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond993 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond994 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond920#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond995 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond921#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond996 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond997 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond922#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond998 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond923#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: 
(wq_completion)bond999 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond924#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond1000 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond926#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond925#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond1001 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond928#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond927#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond929#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond1002 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond930#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond931#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond1003 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond932#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond933#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond934#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond1004 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond1005 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond1006 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond936#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond1007 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond937#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond1009 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond1008 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: 
(wq_completion)bond938#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond1010 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond939#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond941#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond1011 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond940#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond1012 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond942#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond1013 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond943#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond1014 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond944#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond1015 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond945#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond1016 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond946#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond1017 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond947#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond1018 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond948#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond1019 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond949#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond1020 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond950#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: 
(wq_completion)bond1021 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond951#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond952#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond1022 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond953#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond1023 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond954#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond1024 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond955#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond1025 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond956#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond1026 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond1027 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond957#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond1028 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond958#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond959#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond1029 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond960#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond1030 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond961#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond1032 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond1031 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond962#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond963#2 ->(work_completion)(&(&slave->notify_work)->work) 
->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond965#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond964#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond1033 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond966#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond1034 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond967#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond1035 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond1036 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond969#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond1037 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond970#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond1038 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond971#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond1039 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond972#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond1040 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond973#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond1041 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond974#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond1042 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond975#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond1043 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond976#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond1044 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: 
(wq_completion)bond977#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond1045 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 1 BD: 1 ....: _rs.lock#24 FD: 233 BD: 1 +.+.: (wq_completion)bond978#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond1046 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond979#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond1047 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond980#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond1048 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond981#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond1049 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond982#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond1050 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond983#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond1051 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond984#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond1052 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond985#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond1053 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond986#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond1054 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond987#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond1055 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond988#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond1056 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond1057 ->(work_completion)(&(&slave->notify_work)->work) 
->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond989#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond1058 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond990#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond1060 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond991#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond1059 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond992#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond1062 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond1061 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond993#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond994#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond492#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond1063 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond995#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond1064 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond996#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond1065 ->&rq->__lock ->&cfs_rq->removed.lock ->&obj_hash[i].lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond997#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond1066 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond998#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond1067 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond999#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond1068 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond1000#2 ->(work_completion)(&(&slave->notify_work)->work) 
->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond1069 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond1001#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 131 BD: 2 +.+.: &sdata->lock ->&rq->__lock ->fs_reclaim ->pool_lock#2 ->rcu_node_0 ->&rcu_state.expedited_wq ->&obj_hash[i].lock ->krc.lock ->&c->lock FD: 233 BD: 1 +.+.: (wq_completion)bond1070 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond1002#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond1071 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond1003#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond1072 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond1004#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond1073 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond1005#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond1074 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond1006#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond1075 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond1007#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond1076 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond1077 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond1008#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond1078 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond1010#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond1009#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond1079 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond1080 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond1011#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: 
(wq_completion)bond1081 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond1012#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond1082 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond1013#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond1083 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond1014#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond1084 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond1015#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond1085 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond1016#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond1017#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond1087 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond1018#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond1088 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond1019#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond1089 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond1020#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond1090 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond1021#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond1091 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond1022#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond1092 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond1023#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond1093 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: 
(wq_completion)bond1024#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond1094 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond1025#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond1095 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond1026#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond1096 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond1027#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond1097 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond1028#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond1098 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond1029#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond1099 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond1030#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond1100 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond1031#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond1101 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond1032#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond506#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond1102 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond507#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond1103 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond1104 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond508#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond1033#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 
+.+.: (wq_completion)bond1105 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond1106 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond1107 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond1034#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond1035#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond1108 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond1036#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond1109 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond1037#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond1110 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond1111 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond1038#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond1112 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond1039#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond1113 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond1040#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond1114 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond1041#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond1115 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond1042#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond1116 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond1043#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond1117 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond1044#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: 
(wq_completion)bond1118 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond1045#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond1119 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond1046#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond1120 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond1047#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond1121 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond1048#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond1122 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond1123 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond1049#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond1124 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond1050#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond1125 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond1051#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond1126 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond1052#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 233 BD: 1 +.+.: (wq_completion)bond1127 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond1128 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond1053#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond1129 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond1054#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond1130 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond1131 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 
233 BD: 1 +.+.: (wq_completion)bond1132 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond1133 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond1055#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 233 BD: 1 +.+.: (wq_completion)bond1134 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) (buggered) all lock chains: irq_context: 0 &obj_hash[i].lock irq_context: 0 &obj_hash[i].lock pool_lock irq_context: 0 cgroup_mutex irq_context: 0 (console_sem).lock irq_context: 0 cpu_hotplug_lock irq_context: 0 cpu_hotplug_lock jump_label_mutex irq_context: 0 cpu_hotplug_lock static_call_mutex irq_context: 0 cpu_hotplug_lock static_call_mutex text_mutex irq_context: 0 console_mutex irq_context: 0 console_mutex syslog_lock irq_context: 0 console_mutex (console_sem).lock irq_context: 0 console_mutex console_lock console_srcu console_owner_lock irq_context: 0 console_mutex console_lock console_srcu console_owner irq_context: 0 console_mutex console_lock console_srcu console_owner console_owner_lock irq_context: 0 console_lock console_srcu console_owner_lock irq_context: 0 console_lock console_srcu console_owner irq_context: 0 console_lock console_srcu console_owner console_owner_lock irq_context: 0 input_pool.lock irq_context: 0 cpu_hotplug_lock cpuhp_state_mutex irq_context: 0 clocksource_mutex irq_context: 0 clocksource_mutex watchdog_lock irq_context: 0 cpu_hotplug_lock jump_label_mutex text_mutex irq_context: 0 resource_lock irq_context: 0 cache_disable_lock irq_context: 0 pgd_lock irq_context: 0 init_mm.page_table_lock irq_context: 0 init_mm.page_table_lock pgd_lock irq_context: 0 early_pfn_lock irq_context: 0 acpi_ioapic_lock irq_context: 0 acpi_ioapic_lock ioapic_lock irq_context: 0 acpi_ioapic_lock (console_sem).lock irq_context: 0 acpi_ioapic_lock console_lock console_srcu console_owner_lock irq_context: 0 acpi_ioapic_lock console_lock console_srcu console_owner irq_context: 0 acpi_ioapic_lock console_lock console_srcu console_owner console_owner_lock irq_context: 0 syscore_ops_lock irq_context: 0 map_entries_lock irq_context: 0 devtree_lock irq_context: 0 pcpu_lock irq_context: 0 param_lock irq_context: 0 base_crng.lock irq_context: 0 crng_init_wait.lock irq_context: 0 zonelist_update_seq irq_context: 0 zonelist_update_seq zonelist_update_seq.seqcount irq_context: 0 dmar_global_lock irq_context: 0 &zone->lock irq_context: 0 &zone->lock &____s->seqcount irq_context: 0 &pcp->lock &zone->lock irq_context: 0 &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &____s->seqcount irq_context: 0 pool_lock#2 irq_context: 0 pcpu_alloc_mutex irq_context: 0 pcpu_alloc_mutex pcpu_lock irq_context: 0 &n->list_lock irq_context: 0 &c->lock irq_context: 0 slab_mutex irq_context: 0 slab_mutex pool_lock#2 irq_context: 0 slab_mutex &c->lock irq_context: 0 slab_mutex &n->list_lock irq_context: 0 slab_mutex pcpu_alloc_mutex irq_context: 0 slab_mutex pcpu_alloc_mutex pcpu_lock irq_context: 0 batched_entropy_u64.lock irq_context: 0 batched_entropy_u64.lock crngs.lock irq_context: 0 batched_entropy_u64.lock crngs.lock base_crng.lock irq_context: 0 espfix_init_mutex irq_context: 0 espfix_init_mutex &pcp->lock &zone->lock irq_context: 0 espfix_init_mutex &zone->lock irq_context: 0 espfix_init_mutex &pcp->lock &zone->lock &____s->seqcount irq_context: 
0 espfix_init_mutex &____s->seqcount irq_context: 0 espfix_init_mutex pool_lock#2 irq_context: 0 percpu_counters_lock irq_context: 0 &mm->page_table_lock irq_context: 0 ptlock_ptr(page) irq_context: 0 ptlock_ptr(page)#2 irq_context: 0 trace_types_lock irq_context: 0 panic_notifier_list.lock irq_context: 0 die_chain.lock irq_context: 0 trace_event_sem irq_context: 0 batched_entropy_u32.lock irq_context: 0 batched_entropy_u32.lock crngs.lock irq_context: 0 &rq->__lock irq_context: 0 &rq->__lock rcu_read_lock &cfs_b->lock irq_context: 0 init_task.pi_lock irq_context: 0 init_task.pi_lock &rq->__lock irq_context: 0 init_task.vtime_seqcount irq_context: 0 slab_mutex &pcp->lock &zone->lock irq_context: 0 slab_mutex &zone->lock irq_context: 0 slab_mutex &____s->seqcount irq_context: 0 wq_pool_mutex irq_context: 0 wq_pool_mutex &pcp->lock &zone->lock irq_context: 0 wq_pool_mutex &zone->lock irq_context: 0 wq_pool_mutex &pcp->lock &zone->lock &____s->seqcount irq_context: 0 wq_pool_mutex &____s->seqcount irq_context: 0 wq_pool_mutex pool_lock#2 irq_context: 0 wq_pool_mutex &c->lock irq_context: 0 &wq->mutex irq_context: 0 &wq->mutex &pool->lock irq_context: 0 wq_pool_mutex &wq->mutex irq_context: 0 cpu_hotplug_lock wq_pool_mutex irq_context: 0 cpu_hotplug_lock wq_pool_mutex pool_lock#2 irq_context: 0 cpu_hotplug_lock wq_pool_mutex &obj_hash[i].lock irq_context: 0 cpu_hotplug_lock wq_pool_mutex &pcp->lock &zone->lock irq_context: 0 cpu_hotplug_lock wq_pool_mutex &zone->lock irq_context: 0 cpu_hotplug_lock wq_pool_mutex &pcp->lock &zone->lock &____s->seqcount irq_context: 0 cpu_hotplug_lock wq_pool_mutex &____s->seqcount irq_context: 0 cpu_hotplug_lock wq_pool_mutex &c->lock irq_context: 0 cpu_hotplug_lock wq_pool_mutex &wq->mutex irq_context: 0 cpu_hotplug_lock wq_pool_mutex &wq->mutex &pool->lock/1 irq_context: 0 wq_pool_mutex &wq->mutex &pool->lock irq_context: 0 shrinker_rwsem irq_context: 0 rcu_node_0 irq_context: 0 rcu_state.barrier_lock irq_context: 0 rcu_state.barrier_lock rcu_node_0 irq_context: 0 &rnp->exp_poll_lock irq_context: 0 &rnp->exp_poll_lock rcu_read_lock &pool->lock irq_context: 0 &rnp->exp_poll_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 slab_mutex &pcp->lock &zone->lock &____s->seqcount irq_context: 0 trace_event_sem trace_event_ida.xa_lock irq_context: 0 trace_event_sem trace_event_ida.xa_lock &pcp->lock &zone->lock irq_context: 0 trace_event_sem trace_event_ida.xa_lock &zone->lock irq_context: 0 trace_event_sem trace_event_ida.xa_lock &____s->seqcount irq_context: 0 trace_event_sem trace_event_ida.xa_lock pool_lock#2 irq_context: 0 trace_event_sem trace_event_ida.xa_lock &c->lock irq_context: 0 trigger_cmd_mutex irq_context: 0 rcu_read_lock pool_lock#2 irq_context: 0 i8259A_lock irq_context: 0 irq_domain_mutex irq_context: 0 free_vmap_area_lock irq_context: 0 vmap_area_lock irq_context: 0 &irq_desc_lock_class irq_context: 0 vmap_purge_lock irq_context: 0 vmap_purge_lock purge_vmap_area_lock irq_context: 0 cpa_lock irq_context: 0 cpa_lock pgd_lock irq_context: 0 timekeeper_lock irq_context: 0 timekeeper_lock tk_core.seq.seqcount irq_context: 0 timekeeper_lock tk_core.seq.seqcount &obj_hash[i].lock irq_context: 0 tk_core.seq.seqcount irq_context: 0 &base->lock irq_context: 0 &base->lock &obj_hash[i].lock irq_context: 0 batched_entropy_u32.lock crngs.lock base_crng.lock irq_context: 0 rcu_read_lock &pool->lock/1 irq_context: 0 rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 pmus_lock irq_context: 0 pmus_lock pcpu_alloc_mutex irq_context: 0 
pmus_lock pcpu_alloc_mutex pcpu_lock irq_context: 0 pmus_lock &obj_hash[i].lock irq_context: 0 pmus_lock pool_lock#2 irq_context: 0 &swhash->hlist_mutex irq_context: 0 pmus_lock &cpuctx_mutex irq_context: 0 pmus_lock &obj_hash[i].lock pool_lock irq_context: 0 tty_ldiscs_lock irq_context: 0 console_lock irq_context: 0 console_lock resource_lock irq_context: 0 console_lock pool_lock#2 irq_context: 0 console_lock &obj_hash[i].lock irq_context: 0 console_lock &pcp->lock &zone->lock irq_context: 0 console_lock &zone->lock irq_context: 0 console_lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 console_lock &____s->seqcount irq_context: 0 console_lock &c->lock irq_context: 0 console_lock kbd_event_lock irq_context: 0 console_lock kbd_event_lock led_lock irq_context: 0 console_lock vga_lock irq_context: 0 console_lock (console_sem).lock irq_context: 0 console_lock console_owner_lock irq_context: 0 console_mutex &port_lock_key irq_context: 0 console_mutex console_lock irq_context: 0 console_mutex console_lock console_srcu console_owner &port_lock_key irq_context: 0 console_mutex console_srcu_srcu_usage.lock irq_context: 0 console_mutex console_srcu_srcu_usage.lock &obj_hash[i].lock irq_context: 0 console_mutex &ACCESS_PRIVATE(sdp, lock) irq_context: 0 console_mutex console_srcu irq_context: 0 console_lock console_srcu console_owner &port_lock_key irq_context: 0 init_task.alloc_lock irq_context: 0 acpi_ioremap_lock irq_context: 0 acpi_ioremap_lock pool_lock#2 irq_context: 0 acpi_ioremap_lock resource_lock irq_context: 0 acpi_ioremap_lock memtype_lock irq_context: 0 acpi_ioremap_lock free_vmap_area_lock irq_context: 0 acpi_ioremap_lock vmap_area_lock irq_context: 0 semaphore->lock irq_context: 0 *(&acpi_gbl_reference_count_lock) irq_context: 0 clockevents_lock irq_context: 0 clockevents_lock tk_core.seq.seqcount irq_context: 0 clockevents_lock tick_broadcast_lock irq_context: 0 clockevents_lock i8253_lock irq_context: 0 &desc->request_mutex irq_context: 0 &desc->request_mutex &irq_desc_lock_class irq_context: 0 &desc->request_mutex &irq_desc_lock_class i8259A_lock irq_context: 0 ioapic_lock irq_context: 0 ioapic_mutex irq_context: 0 ioapic_mutex &domain->mutex irq_context: 0 ioapic_mutex &domain->mutex pool_lock#2 irq_context: 0 ioapic_mutex &domain->mutex vector_lock irq_context: 0 ioapic_mutex &domain->mutex &irq_desc_lock_class irq_context: 0 ioapic_mutex &domain->mutex i8259A_lock irq_context: 0 ioapic_mutex &domain->mutex &c->lock irq_context: 0 ioapic_mutex &domain->mutex &pcp->lock &zone->lock irq_context: 0 ioapic_mutex &domain->mutex &zone->lock irq_context: 0 ioapic_mutex &domain->mutex &____s->seqcount irq_context: 0 vector_lock irq_context: hardirq jiffies_lock irq_context: hardirq jiffies_lock jiffies_seq.seqcount irq_context: hardirq hrtimer_bases.lock irq_context: hardirq hrtimer_bases.lock tk_core.seq.seqcount irq_context: hardirq log_wait.lock irq_context: 0 sysctl_lock irq_context: 0 tomoyo_policy_lock irq_context: 0 tomoyo_policy_lock pool_lock#2 irq_context: 0 aa_secids.xa_lock irq_context: 0 aa_secids.xa_lock pool_lock#2 irq_context: 0 aa_buffers_lock irq_context: 0 pernet_ops_rwsem irq_context: 0 pernet_ops_rwsem stack_depot_init_mutex irq_context: 0 pernet_ops_rwsem crngs.lock irq_context: 0 pernet_ops_rwsem net_rwsem irq_context: 0 pernet_ops_rwsem proc_inum_ida.xa_lock irq_context: 0 rtnl_mutex irq_context: 0 rtnl_mutex &c->lock irq_context: 0 rtnl_mutex &pcp->lock &zone->lock irq_context: 0 rtnl_mutex &zone->lock irq_context: 0 rtnl_mutex &____s->seqcount 
irq_context: 0 rtnl_mutex pool_lock#2 irq_context: softirq drivers/char/random.c:1010 irq_context: softirq drivers/char/random.c:1010 input_pool.lock irq_context: 0 lock irq_context: 0 lock kernfs_idr_lock irq_context: 0 lock kernfs_idr_lock pool_lock#2 irq_context: 0 &root->kernfs_rwsem irq_context: 0 file_systems_lock irq_context: 0 &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 sb_lock irq_context: 0 &type->s_umount_key/1 irq_context: 0 &type->s_umount_key/1 pool_lock#2 irq_context: 0 &type->s_umount_key/1 pcpu_alloc_mutex irq_context: 0 &type->s_umount_key/1 pcpu_alloc_mutex pcpu_lock irq_context: 0 &type->s_umount_key/1 shrinker_rwsem irq_context: 0 &type->s_umount_key/1 shrinker_rwsem pool_lock#2 irq_context: 0 &type->s_umount_key/1 list_lrus_mutex irq_context: 0 &type->s_umount_key/1 sb_lock irq_context: 0 &type->s_umount_key/1 sb_lock unnamed_dev_ida.xa_lock irq_context: 0 &type->s_umount_key/1 &obj_hash[i].lock irq_context: 0 &type->s_umount_key/1 percpu_counters_lock irq_context: 0 &type->s_umount_key/1 crngs.lock irq_context: 0 &type->s_umount_key/1 &sbinfo->stat_lock irq_context: 0 &type->s_umount_key/1 &pcp->lock &zone->lock irq_context: 0 &type->s_umount_key/1 &zone->lock irq_context: 0 &type->s_umount_key/1 &____s->seqcount irq_context: 0 &type->s_umount_key/1 &c->lock irq_context: 0 &type->s_umount_key/1 &sb->s_type->i_lock_key irq_context: 0 &type->s_umount_key/1 &s->s_inode_list_lock irq_context: 0 &type->s_umount_key/1 tk_core.seq.seqcount irq_context: 0 &type->s_umount_key/1 batched_entropy_u32.lock irq_context: 0 &type->s_umount_key/1 &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &type->s_umount_key/1 &sb->s_type->i_lock_key &dentry->d_lock irq_context: 0 &type->s_umount_key/1 &dentry->d_lock irq_context: 0 mnt_id_ida.xa_lock irq_context: 0 &dentry->d_lock irq_context: 0 mount_lock irq_context: 0 mount_lock mount_lock.seqcount irq_context: 0 rcu_read_lock &dentry->d_lock irq_context: 0 &type->s_umount_key#2/1 irq_context: 0 &type->s_umount_key#2/1 pool_lock#2 irq_context: 0 &type->s_umount_key#2/1 pcpu_alloc_mutex irq_context: 0 &type->s_umount_key#2/1 pcpu_alloc_mutex pcpu_lock irq_context: 0 &type->s_umount_key#2/1 shrinker_rwsem irq_context: 0 &type->s_umount_key#2/1 list_lrus_mutex irq_context: 0 &type->s_umount_key#2/1 sb_lock irq_context: 0 &type->s_umount_key#2/1 sb_lock unnamed_dev_ida.xa_lock irq_context: 0 &type->s_umount_key#2/1 &pcp->lock &zone->lock irq_context: 0 &type->s_umount_key#2/1 &zone->lock irq_context: 0 &type->s_umount_key#2/1 &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &type->s_umount_key#2/1 &____s->seqcount irq_context: 0 &type->s_umount_key#2/1 &c->lock irq_context: 0 &type->s_umount_key#2/1 &sb->s_type->i_lock_key#2 irq_context: 0 &type->s_umount_key#2/1 &s->s_inode_list_lock irq_context: 0 &type->s_umount_key#2/1 tk_core.seq.seqcount irq_context: 0 &type->s_umount_key#2/1 &sb->s_type->i_lock_key#2 &dentry->d_lock irq_context: 0 &type->s_umount_key#2/1 &dentry->d_lock irq_context: 0 ucounts_lock irq_context: 0 proc_inum_ida.xa_lock irq_context: 0 init_fs.lock irq_context: 0 init_fs.lock init_fs.seq.seqcount irq_context: 0 &type->s_umount_key#3/1 irq_context: 0 &type->s_umount_key#3/1 pool_lock#2 irq_context: 0 &type->s_umount_key#3/1 pcpu_alloc_mutex irq_context: 0 &type->s_umount_key#3/1 pcpu_alloc_mutex pcpu_lock irq_context: 0 &type->s_umount_key#3/1 shrinker_rwsem irq_context: 0 &type->s_umount_key#3/1 list_lrus_mutex irq_context: 0 &type->s_umount_key#3/1 sb_lock irq_context: 0 &type->s_umount_key#3/1 
sb_lock unnamed_dev_ida.xa_lock irq_context: 0 &type->s_umount_key#3/1 &pcp->lock &zone->lock irq_context: 0 &type->s_umount_key#3/1 &zone->lock irq_context: 0 &type->s_umount_key#3/1 &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &type->s_umount_key#3/1 &____s->seqcount irq_context: 0 &type->s_umount_key#3/1 &c->lock irq_context: 0 &type->s_umount_key#3/1 &sb->s_type->i_lock_key#3 irq_context: 0 &type->s_umount_key#3/1 &s->s_inode_list_lock irq_context: 0 &type->s_umount_key#3/1 tk_core.seq.seqcount irq_context: 0 &type->s_umount_key#3/1 &sb->s_type->i_lock_key#3 &dentry->d_lock irq_context: 0 &type->s_umount_key#3/1 &dentry->d_lock irq_context: 0 cpu_hotplug_lock cpuhp_state_mutex cpuhp_state-down irq_context: 0 cpu_hotplug_lock cpuhp_state_mutex cpuhp_state-up irq_context: 0 proc_subdir_lock irq_context: 0 proc_subdir_lock irq_context: 0 pernet_ops_rwsem pool_lock#2 irq_context: 0 pernet_ops_rwsem proc_subdir_lock irq_context: 0 pernet_ops_rwsem proc_subdir_lock irq_context: 0 &type->s_umount_key#4/1 irq_context: 0 &type->s_umount_key#4/1 pool_lock#2 irq_context: 0 &type->s_umount_key#4/1 pcpu_alloc_mutex irq_context: 0 &type->s_umount_key#4/1 pcpu_alloc_mutex pcpu_lock irq_context: 0 &type->s_umount_key#4/1 shrinker_rwsem irq_context: 0 &type->s_umount_key#4/1 list_lrus_mutex irq_context: 0 &type->s_umount_key#4/1 &c->lock irq_context: 0 &type->s_umount_key#4/1 &pcp->lock &zone->lock irq_context: 0 &type->s_umount_key#4/1 &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &type->s_umount_key#4/1 &____s->seqcount irq_context: 0 &type->s_umount_key#4/1 sb_lock irq_context: 0 &type->s_umount_key#4/1 sb_lock unnamed_dev_ida.xa_lock irq_context: 0 &type->s_umount_key#4/1 &sb->s_type->i_lock_key#4 irq_context: 0 &type->s_umount_key#4/1 &s->s_inode_list_lock irq_context: 0 &type->s_umount_key#4/1 tk_core.seq.seqcount irq_context: 0 &type->s_umount_key#4/1 &sb->s_type->i_lock_key#4 &dentry->d_lock irq_context: 0 &type->s_umount_key#4/1 &dentry->d_lock irq_context: 0 cgroup_mutex pcpu_alloc_mutex irq_context: 0 cgroup_mutex pcpu_alloc_mutex pcpu_lock irq_context: 0 cgroup_mutex pool_lock#2 irq_context: 0 cgroup_mutex lock irq_context: 0 cgroup_mutex lock kernfs_idr_lock irq_context: 0 cgroup_mutex lock kernfs_idr_lock pool_lock#2 irq_context: 0 cgroup_mutex &root->kernfs_rwsem irq_context: 0 cgroup_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 cgroup_mutex &obj_hash[i].lock irq_context: 0 cgroup_mutex cgroup_file_kn_lock irq_context: 0 cgroup_mutex css_set_lock irq_context: 0 lock cgroup_idr_lock irq_context: 0 lock cgroup_idr_lock pool_lock#2 irq_context: 0 cpuset_rwsem irq_context: 0 cpuset_rwsem cpuset_rwsem.rss.gp_wait.lock irq_context: 0 cpuset_rwsem rcu_node_0 irq_context: 0 cpuset_rwsem callback_lock irq_context: 0 cpuset_rwsem.waiters.lock irq_context: 0 cpuset_rwsem.rss.gp_wait.lock irq_context: 0 cpuset_rwsem.rss.gp_wait.lock &obj_hash[i].lock irq_context: 0 cgroup_mutex &c->lock irq_context: 0 cgroup_mutex &____s->seqcount irq_context: 0 lock cgroup_idr_lock &c->lock irq_context: 0 lock cgroup_idr_lock &pcp->lock &zone->lock irq_context: 0 lock cgroup_idr_lock &zone->lock irq_context: 0 lock cgroup_idr_lock &____s->seqcount irq_context: 0 cgroup_mutex blkcg_pol_mutex irq_context: 0 cgroup_mutex blkcg_pol_mutex pcpu_alloc_mutex irq_context: 0 cgroup_mutex blkcg_pol_mutex pcpu_alloc_mutex pcpu_lock irq_context: 0 cgroup_mutex lock cgroup_idr_lock irq_context: 0 cgroup_mutex lock cgroup_idr_lock pool_lock#2 irq_context: 0 cgroup_mutex &pcp->lock 
&zone->lock irq_context: 0 cgroup_mutex &pcp->lock &zone->lock &____s->seqcount irq_context: 0 cgroup_mutex percpu_counters_lock irq_context: 0 cgroup_mutex shrinker_rwsem irq_context: 0 cgroup_mutex shrinker_rwsem pool_lock#2 irq_context: 0 cgroup_mutex shrinker_rwsem &c->lock irq_context: 0 cgroup_mutex shrinker_rwsem &n->list_lock irq_context: 0 cgroup_mutex shrinker_rwsem &____s->seqcount irq_context: 0 cgroup_mutex &base->lock irq_context: 0 cgroup_mutex &base->lock &obj_hash[i].lock irq_context: 0 cgroup_mutex batched_entropy_u8.lock irq_context: 0 cgroup_mutex batched_entropy_u8.lock crngs.lock irq_context: 0 cgroup_mutex &pgdat->memcg_lru.lock irq_context: 0 cgroup_mutex &zone->lock irq_context: 0 cgroup_mutex devcgroup_mutex irq_context: 0 cgroup_mutex cpu_hotplug_lock irq_context: 0 cgroup_mutex cpu_hotplug_lock freezer_mutex irq_context: 0 cgroup_mutex &n->list_lock irq_context: 0 &pool->lock#2 irq_context: 0 spec_ctrl_mutex irq_context: 0 spec_ctrl_mutex cpu_hotplug_lock irq_context: 0 rcu_state.exp_mutex rcu_node_0 irq_context: 0 rcu_state.exp_mutex rcu_state.exp_wake_mutex irq_context: 0 rcu_state.exp_mutex rcu_state.exp_wake_mutex rcu_node_0 irq_context: 0 rcu_state.exp_mutex rcu_state.exp_wake_mutex &rnp->exp_lock irq_context: 0 rcu_state.exp_mutex rcu_state.exp_wake_mutex &rnp->exp_wq[0] irq_context: 0 rcu_state.exp_mutex rcu_state.exp_wake_mutex &rnp->exp_wq[1] irq_context: 0 init_sighand.siglock irq_context: 0 init_files.file_lock irq_context: 0 lock pidmap_lock irq_context: 0 lock pidmap_lock pool_lock#2 irq_context: 0 pidmap_lock irq_context: 0 cgroup_threadgroup_rwsem irq_context: 0 cgroup_threadgroup_rwsem css_set_lock irq_context: 0 cgroup_threadgroup_rwsem &p->pi_lock irq_context: 0 cgroup_threadgroup_rwsem &p->pi_lock &rq->__lock irq_context: 0 cgroup_threadgroup_rwsem tk_core.seq.seqcount irq_context: 0 cgroup_threadgroup_rwsem tasklist_lock irq_context: 0 cgroup_threadgroup_rwsem tasklist_lock init_task.pi_lock irq_context: 0 cgroup_threadgroup_rwsem tasklist_lock init_sighand.siglock irq_context: 0 cgroup_threadgroup_rwsem css_set_lock cgroup_file_kn_lock irq_context: 0 cgroup_threadgroup_rwsem css_set_lock &p->pi_lock irq_context: 0 cgroup_threadgroup_rwsem css_set_lock &p->pi_lock &rq->__lock irq_context: 0 &p->pi_lock irq_context: 0 &p->pi_lock &rq->__lock irq_context: 0 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &p->pi_lock &rq->__lock &obj_hash[i].lock irq_context: 0 &p->pi_lock &rq->__lock &base->lock irq_context: 0 &p->pi_lock &rq->__lock &base->lock &obj_hash[i].lock irq_context: 0 rcu_read_lock &p->pi_lock irq_context: 0 rcu_read_lock &p->pi_lock &rq->__lock irq_context: 0 (kthreadd_done).wait.lock irq_context: 0 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sighand->siglock irq_context: 0 &p->alloc_lock irq_context: 0 &p->alloc_lock &____s->seqcount#2 irq_context: 0 fs_reclaim irq_context: 0 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 wq_pool_mutex &wq->mutex &pool->lock/1 irq_context: 0 wq_pool_mutex &pool->lock/1 irq_context: 0 wq_pool_mutex fs_reclaim irq_context: 0 wq_pool_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 wq_pool_mutex rcu_read_lock pool_lock#2 irq_context: 0 wq_pool_mutex &obj_hash[i].lock irq_context: 0 wq_pool_mutex kthread_create_lock irq_context: 0 wq_pool_mutex &p->pi_lock irq_context: 0 wq_pool_mutex &p->pi_lock &rq->__lock irq_context: 0 wq_pool_mutex &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 wq_pool_mutex 
&rq->__lock irq_context: 0 kthread_create_lock irq_context: 0 cgroup_threadgroup_rwsem &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cgroup_threadgroup_rwsem tasklist_lock &p->pi_lock irq_context: 0 cgroup_threadgroup_rwsem tasklist_lock &sighand->siglock irq_context: 0 wq_pool_mutex &x->wait irq_context: 0 wq_pool_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cpuset_rwsem irq_context: 0 cpuset_rwsem &p->pi_lock irq_context: 0 cpuset_rwsem &p->pi_lock &rq->__lock irq_context: 0 cpuset_rwsem &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &x->wait irq_context: 0 &x->wait &p->pi_lock irq_context: 0 &x->wait &p->pi_lock &rq->__lock irq_context: 0 &x->wait &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 wq_pool_attach_mutex irq_context: 0 wq_mayday_lock irq_context: 0 &xa->xa_lock irq_context: 0 &pool->lock irq_context: 0 &pool->lock &p->pi_lock irq_context: 0 &pool->lock &p->pi_lock &rq->__lock irq_context: 0 &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (&pool->mayday_timer) irq_context: 0 &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)rcu_gp irq_context: 0 (wq_completion)rcu_gp (work_completion)(&rnp->exp_poll_wq) irq_context: 0 (wq_completion)rcu_gp (work_completion)(&rnp->exp_poll_wq) &rnp->exp_poll_lock irq_context: 0 &pool->lock/1 irq_context: 0 &pool->lock/1 &p->pi_lock irq_context: 0 &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (&wq_watchdog_timer) irq_context: 0 &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound irq_context: 0 (wq_completion)events_unbound (work_completion)(&(&kfence_timer)->work) irq_context: 0 (wq_completion)events_unbound (work_completion)(&(&kfence_timer)->work) cpu_hotplug_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&(&kfence_timer)->work) cpu_hotplug_lock jump_label_mutex irq_context: 0 (wq_completion)events_unbound (work_completion)(&(&kfence_timer)->work) cpu_hotplug_lock jump_label_mutex text_mutex irq_context: 0 (wq_completion)events_unbound (work_completion)(&(&kfence_timer)->work) cpu_hotplug_lock jump_label_mutex text_mutex ptlock_ptr(page)#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&(&kfence_timer)->work) allocation_wait.lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&(&kfence_timer)->work) &rq->__lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&(&kfence_timer)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: hardirq allocation_wait.lock irq_context: hardirq allocation_wait.lock &p->pi_lock irq_context: hardirq allocation_wait.lock &p->pi_lock &rq->__lock irq_context: hardirq allocation_wait.lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 batched_entropy_u8.lock irq_context: 0 kfence_freelist_lock irq_context: 0 rcu_tasks.cbs_gbl_lock irq_context: 0 rcu_tasks.cbs_gbl_lock (console_sem).lock irq_context: 0 rcu_tasks.cbs_gbl_lock console_lock console_srcu console_owner_lock irq_context: 0 rcu_tasks.cbs_gbl_lock console_lock console_srcu console_owner irq_context: 0 rcu_tasks.cbs_gbl_lock console_lock console_srcu console_owner &port_lock_key irq_context: 0 rcu_tasks.cbs_gbl_lock console_lock console_srcu console_owner console_owner_lock irq_context: 0 rcu_tasks.cbs_gbl_lock rcu_tasks__percpu.cbs_pcpu_lock irq_context: 0 rcu_tasks.cbs_gbl_lock 
rcu_tasks__percpu.cbs_pcpu_lock &obj_hash[i].lock irq_context: 0 rcu_tasks.cbs_gbl_lock &ACCESS_PRIVATE(rtpcp, lock) irq_context: 0 rcu_tasks.cbs_gbl_lock &ACCESS_PRIVATE(rtpcp, lock) &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&(&kfence_timer)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&(&kfence_timer)->work) &base->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&(&kfence_timer)->work) &base->lock &obj_hash[i].lock irq_context: 0 rcu_tasks_trace.cbs_gbl_lock irq_context: 0 rcu_tasks_trace.cbs_gbl_lock rcu_tasks_trace__percpu.cbs_pcpu_lock irq_context: 0 rcu_tasks_trace.cbs_gbl_lock rcu_tasks_trace__percpu.cbs_pcpu_lock &obj_hash[i].lock irq_context: 0 rcu_tasks_trace.cbs_gbl_lock rcu_tasks_trace__percpu.cbs_pcpu_lock &obj_hash[i].lock pool_lock irq_context: 0 rcu_tasks_trace.cbs_gbl_lock &ACCESS_PRIVATE(rtpcp, lock) irq_context: 0 rcu_tasks_trace.cbs_gbl_lock &ACCESS_PRIVATE(rtpcp, lock) &obj_hash[i].lock irq_context: 0 rcu_tasks.tasks_gp_mutex irq_context: 0 rcu_tasks.tasks_gp_mutex rcu_tasks.cbs_gbl_lock irq_context: 0 rcu_tasks.tasks_gp_mutex &rq->__lock irq_context: 0 rcu_tasks.tasks_gp_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: hardirq rcu_read_lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &x->wait#2 irq_context: 0 rcu_tasks.tasks_gp_mutex rcu_tasks__percpu.cbs_pcpu_lock irq_context: 0 rcu_tasks.tasks_gp_mutex rcu_state.exp_mutex rcu_node_0 irq_context: 0 rcu_tasks.tasks_gp_mutex rcu_state.exp_mutex rcu_state.exp_wake_mutex irq_context: 0 rcu_tasks.tasks_gp_mutex rcu_state.exp_mutex rcu_state.exp_wake_mutex rcu_node_0 irq_context: 0 rcu_tasks.tasks_gp_mutex rcu_state.exp_mutex rcu_state.exp_wake_mutex &rnp->exp_lock irq_context: 0 rcu_tasks.tasks_gp_mutex rcu_state.exp_mutex rcu_state.exp_wake_mutex &rnp->exp_wq[2] irq_context: 0 rcu_tasks.tasks_gp_mutex &obj_hash[i].lock irq_context: 0 rcu_tasks.tasks_gp_mutex &base->lock irq_context: 0 rcu_tasks.tasks_gp_mutex &base->lock &obj_hash[i].lock irq_context: 0 rcu_tasks.tasks_gp_mutex tasks_rcu_exit_srcu_srcu_usage.lock irq_context: 0 rcu_tasks.tasks_gp_mutex tasks_rcu_exit_srcu_srcu_usage.lock &obj_hash[i].lock irq_context: 0 rcu_tasks.tasks_gp_mutex &ACCESS_PRIVATE(sdp, lock) irq_context: 0 rcu_tasks.tasks_gp_mutex tasks_rcu_exit_srcu irq_context: 0 rcu_tasks.tasks_gp_mutex tasks_rcu_exit_srcu_srcu_usage.lock &ACCESS_PRIVATE(sdp, lock) irq_context: 0 rcu_tasks.tasks_gp_mutex tasks_rcu_exit_srcu_srcu_usage.lock rcu_read_lock &pool->lock irq_context: 0 rcu_tasks.tasks_gp_mutex tasks_rcu_exit_srcu_srcu_usage.lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 rcu_tasks.tasks_gp_mutex tasks_rcu_exit_srcu_srcu_usage.lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 rcu_tasks.tasks_gp_mutex tasks_rcu_exit_srcu_srcu_usage.lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 rcu_tasks.tasks_gp_mutex tasks_rcu_exit_srcu_srcu_usage.lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)rcu_gp (work_completion)(&(&ssp->srcu_sup->work)->work) irq_context: 0 (wq_completion)rcu_gp (work_completion)(&(&ssp->srcu_sup->work)->work) &ssp->srcu_sup->srcu_gp_mutex irq_context: 0 (wq_completion)rcu_gp (work_completion)(&(&ssp->srcu_sup->work)->work) &ssp->srcu_sup->srcu_gp_mutex tasks_rcu_exit_srcu_srcu_usage.lock irq_context: 0 (wq_completion)rcu_gp 
(work_completion)(&(&ssp->srcu_sup->work)->work) tasks_rcu_exit_srcu_srcu_usage.lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&(&ssp->srcu_sup->work)->work) rcu_read_lock &pool->lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&(&ssp->srcu_sup->work)->work) rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&(&ssp->srcu_sup->work)->work) &ssp->srcu_sup->srcu_gp_mutex &rq->__lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&(&ssp->srcu_sup->work)->work) &ssp->srcu_sup->srcu_gp_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rcu_tasks.tasks_gp_mutex &x->wait#3 irq_context: 0 rcu_tasks_trace.tasks_gp_mutex irq_context: 0 rcu_tasks_trace.tasks_gp_mutex rcu_tasks_trace.cbs_gbl_lock irq_context: 0 rcu_tasks_trace.tasks_gp_mutex &rq->__lock irq_context: 0 rcu_tasks_trace.tasks_gp_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)rcu_gp (work_completion)(&(&ssp->srcu_sup->work)->work) &ssp->srcu_sup->srcu_gp_mutex &ssp->srcu_sup->srcu_cb_mutex irq_context: 0 (wq_completion)rcu_gp (work_completion)(&(&ssp->srcu_sup->work)->work) &ssp->srcu_sup->srcu_gp_mutex &ssp->srcu_sup->srcu_cb_mutex tasks_rcu_exit_srcu_srcu_usage.lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&(&ssp->srcu_sup->work)->work) &ssp->srcu_sup->srcu_cb_mutex irq_context: 0 (wq_completion)rcu_gp (work_completion)(&(&ssp->srcu_sup->work)->work) &ssp->srcu_sup->srcu_cb_mutex rcu_read_lock &pool->lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&(&ssp->srcu_sup->work)->work) &ssp->srcu_sup->srcu_cb_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&sdp->work) irq_context: 0 (wq_completion)rcu_gp (work_completion)(&sdp->work) &ACCESS_PRIVATE(sdp, lock) irq_context: 0 (wq_completion)rcu_gp (work_completion)(&sdp->work) &obj_hash[i].lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&sdp->work) &x->wait#3 irq_context: 0 (wq_completion)rcu_gp (work_completion)(&sdp->work) &x->wait#3 &p->pi_lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&sdp->work) &x->wait#3 &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&sdp->work) &x->wait#3 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)rcu_gp (work_completion)(&sdp->work) &rq->__lock irq_context: 0 rcu_tasks.tasks_gp_mutex kernel/rcu/tasks.h:147 irq_context: 0 (null) irq_context: 0 (null) tk_core.seq.seqcount irq_context: softirq &(&kfence_timer)->timer irq_context: softirq &(&kfence_timer)->timer rcu_read_lock &pool->lock/1 irq_context: softirq &(&kfence_timer)->timer rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: softirq &(&kfence_timer)->timer rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: softirq &(&kfence_timer)->timer rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: softirq &(&kfence_timer)->timer rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq (&timer.timer) irq_context: softirq (&timer.timer) &p->pi_lock irq_context: softirq (&timer.timer) &p->pi_lock &rq->__lock irq_context: softirq (&timer.timer) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rcu_tasks.tasks_gp_mutex (&timer.timer) irq_context: 0 rcu_tasks.tasks_gp_mutex rcu_state.exp_mutex rcu_state.exp_wake_mutex &rnp->exp_wq[3] irq_context: 0 rcu_tasks.tasks_gp_mutex &x->wait#2 irq_context: 0 
rcu_tasks.tasks_gp_mutex &x->wait#2 &p->pi_lock irq_context: 0 rcu_tasks.tasks_gp_mutex &x->wait#2 &p->pi_lock &rq->__lock irq_context: 0 rcu_tasks.tasks_gp_mutex &x->wait#2 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rcu_tasks_trace.tasks_gp_mutex rcu_tasks_trace__percpu.cbs_pcpu_lock irq_context: 0 rcu_tasks_trace.tasks_gp_mutex cpu_hotplug_lock irq_context: 0 rcu_tasks_trace.tasks_gp_mutex cpu_hotplug_lock rcu_tasks_trace__percpu.cbs_pcpu_lock irq_context: 0 rcu_tasks_trace.tasks_gp_mutex cpu_hotplug_lock &ACCESS_PRIVATE(rtpcp, lock) irq_context: 0 rcu_tasks_trace.tasks_gp_mutex rcu_state.exp_mutex rcu_node_0 irq_context: 0 rcu_tasks_trace.tasks_gp_mutex rcu_state.exp_mutex rcu_state.exp_wake_mutex irq_context: 0 rcu_tasks_trace.tasks_gp_mutex rcu_state.exp_mutex rcu_state.exp_wake_mutex rcu_node_0 irq_context: 0 rcu_tasks_trace.tasks_gp_mutex rcu_state.exp_mutex rcu_state.exp_wake_mutex &rnp->exp_lock irq_context: 0 rcu_tasks_trace.tasks_gp_mutex rcu_state.exp_mutex rcu_state.exp_wake_mutex &rnp->exp_wq[0] irq_context: 0 rcu_tasks_trace.tasks_gp_mutex &x->wait#2 irq_context: 0 rcu_tasks_trace.tasks_gp_mutex &x->wait#2 &p->pi_lock irq_context: 0 rcu_tasks_trace.tasks_gp_mutex &x->wait#2 &p->pi_lock &rq->__lock irq_context: 0 rcu_tasks_trace.tasks_gp_mutex &x->wait#2 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &nmi_desc[0].lock irq_context: 0 cpu_hotplug_lock smpboot_threads_lock irq_context: 0 cpu_hotplug_lock smpboot_threads_lock fs_reclaim irq_context: 0 cpu_hotplug_lock smpboot_threads_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 cpu_hotplug_lock smpboot_threads_lock batched_entropy_u8.lock irq_context: 0 cpu_hotplug_lock smpboot_threads_lock kfence_freelist_lock irq_context: 0 cpu_hotplug_lock smpboot_threads_lock pool_lock#2 irq_context: 0 cpu_hotplug_lock smpboot_threads_lock kthread_create_lock irq_context: 0 cpu_hotplug_lock smpboot_threads_lock &p->pi_lock irq_context: 0 cpu_hotplug_lock smpboot_threads_lock &p->pi_lock &rq->__lock irq_context: 0 cpu_hotplug_lock smpboot_threads_lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cpu_hotplug_lock smpboot_threads_lock &x->wait irq_context: 0 cpu_hotplug_lock smpboot_threads_lock &rq->__lock irq_context: 0 cpu_hotplug_lock smpboot_threads_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cpu_hotplug_lock smpboot_threads_lock &obj_hash[i].lock irq_context: 0 cpu_hotplug_lock smpboot_threads_lock &c->lock irq_context: 0 cpu_hotplug_lock smpboot_threads_lock &n->list_lock irq_context: 0 &rcu_state.gp_wq irq_context: 0 cpu_hotplug_lock smpboot_threads_lock cpuset_rwsem irq_context: 0 cpu_hotplug_lock smpboot_threads_lock cpuset_rwsem &p->pi_lock irq_context: 0 cpu_hotplug_lock smpboot_threads_lock cpuset_rwsem &p->pi_lock &rq->__lock irq_context: 0 cpu_hotplug_lock smpboot_threads_lock cpuset_rwsem &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &stop_pi_lock irq_context: 0 &stop_pi_lock &rq->__lock irq_context: 0 &stop_pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &stopper->lock irq_context: 0 (module_notify_list).rwsem irq_context: 0 ddebug_lock irq_context: softirq &rcu_state.gp_wq &p->pi_lock irq_context: softirq &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: softirq &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq rcu_callback cpuset_rwsem.rss.gp_wait.lock irq_context: 0 &pmus_srcu irq_context: 
0 watchdog_mutex irq_context: 0 watchdog_mutex cpu_hotplug_lock irq_context: 0 watchdog_mutex cpu_hotplug_lock &obj_hash[i].lock irq_context: 0 watchdog_mutex cpu_hotplug_lock rcu_read_lock &pool->lock irq_context: 0 watchdog_mutex cpu_hotplug_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 watchdog_mutex cpu_hotplug_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 watchdog_mutex cpu_hotplug_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 watchdog_mutex cpu_hotplug_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 watchdog_mutex cpu_hotplug_lock &x->wait#4 irq_context: 0 watchdog_mutex cpu_hotplug_lock &rq->__lock irq_context: 0 (wq_completion)events irq_context: 0 (wq_completion)events (work_completion)(&sscs.work) irq_context: 0 (wq_completion)events (work_completion)(&sscs.work) &x->wait#5 irq_context: 0 (wq_completion)events (work_completion)(&sscs.work) &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&sscs.work) hrtimer_bases.lock irq_context: 0 (wq_completion)events (work_completion)(&sscs.work) hrtimer_bases.lock tk_core.seq.seqcount irq_context: 0 (wq_completion)events (work_completion)(&sscs.work) hrtimer_bases.lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&sscs.work) &x->wait#4 irq_context: 0 (wq_completion)events (work_completion)(&sscs.work) &x->wait#4 &p->pi_lock irq_context: 0 (wq_completion)events (work_completion)(&sscs.work) &x->wait#4 &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&sscs.work) &x->wait#4 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 watchdog_mutex cpu_hotplug_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &newf->file_lock irq_context: 0 init_fs.lock &dentry->d_lock irq_context: 0 &p->vtime.seqcount irq_context: 0 cpu_hotplug_lock mem_hotplug_lock irq_context: 0 cpu_hotplug_lock mem_hotplug_lock mem_hotplug_lock.rss.gp_wait.lock irq_context: 0 cpu_hotplug_lock mem_hotplug_lock rcu_state.exp_mutex rcu_node_0 irq_context: 0 cpu_hotplug_lock mem_hotplug_lock rcu_state.exp_mutex rcu_state.exp_wake_mutex irq_context: 0 cpu_hotplug_lock mem_hotplug_lock rcu_state.exp_mutex rcu_state.exp_wake_mutex rcu_node_0 irq_context: 0 cpu_hotplug_lock mem_hotplug_lock rcu_state.exp_mutex rcu_state.exp_wake_mutex &rnp->exp_lock irq_context: 0 cpu_hotplug_lock mem_hotplug_lock rcu_state.exp_mutex rcu_state.exp_wake_mutex &rnp->exp_wq[1] irq_context: 0 cpu_hotplug_lock mem_hotplug_lock.waiters.lock irq_context: 0 cpu_hotplug_lock mem_hotplug_lock.rss.gp_wait.lock irq_context: 0 cpu_hotplug_lock mem_hotplug_lock.rss.gp_wait.lock &obj_hash[i].lock irq_context: 0 cpu_add_remove_lock irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock cpu_hotplug_lock.rss.gp_wait.lock irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock rcu_state.exp_mutex rcu_node_0 irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock rcu_state.exp_mutex rcu_state.exp_wake_mutex irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock rcu_state.exp_mutex rcu_state.exp_wake_mutex rcu_node_0 irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock rcu_state.exp_mutex rcu_state.exp_wake_mutex &rnp->exp_lock irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock rcu_state.exp_mutex rcu_state.exp_wake_mutex &rnp->exp_wq[2] irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock smpboot_threads_lock irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock 
smpboot_threads_lock fs_reclaim irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock smpboot_threads_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock smpboot_threads_lock pool_lock#2 irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock smpboot_threads_lock kthread_create_lock irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock smpboot_threads_lock &p->pi_lock irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock smpboot_threads_lock &p->pi_lock &rq->__lock irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock smpboot_threads_lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock smpboot_threads_lock &rq->__lock irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock smpboot_threads_lock &x->wait irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock smpboot_threads_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock smpboot_threads_lock &obj_hash[i].lock irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock smpboot_threads_lock cpuset_rwsem irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock smpboot_threads_lock cpuset_rwsem &p->pi_lock irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock smpboot_threads_lock cpuset_rwsem &p->pi_lock &rq->__lock irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock smpboot_threads_lock cpuset_rwsem &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock &swhash->hlist_mutex irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock pmus_lock irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock pmus_lock &cpuctx_mutex irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock pcp_batch_high_lock irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock &xa->xa_lock irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock fs_reclaim irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock pool_lock#2 irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock kthread_create_lock irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock &p->pi_lock irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock &p->pi_lock &rq->__lock irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock &x->wait irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock &rq->__lock irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock &obj_hash[i].lock irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock wq_pool_attach_mutex irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock &pool->lock irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock &pool->lock &p->pi_lock irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock pcpu_alloc_mutex irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock pcpu_alloc_mutex pcpu_lock irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock relay_channels_mutex irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock rcu_node_0 irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock &c->lock irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock &n->list_lock irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock 
&pcp->lock &zone->lock irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock &zone->lock irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock &____s->seqcount irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock sparse_irq_lock irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock sparse_irq_lock text_mutex irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock sparse_irq_lock fs_reclaim irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock sparse_irq_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock sparse_irq_lock pool_lock#2 irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock sparse_irq_lock free_vmap_area_lock irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock sparse_irq_lock vmap_area_lock irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock sparse_irq_lock &____s->seqcount irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock sparse_irq_lock init_mm.page_table_lock irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock sparse_irq_lock (console_sem).lock irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock sparse_irq_lock console_lock console_srcu console_owner_lock irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock sparse_irq_lock console_lock console_srcu console_owner irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock sparse_irq_lock console_lock console_srcu console_owner &port_lock_key irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock sparse_irq_lock console_lock console_srcu console_owner console_owner_lock irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock sparse_irq_lock rtc_lock irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock sparse_irq_lock &rq->__lock irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock sparse_irq_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &rq->__lock &rq->__lock/1 irq_context: 0 &rq->__lock/1 irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock &x->wait#6 irq_context: 0 &x->wait#6 irq_context: 0 &x->wait#6 &p->pi_lock irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock &p->pi_lock &cfs_rq->removed.lock irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 cpu_hotplug_lock irq_context: 0 cpu_hotplug_lock cpuhp_state-up irq_context: 0 cpu_hotplug_lock cpuhp_state-up smpboot_threads_lock irq_context: 0 cpu_hotplug_lock cpuhp_state-up smpboot_threads_lock &p->pi_lock irq_context: 0 cpu_hotplug_lock cpuhp_state-up smpboot_threads_lock &p->pi_lock &rq->__lock irq_context: 0 cpu_hotplug_lock cpuhp_state-up smpboot_threads_lock &p->pi_lock &cfs_rq->removed.lock irq_context: 0 cpu_hotplug_lock cpuhp_state-up smpboot_threads_lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cpu_hotplug_lock cpuhp_state-up sparse_irq_lock irq_context: 0 cpu_hotplug_lock cpuhp_state-up sparse_irq_lock &irq_desc_lock_class irq_context: 0 cpu_hotplug_lock cpuhp_state-up &swhash->hlist_mutex irq_context: 0 cpu_hotplug_lock cpuhp_state-up pmus_lock irq_context: 0 cpu_hotplug_lock cpuhp_state-up pmus_lock &cpuctx_mutex irq_context: 0 cpu_hotplug_lock cpuhp_state-up &x->wait#5 irq_context: 0 cpu_hotplug_lock cpuhp_state-up &obj_hash[i].lock irq_context: 0 cpu_hotplug_lock cpuhp_state-up hrtimer_bases.lock irq_context: 0 cpu_hotplug_lock cpuhp_state-up hrtimer_bases.lock tk_core.seq.seqcount irq_context: 0 cpu_hotplug_lock cpuhp_state-up hrtimer_bases.lock &obj_hash[i].lock irq_context: 0 cpu_hotplug_lock cpuhp_state-up wq_pool_mutex irq_context: 0 cpu_hotplug_lock cpuhp_state-up wq_pool_mutex wq_pool_attach_mutex irq_context: 0 cpu_hotplug_lock 
cpuhp_state-up wq_pool_mutex wq_pool_attach_mutex &p->pi_lock irq_context: 0 cpu_hotplug_lock cpuhp_state-up wq_pool_mutex wq_pool_attach_mutex &p->pi_lock &rq->__lock irq_context: 0 cpu_hotplug_lock cpuhp_state-up wq_pool_mutex wq_pool_attach_mutex &x->wait#7 irq_context: 0 cpu_hotplug_lock cpuhp_state-up wq_pool_mutex wq_pool_attach_mutex &pool->lock irq_context: 0 cpu_hotplug_lock cpuhp_state-up wq_pool_mutex &wq->mutex irq_context: 0 cpu_hotplug_lock cpuhp_state-up wq_pool_mutex &wq->mutex &pool->lock/1 irq_context: 0 cpu_hotplug_lock cpuhp_state-up wq_pool_mutex &pool->lock/1 irq_context: 0 cpu_hotplug_lock cpuhp_state-up rcu_node_0 irq_context: 0 cpu_hotplug_lock cpuhp_state-up &rq->__lock irq_context: 0 cpu_hotplug_lock cpuhp_state-up jump_label_mutex irq_context: 0 cpu_hotplug_lock cpuhp_state-up jump_label_mutex text_mutex irq_context: 0 cpu_hotplug_lock cpuhp_state-up jump_label_mutex text_mutex ptlock_ptr(page)#2 irq_context: softirq (&timer.timer) &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 rcu_tasks_trace.tasks_gp_mutex cpu_hotplug_lock cpu_hotplug_lock.waiters.lock irq_context: 0 rcu_tasks_trace.tasks_gp_mutex cpu_hotplug_lock &rq->__lock irq_context: 0 rcu_tasks_trace.tasks_gp_mutex cpu_hotplug_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cpu_hotplug_lock cpuhp_state-up &rq->__lock &rt_b->rt_runtime_lock irq_context: 0 cpu_hotplug_lock cpuhp_state-up &rq->__lock &rt_b->rt_runtime_lock &rt_rq->rt_runtime_lock irq_context: 0 cpu_hotplug_lock cpuhp_state-up &rq->__lock rcu_read_lock &cfs_b->lock irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock.waiters.lock irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock.waiters.lock &p->pi_lock irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock.waiters.lock &p->pi_lock &rq->__lock irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock.waiters.lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cpu_add_remove_lock &rq->__lock irq_context: 0 rcu_tasks_trace.tasks_gp_mutex rcu_state.exp_mutex rcu_state.exp_wake_mutex &rnp->exp_wq[3] irq_context: 0 rcu_tasks_trace.tasks_gp_mutex &obj_hash[i].lock irq_context: 0 rcu_tasks_trace.tasks_gp_mutex &base->lock irq_context: 0 rcu_tasks_trace.tasks_gp_mutex &base->lock &obj_hash[i].lock irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock.rss.gp_wait.lock irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock.rss.gp_wait.lock &obj_hash[i].lock irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock.rss.gp_wait.lock &obj_hash[i].lock pool_lock irq_context: 0 cpu_add_remove_lock spec_ctrl_mutex irq_context: 0 cpu_add_remove_lock spec_ctrl_mutex cpu_hotplug_lock irq_context: 0 cpu_add_remove_lock spec_ctrl_mutex cpu_hotplug_lock jump_label_mutex irq_context: 0 cpu_add_remove_lock spec_ctrl_mutex cpu_hotplug_lock jump_label_mutex text_mutex irq_context: 0 cpu_add_remove_lock spec_ctrl_mutex cpu_hotplug_lock jump_label_mutex text_mutex ptlock_ptr(page)#2 irq_context: 0 cpu_add_remove_lock spec_ctrl_mutex (console_sem).lock irq_context: 0 cpu_add_remove_lock spec_ctrl_mutex console_lock console_srcu console_owner_lock irq_context: 0 cpu_add_remove_lock spec_ctrl_mutex console_lock console_srcu console_owner irq_context: 0 cpu_add_remove_lock spec_ctrl_mutex console_lock console_srcu console_owner &port_lock_key irq_context: 0 cpu_add_remove_lock spec_ctrl_mutex console_lock console_srcu console_owner console_owner_lock irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock jump_label_mutex 
irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock jump_label_mutex text_mutex irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock jump_label_mutex text_mutex ptlock_ptr(page)#2 irq_context: 0 cpu_add_remove_lock cpuset_hotplug_work irq_context: softirq rcu_callback &obj_hash[i].lock irq_context: softirq rcu_callback pool_lock#2 irq_context: 0 cpu_hotplug_lock stop_cpus_mutex irq_context: 0 cpu_hotplug_lock stop_cpus_mutex &stopper->lock irq_context: 0 cpu_hotplug_lock stop_cpus_mutex &stop_pi_lock irq_context: 0 cpu_hotplug_lock stop_cpus_mutex &stop_pi_lock &rq->__lock irq_context: 0 cpu_hotplug_lock stop_cpus_mutex &stop_pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cpu_hotplug_lock stop_cpus_mutex &rq->__lock irq_context: 0 &x->wait#8 irq_context: 0 cpu_hotplug_lock stop_cpus_mutex &x->wait#8 irq_context: 0 sched_domains_mutex irq_context: 0 sched_domains_mutex fs_reclaim irq_context: 0 sched_domains_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sched_domains_mutex pool_lock#2 irq_context: 0 sched_domains_mutex &obj_hash[i].lock irq_context: 0 sched_domains_mutex pcpu_alloc_mutex irq_context: 0 sched_domains_mutex pcpu_alloc_mutex pcpu_lock irq_context: 0 sched_domains_mutex &c->lock irq_context: 0 sched_domains_mutex &pcp->lock &zone->lock irq_context: 0 sched_domains_mutex &zone->lock irq_context: 0 sched_domains_mutex &____s->seqcount irq_context: 0 sched_domains_mutex rcu_read_lock &rq->__lock irq_context: 0 sched_domains_mutex rcu_read_lock &rq->__lock &cp->lock irq_context: 0 sched_domains_mutex rcu_read_lock &rq->__lock &rt_b->rt_runtime_lock irq_context: 0 sched_domains_mutex rcu_read_lock &rq->__lock &rt_b->rt_runtime_lock &rt_rq->rt_runtime_lock irq_context: 0 sched_domains_mutex rcu_read_lock &rq->__lock rcu_read_lock &cfs_b->lock irq_context: 0 sched_domains_mutex pcpu_lock irq_context: 0 slab_mutex fs_reclaim irq_context: 0 slab_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (memory_chain).rwsem irq_context: 0 cpu_hotplug_lock wq_pool_mutex fs_reclaim irq_context: 0 cpu_hotplug_lock wq_pool_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#5/1 irq_context: 0 &type->s_umount_key#5/1 fs_reclaim irq_context: 0 &type->s_umount_key#5/1 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#5/1 pool_lock#2 irq_context: 0 &type->s_umount_key#5/1 pcpu_alloc_mutex irq_context: 0 &type->s_umount_key#5/1 pcpu_alloc_mutex pcpu_lock irq_context: 0 &type->s_umount_key#5/1 shrinker_rwsem irq_context: 0 &type->s_umount_key#5/1 list_lrus_mutex irq_context: 0 &type->s_umount_key#5/1 sb_lock irq_context: 0 &type->s_umount_key#5/1 sb_lock unnamed_dev_ida.xa_lock irq_context: 0 &type->s_umount_key#5/1 &obj_hash[i].lock irq_context: 0 &type->s_umount_key#5/1 percpu_counters_lock irq_context: 0 &type->s_umount_key#5/1 crngs.lock irq_context: 0 &type->s_umount_key#5/1 &sbinfo->stat_lock irq_context: 0 &type->s_umount_key#5/1 mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#5/1 &sb->s_type->i_lock_key#5 irq_context: 0 &type->s_umount_key#5/1 &s->s_inode_list_lock irq_context: 0 &type->s_umount_key#5/1 tk_core.seq.seqcount irq_context: 0 &type->s_umount_key#5/1 batched_entropy_u32.lock irq_context: 0 &type->s_umount_key#5/1 &sb->s_type->i_lock_key#5 &dentry->d_lock irq_context: 0 &type->s_umount_key#5/1 &dentry->d_lock irq_context: 0 (setup_done).wait.lock irq_context: 0 namespace_sem irq_context: 0 namespace_sem fs_reclaim irq_context: 0 
namespace_sem fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 namespace_sem &pcp->lock &zone->lock irq_context: 0 namespace_sem &zone->lock irq_context: 0 namespace_sem &____s->seqcount irq_context: 0 namespace_sem pool_lock#2 irq_context: 0 namespace_sem &c->lock irq_context: 0 namespace_sem mnt_id_ida.xa_lock irq_context: 0 namespace_sem pcpu_alloc_mutex irq_context: 0 namespace_sem pcpu_alloc_mutex pcpu_lock irq_context: 0 namespace_sem &dentry->d_lock irq_context: 0 namespace_sem mount_lock irq_context: 0 namespace_sem mount_lock mount_lock.seqcount irq_context: 0 &p->alloc_lock init_fs.lock irq_context: 0 rcu_read_lock &____s->seqcount#3 irq_context: 0 file_systems_lock irq_context: 0 &type->s_umount_key#6 irq_context: 0 &type->s_umount_key#6 fs_reclaim irq_context: 0 &type->s_umount_key#6 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#6 pool_lock#2 irq_context: 0 &type->s_umount_key#6 &dentry->d_lock irq_context: 0 &type->s_umount_key#6 &pcp->lock &zone->lock irq_context: 0 &type->s_umount_key#6 &zone->lock irq_context: 0 &type->s_umount_key#6 &____s->seqcount irq_context: 0 &type->s_umount_key#6 &c->lock irq_context: 0 &type->s_umount_key#6 &lru->node[i].lock irq_context: 0 &type->s_umount_key#6 &sbinfo->stat_lock irq_context: 0 &type->s_umount_key#6 rcu_read_lock &dentry->d_lock irq_context: 0 &type->s_umount_key#6 &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key irq_context: 0 &sb->s_type->i_mutex_key namespace_sem irq_context: 0 &sb->s_type->i_mutex_key namespace_sem rcu_read_lock mount_lock.seqcount irq_context: 0 &sb->s_type->i_mutex_key namespace_sem fs_reclaim irq_context: 0 &sb->s_type->i_mutex_key namespace_sem fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &sb->s_type->i_mutex_key namespace_sem pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key namespace_sem rename_lock irq_context: 0 &sb->s_type->i_mutex_key namespace_sem rename_lock rename_lock.seqcount irq_context: 0 &sb->s_type->i_mutex_key namespace_sem rename_lock rename_lock.seqcount &dentry->d_lock irq_context: 0 &sb->s_type->i_mutex_key namespace_sem mount_lock irq_context: 0 &sb->s_type->i_mutex_key namespace_sem mount_lock &dentry->d_lock irq_context: 0 &sb->s_type->i_mutex_key namespace_sem mount_lock mount_lock.seqcount irq_context: 0 &sb->s_type->i_mutex_key namespace_sem mount_lock mount_lock.seqcount &new_ns->poll irq_context: 0 &sb->s_type->i_mutex_key namespace_sem mount_lock mount_lock.seqcount &dentry->d_lock irq_context: 0 &sb->s_type->i_mutex_key namespace_sem mount_lock mount_lock.seqcount rcu_read_lock &dentry->d_lock irq_context: 0 &sb->s_type->i_mutex_key namespace_sem mount_lock mount_lock.seqcount &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key namespace_sem mount_lock mount_lock.seqcount pool_lock#2 irq_context: 0 rcu_read_lock &sb->s_type->i_lock_key#2 irq_context: 0 rcu_read_lock &____s->seqcount#4 irq_context: 0 &sb->s_type->i_lock_key#5 irq_context: 0 &fs->lock irq_context: 0 &fs->lock &____s->seqcount#3 irq_context: 0 (setup_done).wait.lock &p->pi_lock irq_context: 0 (setup_done).wait.lock &p->pi_lock &cfs_rq->removed.lock irq_context: 0 (setup_done).wait.lock &p->pi_lock &rq->__lock irq_context: 0 (setup_done).wait.lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 req_lock irq_context: 0 of_mutex irq_context: 0 of_mutex fs_reclaim irq_context: 0 of_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 of_mutex pool_lock#2 irq_context: 0 of_mutex lock irq_context: 
0 of_mutex lock kernfs_idr_lock irq_context: 0 of_mutex &root->kernfs_rwsem irq_context: 0 of_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 &x->wait#9 irq_context: 0 &k->list_lock irq_context: 0 bus_type_sem irq_context: 0 &root->kernfs_rwsem irq_context: 0 &dev->power.lock irq_context: 0 dpm_list_mtx irq_context: 0 uevent_sock_mutex irq_context: 0 running_helpers_waitq.lock irq_context: softirq &rcu_state.gp_wq &p->pi_lock &cfs_rq->removed.lock irq_context: 0 sysfs_symlink_target_lock irq_context: 0 &k->k_lock irq_context: 0 &dev->mutex &k->list_lock irq_context: 0 &dev->mutex &k->k_lock irq_context: 0 &dev->mutex &dev->power.lock irq_context: 0 subsys mutex irq_context: 0 memory_blocks.xa_lock irq_context: softirq rcu_callback mem_hotplug_lock.rss.gp_wait.lock irq_context: 0 rcu_tasks_trace.tasks_gp_mutex (&timer.timer) irq_context: 0 rcu_tasks_trace.tasks_gp_mutex (console_sem).lock irq_context: 0 rcu_tasks_trace.tasks_gp_mutex console_lock console_srcu console_owner_lock irq_context: 0 rcu_tasks_trace.tasks_gp_mutex console_lock console_srcu console_owner irq_context: 0 rcu_tasks_trace.tasks_gp_mutex console_lock console_srcu console_owner &port_lock_key irq_context: 0 memory_blocks.xa_lock pool_lock#2 irq_context: 0 rcu_tasks_trace.tasks_gp_mutex console_lock console_srcu console_owner console_owner_lock irq_context: 0 lock kernfs_idr_lock &c->lock irq_context: 0 lock kernfs_idr_lock &____s->seqcount irq_context: 0 subsys mutex#2 irq_context: hardirq &rq->__lock &cfs_rq->removed.lock irq_context: softirq rcu_callback cpu_hotplug_lock.rss.gp_wait.lock irq_context: 0 register_lock irq_context: 0 register_lock proc_subdir_lock irq_context: 0 register_lock fs_reclaim irq_context: 0 register_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 register_lock pool_lock#2 irq_context: 0 register_lock proc_inum_ida.xa_lock irq_context: 0 register_lock proc_subdir_lock irq_context: 0 register_lock &c->lock irq_context: 0 register_lock &pcp->lock &zone->lock irq_context: 0 register_lock &zone->lock irq_context: 0 register_lock &____s->seqcount irq_context: 0 register_lock proc_inum_ida.xa_lock &pcp->lock &zone->lock irq_context: 0 register_lock proc_inum_ida.xa_lock &zone->lock irq_context: 0 register_lock proc_inum_ida.xa_lock &____s->seqcount irq_context: 0 register_lock proc_inum_ida.xa_lock pool_lock#2 irq_context: 0 register_lock proc_inum_ida.xa_lock &c->lock irq_context: 0 register_lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 rcu_read_lock &rq->__lock irq_context: softirq rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (pm_chain_head).rwsem irq_context: 0 cpufreq_governor_mutex irq_context: 0 rcu_state.exp_mutex &obj_hash[i].lock irq_context: 0 rcu_state.exp_mutex rcu_read_lock &pool->lock irq_context: 0 rcu_state.exp_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &cfs_rq->removed.lock irq_context: 0 rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rcu_state.exp_mutex &rnp->exp_wq[2] irq_context: 0 rcu_state.exp_mutex &rq->__lock irq_context: 0 rcu_state.exp_mutex &rq->__lock &per_cpu_ptr(group->pcpu, 
cpu)->seq irq_context: 0 rcu_state.exp_mutex rcu_read_lock &rq->__lock irq_context: 0 &pool->lock &p->pi_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&rew->rew_work) irq_context: 0 (wq_completion)rcu_gp (work_completion)(&rew->rew_work) rcu_node_0 irq_context: 0 (wq_completion)rcu_gp (work_completion)(&rew->rew_work) rcu_state.exp_wake_mutex irq_context: 0 (wq_completion)rcu_gp (work_completion)(&rew->rew_work) rcu_state.exp_wake_mutex rcu_node_0 irq_context: 0 (wq_completion)rcu_gp (work_completion)(&rew->rew_work) rcu_state.exp_wake_mutex &rnp->exp_lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&rew->rew_work) rcu_state.exp_wake_mutex &rnp->exp_wq[2] irq_context: 0 (wq_completion)rcu_gp (work_completion)(&rew->rew_work) rcu_state.exp_wake_mutex &rnp->exp_wq[2] &p->pi_lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&rew->rew_work) rcu_state.exp_wake_mutex &rnp->exp_wq[2] &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&rew->rew_work) rcu_state.exp_wake_mutex &rnp->exp_wq[2] &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rcu_state.exp_mutex &rnp->exp_wq[3] irq_context: 0 (wq_completion)rcu_gp (work_completion)(&rew->rew_work) rcu_state.exp_wake_mutex &rnp->exp_wq[3] irq_context: 0 (wq_completion)rcu_gp (work_completion)(&rew->rew_work) rcu_state.exp_wake_mutex &rnp->exp_wq[3] &p->pi_lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&rew->rew_work) rcu_state.exp_wake_mutex &rnp->exp_wq[3] &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&rew->rew_work) rcu_state.exp_wake_mutex &rnp->exp_wq[3] &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 dyn_event_ops_mutex irq_context: 0 binfmt_lock irq_context: 0 pin_fs_lock irq_context: 0 &type->s_umount_key#7/1 irq_context: 0 &type->s_umount_key#7/1 fs_reclaim irq_context: 0 &type->s_umount_key#7/1 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#7/1 pool_lock#2 irq_context: 0 &type->s_umount_key#7/1 pcpu_alloc_mutex irq_context: 0 &type->s_umount_key#7/1 pcpu_alloc_mutex pcpu_lock irq_context: 0 &type->s_umount_key#7/1 shrinker_rwsem irq_context: 0 &type->s_umount_key#7/1 list_lrus_mutex irq_context: 0 &type->s_umount_key#7/1 sb_lock irq_context: 0 &type->s_umount_key#7/1 sb_lock unnamed_dev_ida.xa_lock irq_context: 0 &type->s_umount_key#7/1 &____s->seqcount irq_context: 0 &type->s_umount_key#7/1 &c->lock irq_context: 0 &type->s_umount_key#7/1 mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#7/1 &sb->s_type->i_lock_key#6 irq_context: 0 &type->s_umount_key#7/1 &s->s_inode_list_lock irq_context: 0 &type->s_umount_key#7/1 tk_core.seq.seqcount irq_context: 0 &type->s_umount_key#7/1 &pcp->lock &zone->lock irq_context: 0 &type->s_umount_key#7/1 &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &type->s_umount_key#7/1 &sb->s_type->i_lock_key#6 &dentry->d_lock irq_context: 0 &type->s_umount_key#7/1 &dentry->d_lock irq_context: 0 rcu_read_lock mount_lock irq_context: 0 rcu_read_lock mount_lock mount_lock.seqcount irq_context: 0 &sb->s_type->i_mutex_key#2 irq_context: 0 &sb->s_type->i_mutex_key#2 &sb->s_type->i_lock_key#6 irq_context: 0 &sb->s_type->i_mutex_key#2 rename_lock.seqcount irq_context: 0 &sb->s_type->i_mutex_key#2 fs_reclaim irq_context: 0 &sb->s_type->i_mutex_key#2 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &sb->s_type->i_mutex_key#2 pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#2 
&dentry->d_lock irq_context: 0 &sb->s_type->i_mutex_key#2 rcu_read_lock rename_lock.seqcount irq_context: 0 &sb->s_type->i_mutex_key#2 &dentry->d_lock &wq irq_context: 0 &sb->s_type->i_mutex_key#2 mmu_notifier_invalidate_range_start irq_context: 0 &sb->s_type->i_mutex_key#2 &s->s_inode_list_lock irq_context: 0 &sb->s_type->i_mutex_key#2 tk_core.seq.seqcount irq_context: 0 &sb->s_type->i_mutex_key#2 &sb->s_type->i_lock_key#6 &dentry->d_lock irq_context: 0 chrdevs_lock irq_context: 0 cb_lock irq_context: 0 cb_lock genl_mutex irq_context: 0 cb_lock genl_mutex fs_reclaim irq_context: 0 cb_lock genl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 cb_lock genl_mutex pool_lock#2 irq_context: 0 &type->s_umount_key#8/1 irq_context: 0 &type->s_umount_key#8/1 fs_reclaim irq_context: 0 &type->s_umount_key#8/1 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#8/1 pool_lock#2 irq_context: 0 &type->s_umount_key#8/1 pcpu_alloc_mutex irq_context: 0 &type->s_umount_key#8/1 pcpu_alloc_mutex pcpu_lock irq_context: 0 &type->s_umount_key#8/1 shrinker_rwsem irq_context: 0 &type->s_umount_key#8/1 list_lrus_mutex irq_context: 0 &type->s_umount_key#8/1 sb_lock irq_context: 0 &type->s_umount_key#8/1 sb_lock unnamed_dev_ida.xa_lock irq_context: 0 &type->s_umount_key#8/1 mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#8/1 &sb->s_type->i_lock_key#7 irq_context: 0 &type->s_umount_key#8/1 &s->s_inode_list_lock irq_context: 0 &type->s_umount_key#8/1 tk_core.seq.seqcount irq_context: 0 &type->s_umount_key#8/1 &sb->s_type->i_lock_key#7 &dentry->d_lock irq_context: 0 &type->s_umount_key#8/1 &dentry->d_lock irq_context: 0 &sb->s_type->i_mutex_key#3 irq_context: 0 &sb->s_type->i_mutex_key#3 &sb->s_type->i_lock_key#7 irq_context: 0 &sb->s_type->i_mutex_key#3 rename_lock.seqcount irq_context: 0 &sb->s_type->i_mutex_key#3 fs_reclaim irq_context: 0 &sb->s_type->i_mutex_key#3 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &sb->s_type->i_mutex_key#3 pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#3 &dentry->d_lock irq_context: 0 &sb->s_type->i_mutex_key#3 rcu_read_lock rename_lock.seqcount irq_context: 0 &sb->s_type->i_mutex_key#3 &dentry->d_lock &wq irq_context: 0 &sb->s_type->i_mutex_key#3 mmu_notifier_invalidate_range_start irq_context: 0 &sb->s_type->i_mutex_key#3 &s->s_inode_list_lock irq_context: 0 &sb->s_type->i_mutex_key#3 tk_core.seq.seqcount irq_context: 0 &sb->s_type->i_mutex_key#3 &sb->s_type->i_lock_key#7 &dentry->d_lock irq_context: 0 subsys mutex#3 irq_context: 0 async_lock irq_context: 0 rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) tk_core.seq.seqcount irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex device_links_srcu irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &dev->power.lock irq_context: 0 regulator_list_mutex irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex fwnode_link_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex device_links_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &k->list_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex sysfs_symlink_target_lock 
irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex fs_reclaim irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &pcp->lock &zone->lock irq_context: 0 rtc_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &zone->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &____s->seqcount irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex pool_lock#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex lock kernfs_idr_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &root->kernfs_rwsem irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &x->wait#9 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &dev->devres_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex regulator_nesting_mutex irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex regulator_ww_class_mutex regulator_nesting_mutex irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex devtree_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex gdp_mutex irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex gdp_mutex &k->list_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex gdp_mutex fs_reclaim irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex gdp_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex gdp_mutex pool_lock#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex gdp_mutex lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex gdp_mutex lock kernfs_idr_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex gdp_mutex &root->kernfs_rwsem irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex gdp_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex bus_type_sem irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &c->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &root->kernfs_rwsem irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex dpm_list_mtx irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex uevent_sock_mutex irq_context: 0 (wq_completion)events_unbound 
(work_completion)(&entry->work) &dev->mutex running_helpers_waitq.lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &k->k_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex subsys mutex#4 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex subsys mutex#4 &k->k_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex pin_fs_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &sb->s_type->i_mutex_key#3 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &sb->s_type->i_mutex_key#3 rename_lock.seqcount irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &sb->s_type->i_mutex_key#3 fs_reclaim irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &sb->s_type->i_mutex_key#3 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &sb->s_type->i_mutex_key#3 pool_lock#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &sb->s_type->i_mutex_key#3 &dentry->d_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &sb->s_type->i_mutex_key#3 rcu_read_lock rename_lock.seqcount irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &sb->s_type->i_mutex_key#3 &dentry->d_lock &wq irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &sb->s_type->i_mutex_key#3 mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &sb->s_type->i_mutex_key#3 &sb->s_type->i_lock_key#7 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &sb->s_type->i_mutex_key#3 &s->s_inode_list_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &sb->s_type->i_mutex_key#3 tk_core.seq.seqcount irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &sb->s_type->i_mutex_key#3 &sb->s_type->i_lock_key#7 &dentry->d_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex regulator_list_mutex irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex deferred_probe_mutex irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex probe_waitqueue.lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) async_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) async_lock &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) async_lock pool_lock#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) async_done.lock irq_context: 0 &type->s_umount_key#9/1 irq_context: 0 &type->s_umount_key#9/1 fs_reclaim irq_context: 0 &type->s_umount_key#9/1 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#9/1 pool_lock#2 irq_context: 0 &type->s_umount_key#9/1 pcpu_alloc_mutex irq_context: 0 &type->s_umount_key#9/1 pcpu_alloc_mutex pcpu_lock irq_context: 0 &type->s_umount_key#9/1 shrinker_rwsem irq_context: 0 &type->s_umount_key#9/1 list_lrus_mutex irq_context: 0 &type->s_umount_key#9/1 sb_lock irq_context: 0 
&type->s_umount_key#9/1 sb_lock unnamed_dev_ida.xa_lock irq_context: 0 &type->s_umount_key#9/1 &pcp->lock &zone->lock irq_context: 0 &type->s_umount_key#9/1 &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &type->s_umount_key#9/1 &____s->seqcount irq_context: 0 &type->s_umount_key#9/1 &c->lock irq_context: 0 &type->s_umount_key#9/1 mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#9/1 &sb->s_type->i_lock_key#8 irq_context: 0 &type->s_umount_key#9/1 &s->s_inode_list_lock irq_context: 0 &type->s_umount_key#9/1 tk_core.seq.seqcount irq_context: 0 &type->s_umount_key#9/1 &sb->s_type->i_lock_key#8 &dentry->d_lock irq_context: 0 &type->s_umount_key#9/1 &dentry->d_lock irq_context: 0 pernet_ops_rwsem fs_reclaim irq_context: 0 pernet_ops_rwsem fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 pernet_ops_rwsem &____s->seqcount irq_context: 0 pernet_ops_rwsem &c->lock irq_context: 0 pernet_ops_rwsem sysctl_lock irq_context: 0 pack_mutex irq_context: 0 pack_mutex fs_reclaim irq_context: 0 pack_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 pack_mutex &pcp->lock &zone->lock irq_context: 0 pack_mutex &zone->lock irq_context: 0 pack_mutex &____s->seqcount irq_context: 0 pack_mutex pool_lock#2 irq_context: 0 pack_mutex free_vmap_area_lock irq_context: 0 pack_mutex vmap_area_lock irq_context: 0 pack_mutex init_mm.page_table_lock irq_context: 0 pack_mutex &pcp->lock &zone->lock &____s->seqcount irq_context: 0 rcu_tasks.tasks_gp_mutex rcu_state.exp_mutex &obj_hash[i].lock irq_context: 0 rcu_tasks.tasks_gp_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock irq_context: 0 rcu_tasks.tasks_gp_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 rcu_tasks.tasks_gp_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 rcu_tasks.tasks_gp_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 rcu_tasks.tasks_gp_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rcu_tasks.tasks_gp_mutex rcu_state.exp_mutex &rnp->exp_wq[0] irq_context: 0 rcu_tasks.tasks_gp_mutex rcu_state.exp_mutex &rq->__lock irq_context: 0 rcu_tasks.tasks_gp_mutex rcu_state.exp_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pack_mutex rcu_read_lock rcu_node_0 irq_context: 0 pack_mutex rcu_read_lock &rcu_state.expedited_wq irq_context: 0 (wq_completion)rcu_gp (work_completion)(&rew->rew_work) &rcu_state.expedited_wq irq_context: 0 (wq_completion)rcu_gp (work_completion)(&rew->rew_work) rcu_state.exp_wake_mutex &rnp->exp_wq[0] irq_context: 0 (wq_completion)rcu_gp (work_completion)(&rew->rew_work) rcu_state.exp_wake_mutex &rnp->exp_wq[0] &p->pi_lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&rew->rew_work) rcu_state.exp_wake_mutex &rnp->exp_wq[0] &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&rew->rew_work) rcu_state.exp_wake_mutex &rnp->exp_wq[0] &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pack_mutex batched_entropy_u8.lock irq_context: 0 pack_mutex batched_entropy_u8.lock crngs.lock irq_context: 0 pack_mutex kfence_freelist_lock irq_context: 0 pack_mutex vmap_purge_lock irq_context: 0 pack_mutex vmap_purge_lock purge_vmap_area_lock irq_context: 0 pack_mutex cpa_lock irq_context: 0 pack_mutex cpa_lock pgd_lock irq_context: 0 text_mutex irq_context: 0 text_mutex ptlock_ptr(page)#2 irq_context: 0 &fp->aux->used_maps_mutex 
irq_context: 0 pernet_ops_rwsem pcpu_alloc_mutex irq_context: 0 pernet_ops_rwsem pcpu_alloc_mutex pcpu_lock irq_context: 0 proto_list_mutex irq_context: 0 targets_mutex irq_context: 0 nl_table_lock irq_context: 0 nl_table_wait.lock irq_context: 0 net_family_lock irq_context: 0 pernet_ops_rwsem net_generic_ids.xa_lock irq_context: 0 pernet_ops_rwsem mmu_notifier_invalidate_range_start irq_context: 0 pernet_ops_rwsem &sb->s_type->i_lock_key#8 irq_context: 0 pernet_ops_rwsem &dir->lock irq_context: 0 pernet_ops_rwsem &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_NETLINK irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_NETLINK k-slock-AF_NETLINK irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_NETLINK rcu_read_lock rhashtable_bucket irq_context: 0 pernet_ops_rwsem k-slock-AF_NETLINK irq_context: 0 pernet_ops_rwsem nl_table_lock irq_context: 0 pernet_ops_rwsem nl_table_wait.lock irq_context: 0 pernet_ops_rwsem rtnl_mutex irq_context: 0 rtnl_mutex fs_reclaim irq_context: 0 rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sparse_irq_lock irq_context: 0 sparse_irq_lock fs_reclaim irq_context: 0 sparse_irq_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sparse_irq_lock pool_lock#2 irq_context: 0 sparse_irq_lock lock irq_context: 0 sparse_irq_lock lock kernfs_idr_lock irq_context: 0 sparse_irq_lock &root->kernfs_rwsem irq_context: 0 sparse_irq_lock &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 sparse_irq_lock &c->lock irq_context: 0 sparse_irq_lock &____s->seqcount irq_context: 0 sparse_irq_lock lock kernfs_idr_lock pool_lock#2 irq_context: 0 &list->lock irq_context: 0 kauditd_wait.lock irq_context: 0 lock#2 irq_context: 0 lock#2 &zone->lock irq_context: 0 pcp_batch_high_lock irq_context: 0 khugepaged_mutex irq_context: 0 gdp_mutex irq_context: 0 gdp_mutex &k->list_lock irq_context: 0 gdp_mutex fs_reclaim irq_context: 0 gdp_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 gdp_mutex pool_lock#2 irq_context: 0 gdp_mutex lock irq_context: 0 gdp_mutex lock kernfs_idr_lock irq_context: 0 gdp_mutex &root->kernfs_rwsem irq_context: 0 gdp_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 subsys mutex#5 irq_context: 0 subsys mutex#5 &k->k_lock irq_context: 0 subsys mutex#6 irq_context: 0 subsys mutex#6 &k->list_lock irq_context: 0 subsys mutex#6 &k->k_lock irq_context: 0 regmap_debugfs_early_lock irq_context: 0 (acpi_reconfig_chain).rwsem irq_context: 0 __i2c_board_lock irq_context: 0 core_lock irq_context: 0 core_lock &k->list_lock irq_context: 0 core_lock &k->k_lock irq_context: 0 cb_lock genl_mutex nl_table_lock irq_context: 0 cb_lock genl_mutex nl_table_wait.lock irq_context: 0 nl_table_lock irq_context: 0 thermal_governor_lock irq_context: 0 thermal_governor_lock thermal_list_lock irq_context: 0 cpuidle_lock irq_context: 0 cpuidle_lock rcu_state.exp_mutex rcu_node_0 irq_context: 0 cpuidle_lock rcu_state.exp_mutex &obj_hash[i].lock irq_context: 0 cpuidle_lock rcu_state.exp_mutex rcu_read_lock &pool->lock irq_context: 0 cpuidle_lock rcu_state.exp_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 cpuidle_lock rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 cpuidle_lock rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 cpuidle_lock rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cpuidle_lock rcu_state.exp_mutex &rnp->exp_wq[1] irq_context: 0 cpuidle_lock 
rcu_state.exp_mutex &rq->__lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&rew->rew_work) rcu_state.exp_wake_mutex &rnp->exp_wq[1] irq_context: 0 (wq_completion)rcu_gp (work_completion)(&rew->rew_work) rcu_state.exp_wake_mutex &rnp->exp_wq[1] &p->pi_lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&rew->rew_work) rcu_state.exp_wake_mutex &rnp->exp_wq[1] &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&rew->rew_work) rcu_state.exp_wake_mutex &rnp->exp_wq[1] &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cpuidle_lock &obj_hash[i].lock irq_context: 0 cpuidle_lock (console_sem).lock irq_context: 0 cpuidle_lock console_lock console_srcu console_owner_lock irq_context: 0 cpuidle_lock console_lock console_srcu console_owner irq_context: 0 cpuidle_lock console_lock console_srcu console_owner &port_lock_key irq_context: 0 cpuidle_lock console_lock console_srcu console_owner console_owner_lock irq_context: 0 mmu_notifier_invalidate_range_start irq_context: 0 &sb->s_type->i_lock_key#8 irq_context: 0 &dir->lock irq_context: 0 k-sk_lock-AF_QIPCRTR irq_context: 0 k-sk_lock-AF_QIPCRTR k-slock-AF_QIPCRTR irq_context: 0 k-slock-AF_QIPCRTR irq_context: 0 k-sk_lock-AF_QIPCRTR fs_reclaim irq_context: 0 k-sk_lock-AF_QIPCRTR fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 k-sk_lock-AF_QIPCRTR qrtr_ports.xa_lock irq_context: 0 k-sk_lock-AF_QIPCRTR pool_lock#2 irq_context: 0 k-sk_lock-AF_QIPCRTR qrtr_node_lock irq_context: 0 k-sk_lock-AF_QIPCRTR &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem &obj_hash[i].lock pool_lock irq_context: 0 pernet_ops_rwsem uevent_sock_mutex irq_context: 0 freezer_lock irq_context: 0 audit_backlog_wait.lock irq_context: 0 &sb->s_type->i_mutex_key#3 &c->lock irq_context: 0 &sb->s_type->i_mutex_key#3 &____s->seqcount irq_context: 0 cpu_hotplug_lock jump_label_mutex text_mutex ptlock_ptr(page)#2 irq_context: 0 crngs.lock irq_context: 0 (crypto_chain).rwsem irq_context: 0 iova_cache_mutex irq_context: 0 iova_cache_mutex cpu_hotplug_lock irq_context: 0 iova_cache_mutex cpu_hotplug_lock cpuhp_state_mutex irq_context: 0 iova_cache_mutex slab_mutex irq_context: 0 iova_cache_mutex slab_mutex fs_reclaim irq_context: 0 iova_cache_mutex slab_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 iova_cache_mutex slab_mutex pool_lock#2 irq_context: 0 iova_cache_mutex slab_mutex &c->lock irq_context: 0 iova_cache_mutex slab_mutex &n->list_lock irq_context: 0 iova_cache_mutex slab_mutex pcpu_alloc_mutex irq_context: 0 iova_cache_mutex slab_mutex pcpu_alloc_mutex pcpu_lock irq_context: 0 subsys mutex#7 irq_context: 0 subsys mutex#7 &k->k_lock irq_context: 0 pci_config_lock irq_context: 0 device_links_lock irq_context: 0 subsys mutex#8 irq_context: 0 dev_pm_qos_mtx irq_context: 0 dev_pm_qos_mtx fs_reclaim irq_context: 0 dev_pm_qos_mtx fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 dev_pm_qos_mtx pool_lock#2 irq_context: 0 dev_pm_qos_mtx &dev->power.lock irq_context: 0 dev_pm_qos_mtx pm_qos_lock irq_context: 0 dev_pm_qos_sysfs_mtx irq_context: 0 dev_pm_qos_sysfs_mtx dev_pm_qos_mtx irq_context: 0 dev_pm_qos_sysfs_mtx &root->kernfs_rwsem irq_context: 0 dev_pm_qos_sysfs_mtx fs_reclaim irq_context: 0 dev_pm_qos_sysfs_mtx fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 dev_pm_qos_sysfs_mtx pool_lock#2 irq_context: 0 dev_pm_qos_sysfs_mtx lock irq_context: 0 dev_pm_qos_sysfs_mtx lock kernfs_idr_lock irq_context: 0 dev_pm_qos_sysfs_mtx &root->kernfs_rwsem irq_context: 0 
dev_pm_qos_sysfs_mtx &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 uidhash_lock irq_context: 0 rcu_read_lock &stopper->lock irq_context: 0 rcu_read_lock &stop_pi_lock irq_context: 0 rcu_read_lock &stop_pi_lock &rq->__lock irq_context: 0 rcu_read_lock &stop_pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cpu_hotplug_lock wq_pool_mutex (console_sem).lock irq_context: 0 cpu_hotplug_lock wq_pool_mutex console_lock console_srcu console_owner_lock irq_context: 0 cpu_hotplug_lock wq_pool_mutex console_lock console_srcu console_owner irq_context: 0 cpu_hotplug_lock wq_pool_mutex console_lock console_srcu console_owner &port_lock_key irq_context: 0 cpu_hotplug_lock wq_pool_mutex console_lock console_srcu console_owner console_owner_lock irq_context: 0 detected_devices_mutex irq_context: 0 sb_writers#2 irq_context: 0 sb_writers#2 mount_lock irq_context: 0 oom_reaper_wait.lock irq_context: 0 subsys mutex#9 irq_context: 0 &pgdat->kcompactd_wait irq_context: 0 slab_mutex rcu_read_lock &pool->lock irq_context: 0 slab_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 slab_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 slab_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 slab_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 slab_mutex &rq->__lock irq_context: 0 (wq_completion)events pcpu_balance_work irq_context: 0 (wq_completion)events pcpu_balance_work pcpu_alloc_mutex irq_context: 0 (wq_completion)events pcpu_balance_work pcpu_alloc_mutex pcpu_lock irq_context: 0 (wq_completion)events pcpu_balance_work pcpu_alloc_mutex fs_reclaim irq_context: 0 (wq_completion)events pcpu_balance_work pcpu_alloc_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events pcpu_balance_work pcpu_alloc_mutex pool_lock#2 irq_context: 0 (wq_completion)events pcpu_balance_work pcpu_alloc_mutex free_vmap_area_lock irq_context: 0 (wq_completion)events pcpu_balance_work pcpu_alloc_mutex vmap_area_lock irq_context: 0 (wq_completion)events pcpu_balance_work pcpu_alloc_mutex &pcp->lock &zone->lock irq_context: 0 (wq_completion)events pcpu_balance_work pcpu_alloc_mutex &zone->lock irq_context: 0 (wq_completion)events pcpu_balance_work pcpu_alloc_mutex &____s->seqcount irq_context: 0 (wq_completion)events pcpu_balance_work pcpu_alloc_mutex init_mm.page_table_lock irq_context: 0 (wq_completion)events pcpu_balance_work pcpu_alloc_mutex &pcp->lock &zone->lock &____s->seqcount irq_context: 0 (wq_completion)events pcpu_balance_work pcpu_alloc_mutex &c->lock irq_context: 0 (wq_completion)events pcpu_balance_work pcpu_alloc_mutex &obj_hash[i].lock irq_context: 0 memory_tier_lock irq_context: 0 memory_tier_lock fs_reclaim irq_context: 0 memory_tier_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 memory_tier_lock pool_lock#2 irq_context: 0 memory_tier_lock &x->wait#9 irq_context: 0 memory_tier_lock &obj_hash[i].lock irq_context: 0 memory_tier_lock &k->list_lock irq_context: 0 memory_tier_lock &____s->seqcount irq_context: 0 memory_tier_lock &pcp->lock &zone->lock irq_context: 0 memory_tier_lock &zone->lock irq_context: 0 memory_tier_lock rcu_read_lock pool_lock#2 irq_context: 0 memory_tier_lock lock irq_context: 0 memory_tier_lock lock kernfs_idr_lock irq_context: 0 memory_tier_lock &root->kernfs_rwsem irq_context: 0 memory_tier_lock &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 memory_tier_lock bus_type_sem irq_context: 0 
memory_tier_lock sysfs_symlink_target_lock irq_context: 0 memory_tier_lock &k->k_lock irq_context: 0 memory_tier_lock &c->lock irq_context: 0 memory_tier_lock &root->kernfs_rwsem irq_context: 0 memory_tier_lock &dev->power.lock irq_context: 0 memory_tier_lock dpm_list_mtx irq_context: 0 memory_tier_lock uevent_sock_mutex irq_context: 0 memory_tier_lock running_helpers_waitq.lock irq_context: 0 memory_tier_lock &dev->mutex &k->list_lock irq_context: 0 memory_tier_lock &dev->mutex &k->k_lock irq_context: 0 memory_tier_lock &dev->mutex &dev->power.lock irq_context: 0 memory_tier_lock subsys mutex#10 irq_context: 0 memory_tier_lock rcu_state.exp_mutex rcu_node_0 irq_context: 0 memory_tier_lock rcu_state.exp_mutex &obj_hash[i].lock irq_context: 0 memory_tier_lock rcu_state.exp_mutex rcu_read_lock &pool->lock irq_context: 0 memory_tier_lock rcu_state.exp_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 memory_tier_lock rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 memory_tier_lock rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 memory_tier_lock rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 memory_tier_lock rcu_state.exp_mutex &rnp->exp_wq[2] irq_context: 0 memory_tier_lock rcu_state.exp_mutex &rq->__lock irq_context: 0 memory_tier_lock rcu_state.exp_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: hardirq &rcu_state.expedited_wq irq_context: 0 khugepaged_mutex fs_reclaim irq_context: 0 khugepaged_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 khugepaged_mutex pool_lock#2 irq_context: 0 khugepaged_mutex kthread_create_lock irq_context: 0 khugepaged_mutex &p->pi_lock irq_context: 0 khugepaged_mutex &p->pi_lock &rq->__lock irq_context: 0 khugepaged_mutex &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 khugepaged_mutex &rq->__lock irq_context: 0 khugepaged_mutex &x->wait irq_context: 0 khugepaged_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 ksm_thread_mutex irq_context: 0 ksm_thread_wait.lock irq_context: 0 khugepaged_mutex &obj_hash[i].lock irq_context: 0 khugepaged_mutex lock#2 irq_context: 0 khugepaged_mutex lock#2 &zone->lock irq_context: 0 khugepaged_mutex pcp_batch_high_lock irq_context: 0 damon_ops_lock irq_context: 0 crypto_alg_sem irq_context: 0 crypto_alg_sem (crypto_chain).rwsem irq_context: 0 cpu_hotplug_lock fs_reclaim irq_context: 0 cpu_hotplug_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 cpu_hotplug_lock pool_lock#2 irq_context: 0 cpu_hotplug_lock pcpu_alloc_mutex irq_context: 0 cpu_hotplug_lock pcpu_alloc_mutex pcpu_lock irq_context: 0 cpu_hotplug_lock &obj_hash[i].lock irq_context: 0 cpu_hotplug_lock &wq->mutex irq_context: 0 cpu_hotplug_lock &wq->mutex &pool->lock irq_context: 0 cpu_hotplug_lock kthread_create_lock irq_context: 0 cpu_hotplug_lock &p->pi_lock irq_context: 0 cpu_hotplug_lock &p->pi_lock &rq->__lock irq_context: 0 cpu_hotplug_lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cpu_hotplug_lock &rq->__lock irq_context: 0 cpu_hotplug_lock &x->wait irq_context: 0 cpu_hotplug_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 lock#3 irq_context: 0 khugepaged_mm_lock irq_context: 0 khugepaged_wait.lock irq_context: 0 cpu_hotplug_lock wq_pool_mutex &obj_hash[i].lock pool_lock irq_context: 0 cpu_hotplug_lock wq_pool_mutex &pool->lock/1 irq_context: 0 cpu_hotplug_lock 
wq_pool_mutex &pool->lock/1 rcu_read_lock &pool->lock irq_context: 0 cpu_hotplug_lock wq_pool_mutex &pool->lock/1 rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 cpu_hotplug_lock wq_pool_mutex &pool->lock/1 rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 cpu_hotplug_lock wq_pool_mutex &pool->lock/1 rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 cpu_hotplug_lock wq_pool_mutex &pool->lock/1 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 cpu_hotplug_lock wq_pool_mutex &pool->lock/1 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&pwq->unbound_release_work) irq_context: 0 (wq_completion)events (work_completion)(&pwq->unbound_release_work) &wq->mutex irq_context: 0 (wq_completion)events (work_completion)(&pwq->unbound_release_work) wq_pool_mutex irq_context: 0 (wq_completion)events (work_completion)(&pwq->unbound_release_work) &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&pwq->unbound_release_work) pool_lock#2 irq_context: 0 sb_writers#2 &sb->s_type->i_mutex_key/1 irq_context: 0 bio_slab_lock irq_context: 0 bio_slab_lock fs_reclaim irq_context: 0 bio_slab_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 bio_slab_lock pool_lock#2 irq_context: 0 bio_slab_lock slab_mutex irq_context: 0 bio_slab_lock slab_mutex fs_reclaim irq_context: 0 bio_slab_lock slab_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 bio_slab_lock slab_mutex pool_lock#2 irq_context: 0 bio_slab_lock slab_mutex &c->lock irq_context: 0 bio_slab_lock slab_mutex &n->list_lock irq_context: 0 bio_slab_lock slab_mutex pcpu_alloc_mutex irq_context: 0 bio_slab_lock slab_mutex pcpu_alloc_mutex pcpu_lock irq_context: 0 bio_slab_lock bio_slabs.xa_lock irq_context: 0 bio_slab_lock bio_slabs.xa_lock pool_lock#2 irq_context: 0 major_names_lock irq_context: 0 major_names_lock fs_reclaim irq_context: 0 major_names_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 major_names_lock pool_lock#2 irq_context: 0 major_names_lock major_names_spinlock irq_context: 0 lock kernfs_idr_lock &pcp->lock &zone->lock irq_context: 0 lock kernfs_idr_lock &zone->lock irq_context: 0 lock kernfs_idr_lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 quarantine_lock irq_context: 0 slab_mutex remove_cache_srcu irq_context: 0 slab_mutex remove_cache_srcu quarantine_lock irq_context: 0 rcu_tasks.tasks_gp_mutex rcu_state.exp_mutex &rnp->exp_wq[3] irq_context: 0 console_lock fs_reclaim irq_context: 0 console_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)rcu_gp (work_completion)(&rew->rew_work) &obj_hash[i].lock irq_context: hardirq &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&rew->rew_work) &base->lock irq_context: hardirq &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&rew->rew_work) &base->lock &obj_hash[i].lock irq_context: 0 console_lock &x->wait#9 irq_context: 0 (wq_completion)rcu_gp (work_completion)(&rew->rew_work) &rq->__lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&rew->rew_work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 console_lock &k->list_lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&rew->rew_work) (&timer.timer) irq_context: 0 console_lock gdp_mutex irq_context: 0 console_lock gdp_mutex &k->list_lock irq_context: 0 console_lock gdp_mutex fs_reclaim 
irq_context: 0 console_lock gdp_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 console_lock gdp_mutex pool_lock#2 irq_context: 0 rcu_tasks.tasks_gp_mutex (console_sem).lock irq_context: 0 console_lock gdp_mutex lock irq_context: 0 rcu_tasks.tasks_gp_mutex console_owner_lock irq_context: 0 console_lock gdp_mutex lock kernfs_idr_lock irq_context: 0 console_lock gdp_mutex &root->kernfs_rwsem irq_context: 0 console_lock gdp_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 console_lock lock irq_context: 0 console_lock lock kernfs_idr_lock irq_context: 0 console_lock &root->kernfs_rwsem irq_context: 0 console_lock &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 console_lock bus_type_sem irq_context: 0 console_lock sysfs_symlink_target_lock irq_context: 0 console_lock &root->kernfs_rwsem irq_context: 0 console_lock &dev->power.lock irq_context: 0 console_lock dpm_list_mtx irq_context: 0 console_lock uevent_sock_mutex irq_context: 0 console_lock running_helpers_waitq.lock irq_context: 0 console_lock subsys mutex#11 irq_context: 0 console_lock subsys mutex#11 &k->k_lock irq_context: 0 &meta->lock irq_context: 0 *(&acpi_gbl_hardware_lock) irq_context: 0 *(&acpi_gbl_gpe_lock) irq_context: 0 acpi_ioapic_lock ioapic_mutex irq_context: 0 &desc->request_mutex &irq_desc_lock_class vector_lock irq_context: 0 &desc->request_mutex &irq_desc_lock_class ioapic_lock irq_context: 0 &desc->request_mutex &irq_desc_lock_class mask_lock irq_context: 0 &desc->request_mutex &irq_desc_lock_class mask_lock tmp_mask_lock irq_context: 0 &desc->request_mutex &irq_desc_lock_class mask_lock tmp_mask_lock vector_lock irq_context: 0 &desc->request_mutex &irq_desc_lock_class mask_lock tmp_mask_lock ioapic_lock irq_context: 0 &desc->request_mutex &irq_desc_lock_class ioapic_lock i8259A_lock irq_context: 0 shrink_qlist.lock irq_context: 0 remove_cache_srcu_srcu_usage.lock irq_context: 0 remove_cache_srcu_srcu_usage.lock &obj_hash[i].lock irq_context: 0 &ACCESS_PRIVATE(sdp, lock) irq_context: 0 remove_cache_srcu irq_context: 0 remove_cache_srcu_srcu_usage.lock &ACCESS_PRIVATE(sdp, lock) irq_context: 0 remove_cache_srcu_srcu_usage.lock rcu_read_lock &pool->lock irq_context: 0 remove_cache_srcu_srcu_usage.lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 remove_cache_srcu_srcu_usage.lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 remove_cache_srcu_srcu_usage.lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 remove_cache_srcu_srcu_usage.lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)rcu_gp (work_completion)(&(&ssp->srcu_sup->work)->work) &ssp->srcu_sup->srcu_gp_mutex remove_cache_srcu_srcu_usage.lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&(&ssp->srcu_sup->work)->work) remove_cache_srcu_srcu_usage.lock irq_context: 0 &x->wait#3 irq_context: 0 (wq_completion)rcu_gp (work_completion)(&(&ssp->srcu_sup->work)->work) &ssp->srcu_sup->srcu_gp_mutex &ssp->srcu_sup->srcu_cb_mutex remove_cache_srcu_srcu_usage.lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&(&ssp->srcu_sup->work)->work) &ssp->srcu_sup->srcu_cb_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&(&ssp->srcu_sup->work)->work) &ssp->srcu_sup->srcu_cb_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&(&ssp->srcu_sup->work)->work) &ssp->srcu_sup->srcu_cb_mutex rcu_read_lock 
&pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cpu_hotplug_lock flush_lock irq_context: 0 cpu_hotplug_lock flush_lock &obj_hash[i].lock irq_context: 0 cpu_hotplug_lock flush_lock rcu_read_lock &pool->lock irq_context: 0 cpu_hotplug_lock flush_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 cpu_hotplug_lock flush_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 cpu_hotplug_lock flush_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 cpu_hotplug_lock flush_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cpu_hotplug_lock flush_lock (work_completion)(&sfw->work) irq_context: 0 cpu_hotplug_lock flush_lock rcu_read_lock (wq_completion)slub_flushwq irq_context: 0 cpu_hotplug_lock flush_lock &x->wait#10 irq_context: 0 cpu_hotplug_lock flush_lock &rq->__lock irq_context: 0 cpu_hotplug_lock flush_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cpu_hotplug_lock flush_lock rcu_read_lock &rq->__lock irq_context: 0 cpu_hotplug_lock flush_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#2 &sb->s_type->i_mutex_key/1 rename_lock.seqcount irq_context: 0 (wq_completion)slub_flushwq irq_context: 0 (wq_completion)slub_flushwq (work_completion)(&sfw->work) irq_context: 0 (wq_completion)slub_flushwq (work_completion)(&sfw->work) &c->lock irq_context: 0 (wq_completion)slub_flushwq (work_completion)(&sfw->work) &n->list_lock irq_context: 0 (wq_completion)slub_flushwq (work_completion)(&sfw->work) rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)slub_flushwq (work_completion)(&sfw->work) &obj_hash[i].lock irq_context: 0 (wq_completion)slub_flushwq (work_completion)(&barr->work) irq_context: 0 (wq_completion)slub_flushwq (work_completion)(&barr->work) &x->wait#10 irq_context: 0 (wq_completion)slub_flushwq (work_completion)(&barr->work) &x->wait#10 &p->pi_lock irq_context: 0 (wq_completion)slub_flushwq (work_completion)(&barr->work) &x->wait#10 &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)slub_flushwq (work_completion)(&barr->work) &x->wait#10 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &n->list_lock &c->lock irq_context: 0 system_transition_mutex irq_context: 0 (power_off_prep_handler_list).rwsem irq_context: 0 power_off_handler_list.lock irq_context: 0 (restart_prep_handler_list).rwsem irq_context: 0 (reboot_notifier_list).rwsem irq_context: 0 *(&acpi_gbl_gpe_lock) (console_sem).lock irq_context: 0 *(&acpi_gbl_gpe_lock) console_lock console_srcu console_owner_lock irq_context: 0 *(&acpi_gbl_gpe_lock) console_lock console_srcu console_owner irq_context: 0 *(&acpi_gbl_gpe_lock) console_lock console_srcu console_owner &port_lock_key irq_context: 0 *(&acpi_gbl_gpe_lock) console_lock console_srcu console_owner console_owner_lock irq_context: 0 acpi_scan_lock irq_context: 0 acpi_scan_lock semaphore->lock irq_context: 0 acpi_scan_lock fs_reclaim irq_context: 0 acpi_scan_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 acpi_scan_lock pool_lock#2 irq_context: 0 acpi_scan_lock &obj_hash[i].lock irq_context: 0 acpi_scan_lock &x->wait#9 irq_context: 0 acpi_scan_lock acpi_device_lock irq_context: 0 acpi_scan_lock acpi_device_lock fs_reclaim irq_context: 0 acpi_scan_lock acpi_device_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 acpi_scan_lock acpi_device_lock &c->lock irq_context: 0 acpi_scan_lock acpi_device_lock &pcp->lock &zone->lock 
irq_context: 0 acpi_scan_lock acpi_device_lock &zone->lock irq_context: 0 acpi_scan_lock acpi_device_lock &____s->seqcount irq_context: 0 acpi_scan_lock acpi_device_lock pool_lock#2 irq_context: 0 acpi_scan_lock acpi_device_lock &xa->xa_lock#2 irq_context: 0 acpi_scan_lock acpi_device_lock semaphore->lock irq_context: 0 acpi_scan_lock acpi_device_lock &obj_hash[i].lock irq_context: 0 acpi_scan_lock &k->list_lock irq_context: 0 acpi_scan_lock &c->lock irq_context: 0 acpi_scan_lock &pcp->lock &zone->lock irq_context: 0 acpi_scan_lock &zone->lock irq_context: 0 acpi_scan_lock &____s->seqcount irq_context: 0 acpi_scan_lock lock irq_context: 0 acpi_scan_lock lock kernfs_idr_lock irq_context: 0 acpi_scan_lock &root->kernfs_rwsem irq_context: 0 acpi_scan_lock &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 acpi_scan_lock bus_type_sem irq_context: 0 acpi_scan_lock sysfs_symlink_target_lock irq_context: 0 acpi_scan_lock &k->k_lock irq_context: 0 acpi_scan_lock &root->kernfs_rwsem irq_context: 0 acpi_scan_lock &dev->power.lock irq_context: 0 acpi_scan_lock dpm_list_mtx irq_context: 0 acpi_scan_lock &dev->mutex &k->list_lock irq_context: 0 acpi_scan_lock &dev->mutex &k->k_lock irq_context: 0 acpi_scan_lock &dev->mutex &dev->power.lock irq_context: 0 acpi_scan_lock subsys mutex#12 irq_context: 0 acpi_scan_lock uevent_sock_mutex irq_context: 0 acpi_scan_lock running_helpers_waitq.lock irq_context: 0 acpi_scan_lock rcu_read_lock pool_lock#2 irq_context: 0 acpi_scan_lock *(&acpi_gbl_reference_count_lock) irq_context: 0 acpi_scan_lock &n->list_lock irq_context: 0 acpi_scan_lock &n->list_lock &c->lock irq_context: 0 acpi_scan_lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 acpi_scan_lock pci_config_lock irq_context: 0 acpi_scan_lock &rq->__lock irq_context: 0 acpi_scan_lock batched_entropy_u8.lock irq_context: 0 acpi_scan_lock kfence_freelist_lock irq_context: 0 acpi_scan_lock &meta->lock irq_context: 0 acpi_scan_lock lock kernfs_idr_lock pool_lock#2 irq_context: 0 acpi_scan_lock &obj_hash[i].lock pool_lock irq_context: 0 acpi_scan_lock lock kernfs_idr_lock &c->lock irq_context: 0 acpi_scan_lock lock kernfs_idr_lock &pcp->lock &zone->lock irq_context: 0 acpi_scan_lock lock kernfs_idr_lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 acpi_scan_lock lock kernfs_idr_lock &____s->seqcount irq_context: 0 acpi_scan_lock quarantine_lock irq_context: 0 acpi_scan_lock (console_sem).lock irq_context: 0 acpi_scan_lock console_lock console_srcu console_owner_lock irq_context: 0 acpi_scan_lock console_lock console_srcu console_owner irq_context: 0 acpi_scan_lock console_lock console_srcu console_owner &port_lock_key irq_context: 0 acpi_scan_lock console_lock console_srcu console_owner console_owner_lock irq_context: 0 acpi_scan_lock pci_bus_sem irq_context: 0 acpi_scan_lock pci_mmcfg_lock irq_context: 0 acpi_scan_lock resource_lock irq_context: 0 acpi_scan_lock &device->physical_node_lock irq_context: 0 acpi_scan_lock &device->physical_node_lock sysfs_symlink_target_lock irq_context: 0 acpi_scan_lock &device->physical_node_lock fs_reclaim irq_context: 0 acpi_scan_lock &device->physical_node_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 acpi_scan_lock &device->physical_node_lock pool_lock#2 irq_context: 0 acpi_scan_lock &device->physical_node_lock lock irq_context: 0 acpi_scan_lock &device->physical_node_lock lock kernfs_idr_lock irq_context: 0 acpi_scan_lock &device->physical_node_lock &root->kernfs_rwsem irq_context: 0 acpi_scan_lock &device->physical_node_lock 
&root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 acpi_scan_lock fwnode_link_lock irq_context: 0 acpi_scan_lock fwnode_link_lock &k->k_lock irq_context: 0 acpi_scan_lock devtree_lock irq_context: 0 acpi_scan_lock gdp_mutex irq_context: 0 acpi_scan_lock gdp_mutex &k->list_lock irq_context: 0 acpi_scan_lock gdp_mutex fs_reclaim irq_context: 0 acpi_scan_lock gdp_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 acpi_scan_lock gdp_mutex &c->lock irq_context: 0 acpi_scan_lock gdp_mutex &pcp->lock &zone->lock irq_context: 0 acpi_scan_lock gdp_mutex &zone->lock irq_context: 0 acpi_scan_lock gdp_mutex &pcp->lock &zone->lock &____s->seqcount irq_context: 0 acpi_scan_lock gdp_mutex &____s->seqcount irq_context: 0 acpi_scan_lock gdp_mutex pool_lock#2 irq_context: 0 acpi_scan_lock gdp_mutex lock irq_context: 0 acpi_scan_lock gdp_mutex lock kernfs_idr_lock irq_context: 0 acpi_scan_lock gdp_mutex &root->kernfs_rwsem irq_context: 0 acpi_scan_lock gdp_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 acpi_scan_lock subsys mutex#13 irq_context: 0 acpi_scan_lock subsys mutex#13 &k->k_lock irq_context: 0 acpi_scan_lock pci_bus_sem irq_context: 0 acpi_scan_lock pci_acpi_companion_lookup_sem irq_context: 0 acpi_scan_lock pci_slot_mutex irq_context: 0 acpi_scan_lock tk_core.seq.seqcount irq_context: 0 acpi_scan_lock resource_alignment_lock irq_context: 0 acpi_scan_lock device_links_srcu irq_context: 0 acpi_scan_lock &dev->power.lock &dev->power.lock/1 irq_context: 0 acpi_scan_lock subsys mutex#14 irq_context: 0 acpi_scan_lock acpi_pm_notifier_install_lock irq_context: 0 acpi_scan_lock acpi_pm_notifier_install_lock semaphore->lock irq_context: 0 acpi_scan_lock acpi_pm_notifier_install_lock fs_reclaim irq_context: 0 acpi_scan_lock acpi_pm_notifier_install_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 acpi_scan_lock acpi_pm_notifier_install_lock pool_lock#2 irq_context: 0 acpi_scan_lock acpi_pm_notifier_install_lock *(&acpi_gbl_reference_count_lock) irq_context: 0 acpi_scan_lock acpi_pm_notifier_install_lock acpi_pm_notifier_lock irq_context: 0 acpi_scan_lock acpi_pm_notifier_install_lock acpi_pm_notifier_lock fs_reclaim irq_context: 0 acpi_scan_lock acpi_pm_notifier_install_lock acpi_pm_notifier_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 acpi_scan_lock acpi_pm_notifier_install_lock acpi_pm_notifier_lock pool_lock#2 irq_context: 0 acpi_scan_lock acpi_pm_notifier_install_lock acpi_pm_notifier_lock wakeup_ida.xa_lock irq_context: 0 acpi_scan_lock acpi_pm_notifier_install_lock acpi_pm_notifier_lock &x->wait#9 irq_context: 0 acpi_scan_lock acpi_pm_notifier_install_lock acpi_pm_notifier_lock &obj_hash[i].lock irq_context: 0 acpi_scan_lock acpi_pm_notifier_install_lock acpi_pm_notifier_lock &k->list_lock irq_context: 0 acpi_scan_lock acpi_pm_notifier_install_lock acpi_pm_notifier_lock gdp_mutex irq_context: 0 acpi_scan_lock acpi_pm_notifier_install_lock acpi_pm_notifier_lock gdp_mutex &k->list_lock irq_context: 0 acpi_scan_lock acpi_pm_notifier_install_lock acpi_pm_notifier_lock gdp_mutex fs_reclaim irq_context: 0 acpi_scan_lock acpi_pm_notifier_install_lock acpi_pm_notifier_lock gdp_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 acpi_scan_lock acpi_pm_notifier_install_lock acpi_pm_notifier_lock gdp_mutex pool_lock#2 irq_context: 0 acpi_scan_lock acpi_pm_notifier_install_lock acpi_pm_notifier_lock gdp_mutex lock irq_context: 0 acpi_scan_lock acpi_pm_notifier_install_lock acpi_pm_notifier_lock gdp_mutex lock 
kernfs_idr_lock irq_context: 0 acpi_scan_lock acpi_pm_notifier_install_lock acpi_pm_notifier_lock gdp_mutex &root->kernfs_rwsem irq_context: 0 acpi_scan_lock acpi_pm_notifier_install_lock acpi_pm_notifier_lock gdp_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 acpi_scan_lock acpi_pm_notifier_install_lock acpi_pm_notifier_lock lock irq_context: 0 acpi_scan_lock acpi_pm_notifier_install_lock acpi_pm_notifier_lock lock kernfs_idr_lock irq_context: 0 acpi_scan_lock acpi_pm_notifier_install_lock acpi_pm_notifier_lock &root->kernfs_rwsem irq_context: 0 acpi_scan_lock acpi_pm_notifier_install_lock acpi_pm_notifier_lock &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 acpi_scan_lock acpi_pm_notifier_install_lock acpi_pm_notifier_lock bus_type_sem irq_context: 0 acpi_scan_lock acpi_pm_notifier_install_lock acpi_pm_notifier_lock sysfs_symlink_target_lock irq_context: 0 acpi_scan_lock acpi_pm_notifier_install_lock acpi_pm_notifier_lock lock kernfs_idr_lock pool_lock#2 irq_context: 0 acpi_scan_lock acpi_pm_notifier_install_lock acpi_pm_notifier_lock &c->lock irq_context: 0 acpi_scan_lock acpi_pm_notifier_install_lock acpi_pm_notifier_lock &pcp->lock &zone->lock irq_context: 0 acpi_scan_lock acpi_pm_notifier_install_lock acpi_pm_notifier_lock &zone->lock irq_context: 0 acpi_scan_lock acpi_pm_notifier_install_lock acpi_pm_notifier_lock &____s->seqcount irq_context: 0 acpi_scan_lock acpi_pm_notifier_install_lock acpi_pm_notifier_lock uevent_sock_mutex irq_context: 0 acpi_scan_lock acpi_pm_notifier_install_lock acpi_pm_notifier_lock running_helpers_waitq.lock irq_context: 0 acpi_scan_lock acpi_pm_notifier_install_lock acpi_pm_notifier_lock &k->k_lock irq_context: 0 acpi_scan_lock acpi_pm_notifier_install_lock acpi_pm_notifier_lock subsys mutex#15 irq_context: 0 acpi_scan_lock acpi_pm_notifier_install_lock acpi_pm_notifier_lock subsys mutex#15 &k->k_lock irq_context: 0 acpi_scan_lock acpi_pm_notifier_install_lock acpi_pm_notifier_lock events_lock irq_context: 0 &pgdat->kswapd_lock irq_context: 0 acpi_scan_lock acpi_pm_notifier_install_lock &c->lock irq_context: 0 acpi_scan_lock acpi_pm_notifier_install_lock &pcp->lock &zone->lock irq_context: 0 acpi_scan_lock acpi_pm_notifier_install_lock &zone->lock irq_context: 0 acpi_scan_lock acpi_pm_notifier_install_lock &____s->seqcount irq_context: softirq drivers/char/random.c:251 irq_context: softirq drivers/char/random.c:251 rcu_read_lock &pool->lock/1 irq_context: softirq drivers/char/random.c:251 rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: softirq drivers/char/random.c:251 rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: softirq drivers/char/random.c:251 rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: softirq drivers/char/random.c:251 rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_unbound (next_reseed).work irq_context: 0 (wq_completion)events_unbound (next_reseed).work &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound (next_reseed).work &base->lock irq_context: 0 (wq_completion)events_unbound (next_reseed).work &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound (next_reseed).work input_pool.lock irq_context: 0 (wq_completion)events_unbound (next_reseed).work base_crng.lock irq_context: 0 acpi_scan_lock acpi_pm_notifier_install_lock acpi_pm_notifier_lock gdp_mutex &c->lock irq_context: 0 acpi_scan_lock acpi_pm_notifier_install_lock acpi_pm_notifier_lock gdp_mutex &pcp->lock 
&zone->lock irq_context: 0 acpi_scan_lock acpi_pm_notifier_install_lock acpi_pm_notifier_lock gdp_mutex &zone->lock irq_context: 0 acpi_scan_lock acpi_pm_notifier_install_lock acpi_pm_notifier_lock gdp_mutex &____s->seqcount irq_context: 0 acpi_scan_lock acpi_pm_notifier_install_lock acpi_pm_notifier_lock &obj_hash[i].lock pool_lock irq_context: softirq mm/vmstat.c:2014 irq_context: softirq mm/vmstat.c:2014 rcu_read_lock &pool->lock irq_context: softirq mm/vmstat.c:2014 rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: softirq mm/vmstat.c:2014 rcu_read_lock &pool->lock &p->pi_lock irq_context: softirq mm/vmstat.c:2014 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: softirq mm/vmstat.c:2014 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (shepherd).work irq_context: 0 (wq_completion)events (shepherd).work cpu_hotplug_lock irq_context: 0 (wq_completion)events (shepherd).work cpu_hotplug_lock rcu_read_lock &pool->lock irq_context: 0 (wq_completion)events (shepherd).work cpu_hotplug_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (shepherd).work cpu_hotplug_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 (wq_completion)events (shepherd).work cpu_hotplug_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events (shepherd).work cpu_hotplug_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (shepherd).work &obj_hash[i].lock irq_context: 0 (wq_completion)events (shepherd).work &base->lock irq_context: 0 (wq_completion)events (shepherd).work &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)mm_percpu_wq irq_context: 0 (wq_completion)mm_percpu_wq (work_completion)(&(({ do { const void *__vpp_verify = (typeof((&vmstat_work) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((&vmstat_work))) *)((&vmstat_work)))); (typeof((typeof(*((&vmstat_work))) *)((&vmstat_work)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); }))->work) irq_context: 0 (wq_completion)mm_percpu_wq (work_completion)(&(({ do { const void *__vpp_verify = (typeof((&vmstat_work) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((&vmstat_work))) *)((&vmstat_work)))); (typeof((typeof(*((&vmstat_work))) *)((&vmstat_work)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); }))->work) &obj_hash[i].lock irq_context: 0 (wq_completion)mm_percpu_wq (work_completion)(&(({ do { const void *__vpp_verify = (typeof((&vmstat_work) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((&vmstat_work))) *)((&vmstat_work)))); (typeof((typeof(*((&vmstat_work))) *)((&vmstat_work)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); }))->work) &base->lock irq_context: 0 (wq_completion)mm_percpu_wq (work_completion)(&(({ do { const void *__vpp_verify = (typeof((&vmstat_work) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((&vmstat_work))) *)((&vmstat_work)))); (typeof((typeof(*((&vmstat_work))) *)((&vmstat_work)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); }))->work) &base->lock &obj_hash[i].lock irq_context: 0 acpi_scan_lock batched_entropy_u8.lock crngs.lock irq_context: 0 acpi_scan_lock batched_entropy_u8.lock crngs.lock base_crng.lock 
irq_context: 0 acpi_scan_lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 acpi_scan_lock fill_pool_map-wait-type-override pool_lock irq_context: 0 acpi_scan_lock acpi_pm_notifier_install_lock acpi_pm_notifier_lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 acpi_scan_lock pci_rescan_remove_lock irq_context: 0 acpi_scan_lock pci_rescan_remove_lock &dev->mutex &dev->power.lock irq_context: 0 acpi_scan_lock pci_rescan_remove_lock &dev->mutex &k->list_lock irq_context: 0 acpi_scan_lock pci_rescan_remove_lock &dev->mutex &k->k_lock irq_context: 0 acpi_scan_lock subsys mutex#3 irq_context: 0 acpi_scan_lock acpi_link_lock irq_context: 0 acpi_scan_lock acpi_link_lock fs_reclaim irq_context: 0 acpi_scan_lock acpi_link_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 acpi_scan_lock acpi_link_lock pool_lock#2 irq_context: 0 acpi_scan_lock acpi_link_lock semaphore->lock irq_context: 0 acpi_scan_lock acpi_link_lock &obj_hash[i].lock irq_context: 0 acpi_scan_lock acpi_link_lock *(&acpi_gbl_reference_count_lock) irq_context: 0 acpi_scan_lock acpi_link_lock &c->lock irq_context: 0 acpi_scan_lock acpi_link_lock &pcp->lock &zone->lock irq_context: 0 acpi_scan_lock acpi_link_lock &zone->lock irq_context: 0 acpi_scan_lock acpi_link_lock &____s->seqcount irq_context: 0 acpi_scan_lock acpi_link_lock pci_config_lock irq_context: 0 acpi_scan_lock acpi_link_lock rcu_read_lock pool_lock#2 irq_context: 0 acpi_scan_lock acpi_link_lock (console_sem).lock irq_context: 0 acpi_scan_lock acpi_link_lock console_lock console_srcu console_owner_lock irq_context: 0 acpi_scan_lock acpi_link_lock console_lock console_srcu console_owner irq_context: 0 acpi_scan_lock acpi_link_lock console_lock console_srcu console_owner &port_lock_key irq_context: 0 acpi_scan_lock acpi_link_lock console_lock console_srcu console_owner console_owner_lock irq_context: 0 acpi_scan_lock acpi_link_lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 acpi_scan_lock wakeup_ida.xa_lock irq_context: 0 acpi_scan_lock subsys mutex#15 irq_context: 0 acpi_scan_lock subsys mutex#15 &k->k_lock irq_context: 0 acpi_scan_lock events_lock irq_context: 0 acpi_scan_lock power_resource_list_lock irq_context: 0 cpu_hotplug_lock wq_pool_mutex fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 cpu_hotplug_lock wq_pool_mutex fill_pool_map-wait-type-override pool_lock irq_context: 0 acpi_device_lock irq_context: 0 &(&priv->bus_notifier)->rwsem irq_context: 0 k-sk_lock-AF_NETLINK irq_context: 0 k-sk_lock-AF_NETLINK k-slock-AF_NETLINK irq_context: 0 k-sk_lock-AF_NETLINK rcu_read_lock rhashtable_bucket irq_context: 0 k-slock-AF_NETLINK irq_context: 0 &type->s_umount_key#10/1 irq_context: 0 &type->s_umount_key#10/1 fs_reclaim irq_context: 0 &type->s_umount_key#10/1 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#10/1 pool_lock#2 irq_context: 0 &type->s_umount_key#10/1 pcpu_alloc_mutex irq_context: 0 &type->s_umount_key#10/1 pcpu_alloc_mutex pcpu_lock irq_context: 0 &type->s_umount_key#10/1 shrinker_rwsem irq_context: 0 &type->s_umount_key#10/1 list_lrus_mutex irq_context: 0 &type->s_umount_key#10/1 sb_lock irq_context: 0 &type->s_umount_key#10/1 sb_lock unnamed_dev_ida.xa_lock irq_context: 0 &type->s_umount_key#10/1 &____s->seqcount irq_context: 0 &type->s_umount_key#10/1 &c->lock irq_context: 0 &type->s_umount_key#10/1 mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#10/1 &sb->s_type->i_lock_key#9 irq_context: 0 &type->s_umount_key#10/1 &s->s_inode_list_lock 
irq_context: 0 &type->s_umount_key#10/1 tk_core.seq.seqcount irq_context: 0 &type->s_umount_key#10/1 &sb->s_type->i_lock_key#9 &dentry->d_lock irq_context: 0 &type->s_umount_key#10/1 &dentry->d_lock irq_context: 0 &type->s_umount_key#11/1 irq_context: 0 &type->s_umount_key#11/1 fs_reclaim irq_context: 0 &type->s_umount_key#11/1 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#11/1 pool_lock#2 irq_context: 0 &type->s_umount_key#11/1 pcpu_alloc_mutex irq_context: 0 &type->s_umount_key#11/1 pcpu_alloc_mutex pcpu_lock irq_context: 0 &type->s_umount_key#11/1 shrinker_rwsem irq_context: 0 &type->s_umount_key#11/1 list_lrus_mutex irq_context: 0 &type->s_umount_key#11/1 sb_lock irq_context: 0 &type->s_umount_key#11/1 sb_lock unnamed_dev_ida.xa_lock irq_context: 0 &type->s_umount_key#11/1 &pcp->lock &zone->lock irq_context: 0 &type->s_umount_key#11/1 &zone->lock irq_context: 0 &type->s_umount_key#11/1 &____s->seqcount irq_context: 0 &type->s_umount_key#11/1 rcu_read_lock pool_lock#2 irq_context: 0 &type->s_umount_key#11/1 &obj_hash[i].lock irq_context: 0 &type->s_umount_key#11/1 mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#11/1 &sb->s_type->i_lock_key#10 irq_context: 0 &type->s_umount_key#11/1 &s->s_inode_list_lock irq_context: 0 &type->s_umount_key#11/1 tk_core.seq.seqcount irq_context: 0 &type->s_umount_key#11/1 &sb->s_type->i_lock_key#10 &dentry->d_lock irq_context: 0 &type->s_umount_key#11/1 &dentry->d_lock irq_context: 0 &mm->mmap_lock irq_context: 0 &mm->mmap_lock reservation_ww_class_acquire irq_context: 0 &mm->mmap_lock reservation_ww_class_acquire reservation_ww_class_mutex irq_context: 0 &mm->mmap_lock reservation_ww_class_acquire reservation_ww_class_mutex &rq->__lock irq_context: 0 &mm->mmap_lock reservation_ww_class_acquire reservation_ww_class_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock reservation_ww_class_acquire reservation_ww_class_mutex fs_reclaim irq_context: 0 &mm->mmap_lock reservation_ww_class_acquire reservation_ww_class_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &mm->mmap_lock reservation_ww_class_acquire reservation_ww_class_mutex fs_reclaim &mapping->i_mmap_rwsem irq_context: 0 &mm->mmap_lock reservation_ww_class_acquire reservation_ww_class_mutex fs_reclaim mmu_notifier_invalidate_range_start dma_fence_map irq_context: 0 delayed_uprobe_lock irq_context: 0 key irq_context: 0 attribute_container_mutex irq_context: 0 triggers_list_lock irq_context: 0 leds_list_lock irq_context: 0 &sb->s_type->i_mutex_key#3 &pcp->lock &zone->lock irq_context: 0 &sb->s_type->i_mutex_key#3 &zone->lock irq_context: 0 &sb->s_type->i_mutex_key#3 &pcp->lock &zone->lock &____s->seqcount irq_context: 0 bus_type_sem irq_context: 0 (usb_notifier_list).rwsem irq_context: 0 fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 fill_pool_map-wait-type-override pool_lock irq_context: 0 &device->physical_node_lock irq_context: 0 rc_map_lock irq_context: 0 subsys mutex#16 irq_context: 0 fill_pool_map-wait-type-override &c->lock irq_context: 0 fill_pool_map-wait-type-override &pcp->lock &zone->lock irq_context: 0 fill_pool_map-wait-type-override &zone->lock irq_context: 0 fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 resource_lock irq_context: 0 free_vmap_area_lock &obj_hash[i].lock irq_context: 0 free_vmap_area_lock pool_lock#2 irq_context: 0 &entry->access irq_context: 0 info_mutex irq_context: 0 info_mutex proc_subdir_lock irq_context: 0 info_mutex fs_reclaim 
irq_context: 0 info_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 info_mutex &pcp->lock &zone->lock irq_context: 0 info_mutex &zone->lock irq_context: 0 info_mutex &____s->seqcount irq_context: 0 info_mutex pool_lock#2 irq_context: 0 info_mutex rcu_read_lock pool_lock#2 irq_context: 0 info_mutex &obj_hash[i].lock irq_context: 0 info_mutex proc_inum_ida.xa_lock irq_context: 0 info_mutex proc_subdir_lock irq_context: 0 info_mutex &c->lock irq_context: 0 kobj_ns_type_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex fs_reclaim irq_context: 0 pernet_ops_rwsem rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 pernet_ops_rwsem rtnl_mutex pool_lock#2 irq_context: 0 pernet_ops_rwsem rtnl_mutex pcpu_alloc_mutex irq_context: 0 pernet_ops_rwsem rtnl_mutex pcpu_alloc_mutex pcpu_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &x->wait#9 irq_context: 0 pernet_ops_rwsem rtnl_mutex &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &c->lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &____s->seqcount irq_context: 0 pernet_ops_rwsem rtnl_mutex &k->list_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex gdp_mutex irq_context: 0 pernet_ops_rwsem rtnl_mutex gdp_mutex &k->list_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex gdp_mutex fs_reclaim irq_context: 0 pernet_ops_rwsem rtnl_mutex gdp_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 pernet_ops_rwsem rtnl_mutex gdp_mutex pool_lock#2 irq_context: 0 pernet_ops_rwsem rtnl_mutex gdp_mutex lock irq_context: 0 pernet_ops_rwsem rtnl_mutex gdp_mutex lock kernfs_idr_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex gdp_mutex &root->kernfs_rwsem irq_context: 0 pernet_ops_rwsem rtnl_mutex gdp_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 pernet_ops_rwsem rtnl_mutex gdp_mutex kobj_ns_type_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex lock irq_context: 0 pernet_ops_rwsem rtnl_mutex lock kernfs_idr_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem irq_context: 0 pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 pernet_ops_rwsem rtnl_mutex bus_type_sem irq_context: 0 pernet_ops_rwsem rtnl_mutex sysfs_symlink_target_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex lock kernfs_idr_lock pool_lock#2 irq_context: 0 pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem irq_context: 0 pernet_ops_rwsem rtnl_mutex &dev->power.lock irq_context: 0 pernet_ops_rwsem rtnl_mutex dpm_list_mtx irq_context: 0 pernet_ops_rwsem rtnl_mutex uevent_sock_mutex irq_context: 0 pernet_ops_rwsem rtnl_mutex uevent_sock_mutex fs_reclaim irq_context: 0 pernet_ops_rwsem rtnl_mutex uevent_sock_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 pernet_ops_rwsem rtnl_mutex uevent_sock_mutex &____s->seqcount irq_context: 0 pernet_ops_rwsem rtnl_mutex uevent_sock_mutex pool_lock#2 irq_context: 0 pernet_ops_rwsem rtnl_mutex uevent_sock_mutex &c->lock irq_context: 0 pernet_ops_rwsem rtnl_mutex uevent_sock_mutex nl_table_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex uevent_sock_mutex &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem rtnl_mutex uevent_sock_mutex nl_table_wait.lock irq_context: 0 pernet_ops_rwsem rtnl_mutex running_helpers_waitq.lock irq_context: 0 pernet_ops_rwsem rtnl_mutex subsys mutex#17 irq_context: 0 pernet_ops_rwsem rtnl_mutex subsys mutex#17 &k->k_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &dir->lock#2 irq_context: 0 pernet_ops_rwsem rtnl_mutex &pcp->lock &zone->lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &zone->lock 
irq_context: 0 pernet_ops_rwsem rtnl_mutex dev_hotplug_mutex irq_context: 0 pernet_ops_rwsem rtnl_mutex dev_hotplug_mutex &dev->power.lock irq_context: 0 pernet_ops_rwsem rtnl_mutex dev_base_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex input_pool.lock irq_context: 0 pernet_ops_rwsem rtnl_mutex nl_table_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex nl_table_wait.lock irq_context: 0 rcu_read_lock &obj_hash[i].lock irq_context: 0 rcu_read_lock nl_table_lock irq_context: 0 rcu_read_lock nl_table_wait.lock irq_context: 0 qdisc_mod_lock irq_context: 0 rtnl_mutex &pcp->lock &zone->lock &____s->seqcount irq_context: 0 rtnl_mutex rcu_read_lock pool_lock#2 irq_context: 0 rtnl_mutex &obj_hash[i].lock irq_context: 0 bt_proto_lock irq_context: 0 hci_cb_list_lock irq_context: 0 mgmt_chan_list_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex net_rwsem irq_context: 0 pernet_ops_rwsem rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 rate_ctrl_mutex irq_context: 0 rate_ctrl_mutex fs_reclaim irq_context: 0 rate_ctrl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 rate_ctrl_mutex pool_lock#2 irq_context: 0 netlbl_domhsh_lock irq_context: 0 netlbl_unlhsh_lock irq_context: 0 rcu_read_lock netlbl_domhsh_lock irq_context: 0 rcu_read_lock netlbl_domhsh_lock pool_lock#2 irq_context: 0 misc_mtx irq_context: 0 misc_mtx fs_reclaim irq_context: 0 misc_mtx fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 misc_mtx pool_lock#2 irq_context: 0 misc_mtx &x->wait#9 irq_context: 0 misc_mtx &obj_hash[i].lock irq_context: 0 misc_mtx &k->list_lock irq_context: 0 misc_mtx gdp_mutex irq_context: 0 misc_mtx gdp_mutex &k->list_lock irq_context: 0 misc_mtx gdp_mutex fs_reclaim irq_context: 0 misc_mtx gdp_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 misc_mtx gdp_mutex pool_lock#2 irq_context: 0 misc_mtx gdp_mutex lock irq_context: 0 misc_mtx gdp_mutex lock kernfs_idr_lock irq_context: 0 misc_mtx gdp_mutex &root->kernfs_rwsem irq_context: 0 misc_mtx gdp_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 misc_mtx lock irq_context: 0 misc_mtx lock kernfs_idr_lock irq_context: 0 misc_mtx &root->kernfs_rwsem irq_context: 0 misc_mtx &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 misc_mtx bus_type_sem irq_context: 0 misc_mtx &c->lock irq_context: 0 misc_mtx &pcp->lock &zone->lock irq_context: 0 misc_mtx &zone->lock irq_context: 0 misc_mtx &____s->seqcount irq_context: 0 misc_mtx sysfs_symlink_target_lock irq_context: 0 misc_mtx &root->kernfs_rwsem irq_context: 0 misc_mtx &dev->power.lock irq_context: 0 misc_mtx dpm_list_mtx irq_context: 0 misc_mtx req_lock irq_context: 0 misc_mtx &p->pi_lock irq_context: 0 misc_mtx &p->pi_lock &cfs_rq->removed.lock irq_context: 0 misc_mtx &p->pi_lock &rq->__lock irq_context: 0 misc_mtx &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 misc_mtx &rq->__lock irq_context: 0 sb_writers irq_context: 0 sb_writers mount_lock irq_context: 0 sb_writers &type->i_mutex_dir_key/1 irq_context: 0 sb_writers &type->i_mutex_dir_key/1 rename_lock.seqcount irq_context: 0 sb_writers &type->i_mutex_dir_key/1 fs_reclaim irq_context: 0 sb_writers &type->i_mutex_dir_key/1 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers &type->i_mutex_dir_key/1 pool_lock#2 irq_context: 0 sb_writers &type->i_mutex_dir_key/1 &dentry->d_lock irq_context: 0 sb_writers &type->i_mutex_dir_key/1 &obj_hash[i].lock irq_context: 0 sb_writers &type->i_mutex_dir_key/1 &sbinfo->stat_lock irq_context: 0 sb_writers 
&type->i_mutex_dir_key/1 mmu_notifier_invalidate_range_start irq_context: 0 sb_writers &type->i_mutex_dir_key/1 &sb->s_type->i_lock_key#5 irq_context: 0 sb_writers &type->i_mutex_dir_key/1 &s->s_inode_list_lock irq_context: 0 sb_writers &type->i_mutex_dir_key/1 tk_core.seq.seqcount irq_context: 0 sb_writers &type->i_mutex_dir_key/1 batched_entropy_u32.lock irq_context: 0 sb_writers &type->i_mutex_dir_key/1 batched_entropy_u32.lock crngs.lock irq_context: 0 sb_writers &type->i_mutex_dir_key/1 &sb->s_type->i_lock_key#5 &dentry->d_lock irq_context: 0 sb_writers &type->i_mutex_dir_key/1 &sb->s_type->i_mutex_key#4 irq_context: 0 sb_writers &type->i_mutex_dir_key/1 &sb->s_type->i_mutex_key#4 tk_core.seq.seqcount irq_context: 0 sb_writers &type->i_mutex_dir_key/1 rcu_read_lock &dentry->d_lock irq_context: 0 &x->wait#11 irq_context: 0 misc_mtx &x->wait#11 irq_context: 0 misc_mtx uevent_sock_mutex irq_context: 0 misc_mtx running_helpers_waitq.lock irq_context: 0 misc_mtx subsys mutex#18 irq_context: 0 misc_mtx subsys mutex#18 &k->k_lock irq_context: 0 rcu_read_lock &pool->lock irq_context: 0 rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 input_mutex irq_context: 0 input_mutex &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&rfkill_global_led_trigger_work) irq_context: 0 (wq_completion)events (work_completion)(&rfkill_global_led_trigger_work) rfkill_global_mutex irq_context: 0 input_mutex input_devices_poll_wait.lock irq_context: 0 (netlink_chain).rwsem irq_context: 0 proto_tab_lock irq_context: 0 resource_lock pool_lock#2 irq_context: 0 resource_lock &obj_hash[i].lock irq_context: 0 random_ready_notifier.lock irq_context: 0 random_ready_notifier.lock crngs.lock irq_context: 0 misc_mtx misc_minors_ida.xa_lock irq_context: 0 misc_mtx lock kernfs_idr_lock pool_lock#2 irq_context: 0 vga_lock#2 irq_context: 0 vga_lock#2 pci_config_lock irq_context: 0 vga_lock#2 (console_sem).lock irq_context: 0 vga_lock#2 console_lock console_srcu console_owner_lock irq_context: 0 vga_lock#2 console_lock console_srcu console_owner irq_context: 0 vga_lock#2 console_lock console_srcu console_owner &port_lock_key irq_context: 0 vga_lock#2 console_lock console_srcu console_owner console_owner_lock irq_context: 0 &type->s_umount_key#12/1 irq_context: 0 &type->s_umount_key#12/1 fs_reclaim irq_context: 0 &type->s_umount_key#12/1 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#12/1 pool_lock#2 irq_context: 0 &type->s_umount_key#12/1 pcpu_alloc_mutex irq_context: 0 &type->s_umount_key#12/1 pcpu_alloc_mutex pcpu_lock irq_context: 0 &type->s_umount_key#12/1 shrinker_rwsem irq_context: 0 &type->s_umount_key#12/1 list_lrus_mutex irq_context: 0 &type->s_umount_key#12/1 sb_lock irq_context: 0 &type->s_umount_key#12/1 sb_lock unnamed_dev_ida.xa_lock irq_context: 0 &type->s_umount_key#12/1 mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#12/1 &sb->s_type->i_lock_key#11 irq_context: 0 &type->s_umount_key#12/1 &s->s_inode_list_lock irq_context: 0 &type->s_umount_key#12/1 tk_core.seq.seqcount irq_context: 0 &type->s_umount_key#12/1 &sb->s_type->i_lock_key#11 &dentry->d_lock irq_context: 0 &type->s_umount_key#12/1 &dentry->d_lock irq_context: 0 &sb->s_type->i_lock_key#11 irq_context: 0 clocksource_mutex cpu_hotplug_lock irq_context: 0 
clocksource_mutex cpu_hotplug_lock stop_cpus_mutex irq_context: 0 clocksource_mutex cpu_hotplug_lock stop_cpus_mutex &stopper->lock irq_context: 0 clocksource_mutex cpu_hotplug_lock stop_cpus_mutex &stop_pi_lock irq_context: 0 clocksource_mutex cpu_hotplug_lock stop_cpus_mutex &stop_pi_lock &rq->__lock irq_context: 0 clocksource_mutex cpu_hotplug_lock stop_cpus_mutex &stop_pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 clocksource_mutex cpu_hotplug_lock stop_cpus_mutex &rq->__lock irq_context: 0 clocksource_mutex cpu_hotplug_lock stop_cpus_mutex &x->wait#8 irq_context: 0 clocksource_mutex (console_sem).lock irq_context: 0 clocksource_mutex console_lock console_srcu console_owner_lock irq_context: 0 clocksource_mutex console_lock console_srcu console_owner irq_context: 0 clocksource_mutex console_lock console_srcu console_owner &port_lock_key irq_context: 0 clocksource_mutex console_lock console_srcu console_owner console_owner_lock irq_context: 0 rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#2 &sb->s_type->i_mutex_key/1 fs_reclaim irq_context: 0 sb_writers#2 &sb->s_type->i_mutex_key/1 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#2 &sb->s_type->i_mutex_key/1 pool_lock#2 irq_context: 0 sb_writers#2 &sb->s_type->i_mutex_key/1 &dentry->d_lock irq_context: 0 sb_writers#2 &sb->s_type->i_mutex_key/1 rcu_read_lock &dentry->d_lock irq_context: 0 sb_writers#2 &sb->s_type->i_mutex_key/1 &obj_hash[i].lock irq_context: 0 sb_writers#2 &sb->s_type->i_mutex_key/1 tomoyo_ss irq_context: 0 sb_writers#2 &sb->s_type->i_mutex_key/1 tomoyo_ss mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#2 &sb->s_type->i_mutex_key/1 tomoyo_ss pool_lock#2 irq_context: 0 sb_writers#2 &sb->s_type->i_mutex_key/1 tomoyo_ss rcu_read_lock mount_lock.seqcount irq_context: 0 sb_writers#2 &sb->s_type->i_mutex_key/1 tomoyo_ss rcu_read_lock rcu_read_lock rename_lock.seqcount irq_context: 0 sb_writers#2 &sb->s_type->i_mutex_key/1 tomoyo_ss &obj_hash[i].lock irq_context: 0 sb_writers#2 &sb->s_type->i_mutex_key/1 tomoyo_ss &c->lock irq_context: 0 sb_writers#2 &sb->s_type->i_mutex_key/1 tomoyo_ss &____s->seqcount irq_context: 0 sb_writers#2 &sb->s_type->i_mutex_key/1 tomoyo_ss rcu_read_lock &dentry->d_lock irq_context: 0 sb_writers#2 &sb->s_type->i_mutex_key/1 tomoyo_ss tomoyo_log_lock irq_context: 0 sb_writers#2 &sb->s_type->i_mutex_key/1 tomoyo_ss tomoyo_log_wait.lock irq_context: 0 sb_writers#2 &sb->s_type->i_mutex_key/1 tomoyo_ss tomoyo_policy_lock irq_context: 0 sb_writers#2 &sb->s_type->i_mutex_key/1 tomoyo_ss tomoyo_policy_lock mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#2 &sb->s_type->i_mutex_key/1 tomoyo_ss tomoyo_policy_lock pool_lock#2 irq_context: 0 sb_writers#2 &sb->s_type->i_mutex_key/1 mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#2 &sb->s_type->i_mutex_key/1 &sb->s_type->i_lock_key#2 irq_context: 0 sb_writers#2 &sb->s_type->i_mutex_key/1 &s->s_inode_list_lock irq_context: 0 sb_writers#2 &sb->s_type->i_mutex_key/1 tk_core.seq.seqcount irq_context: 0 sb_writers#2 &sb->s_type->i_mutex_key/1 &sb->s_type->i_lock_key#2 &dentry->d_lock irq_context: 0 &sb->s_type->i_mutex_key rename_lock.seqcount irq_context: 0 &sb->s_type->i_mutex_key &dentry->d_lock &wq#2 irq_context: 0 sb_writers#2 &sb->s_type->i_mutex_key/1 &sb->s_type->i_mutex_key irq_context: 0 sb_writers#2 &sb->s_type->i_mutex_key/1 
&sb->s_type->i_mutex_key tk_core.seq.seqcount irq_context: 0 sb_writers#2 &sb->s_type->i_mutex_key/1 &sb->s_type->i_mutex_key rcu_read_lock &dentry->d_lock irq_context: 0 sb_writers#2 &sb->s_type->i_mutex_key/1 &sb->s_type->i_mutex_key &dentry->d_lock irq_context: 0 sb_writers#2 &sb->s_type->i_mutex_key/1 &c->lock irq_context: 0 sb_writers#2 &sb->s_type->i_mutex_key/1 &____s->seqcount irq_context: 0 tomoyo_ss file_systems_lock irq_context: 0 tomoyo_ss fs_reclaim irq_context: 0 tomoyo_ss fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 tomoyo_ss rcu_read_lock init_fs.seq.seqcount irq_context: 0 &disk->open_mutex bdev_lock irq_context: 0 &bdev->bd_fsfreeze_mutex irq_context: 0 &bdev->bd_fsfreeze_mutex sb_lock irq_context: 0 &bdev->bd_fsfreeze_mutex fs_reclaim irq_context: 0 &bdev->bd_fsfreeze_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &bdev->bd_fsfreeze_mutex pool_lock#2 irq_context: 0 &bdev->bd_fsfreeze_mutex &type->s_umount_key#24/1 irq_context: 0 &bdev->bd_fsfreeze_mutex &type->s_umount_key#24/1 fs_reclaim irq_context: 0 &bdev->bd_fsfreeze_mutex &type->s_umount_key#24/1 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &bdev->bd_fsfreeze_mutex &type->s_umount_key#24/1 pool_lock#2 irq_context: 0 &bdev->bd_fsfreeze_mutex &type->s_umount_key#24/1 pcpu_alloc_mutex irq_context: 0 &bdev->bd_fsfreeze_mutex &type->s_umount_key#24/1 pcpu_alloc_mutex pcpu_lock irq_context: 0 &bdev->bd_fsfreeze_mutex &type->s_umount_key#24/1 shrinker_rwsem irq_context: 0 &bdev->bd_fsfreeze_mutex &type->s_umount_key#24/1 list_lrus_mutex irq_context: 0 &bdev->bd_fsfreeze_mutex &type->s_umount_key#24/1 sb_lock irq_context: 0 &type->s_umount_key#24/1 irq_context: 0 &type->s_umount_key#24/1 fs_reclaim irq_context: 0 &type->s_umount_key#24/1 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#24/1 pool_lock#2 irq_context: 0 &type->s_umount_key#24/1 &obj_hash[i].lock irq_context: 0 &type->s_umount_key#24/1 pcpu_alloc_mutex irq_context: 0 &type->s_umount_key#24/1 pcpu_alloc_mutex pcpu_lock irq_context: 0 &type->s_umount_key#24/1 &wq->mutex irq_context: 0 &type->s_umount_key#24/1 &wq->mutex &pool->lock irq_context: 0 &type->s_umount_key#24/1 &____s->seqcount irq_context: 0 &type->s_umount_key#24/1 &pcp->lock &zone->lock irq_context: 0 &type->s_umount_key#24/1 &zone->lock irq_context: 0 &type->s_umount_key#24/1 rcu_read_lock pool_lock#2 irq_context: 0 &type->s_umount_key#24/1 kthread_create_lock irq_context: 0 &type->s_umount_key#24/1 &p->pi_lock irq_context: 0 &type->s_umount_key#24/1 &x->wait irq_context: 0 &type->s_umount_key#24/1 &rq->__lock irq_context: hardirq tick_broadcast_lock irq_context: hardirq tick_broadcast_lock jiffies_lock irq_context: hardirq hrtimer_bases.lock &obj_hash[i].lock irq_context: 0 &type->s_umount_key#24/1 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &type->s_umount_key#24/1 &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events timer_update_work irq_context: 0 &type->s_umount_key#24/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#5 irq_context: 0 (wq_completion)events timer_update_work timer_keys_mutex irq_context: 0 &sb->s_type->i_mutex_key#5 rename_lock.seqcount irq_context: 0 (wq_completion)events timer_update_work timer_keys_mutex cpu_hotplug_lock irq_context: 0 &sb->s_type->i_mutex_key#5 fs_reclaim irq_context: 0 (wq_completion)events timer_update_work timer_keys_mutex cpu_hotplug_lock jump_label_mutex irq_context: 0 
&sb->s_type->i_mutex_key#5 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events timer_update_work timer_keys_mutex cpu_hotplug_lock jump_label_mutex text_mutex irq_context: 0 &sb->s_type->i_mutex_key#5 pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#5 &dentry->d_lock irq_context: 0 (wq_completion)events timer_update_work timer_keys_mutex cpu_hotplug_lock jump_label_mutex text_mutex ptlock_ptr(page)#2 irq_context: 0 &sb->s_type->i_mutex_key#5 rcu_read_lock rename_lock.seqcount irq_context: 0 &sb->s_type->i_mutex_key#5 &dentry->d_lock &wq irq_context: 0 &sb->s_type->i_mutex_key#5 mmu_notifier_invalidate_range_start irq_context: 0 &sb->s_type->i_mutex_key#5 &sb->s_type->i_lock_key#12 irq_context: 0 &sb->s_type->i_mutex_key#5 &s->s_inode_list_lock irq_context: 0 &sb->s_type->i_mutex_key#5 tk_core.seq.seqcount irq_context: 0 &sb->s_type->i_mutex_key#5 &sb->s_type->i_lock_key#12 &dentry->d_lock irq_context: 0 &sb->s_type->i_mutex_key#5 &c->lock irq_context: 0 &sb->s_type->i_mutex_key#5 &pcp->lock &zone->lock irq_context: 0 &sb->s_type->i_mutex_key#5 &zone->lock irq_context: 0 &sb->s_type->i_mutex_key#5 &____s->seqcount irq_context: 0 &sb->s_type->i_mutex_key#5 &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &type->s_umount_key#14/1 irq_context: 0 &type->s_umount_key#14/1 fs_reclaim irq_context: 0 &type->s_umount_key#14/1 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#14/1 pool_lock#2 irq_context: 0 &type->s_umount_key#14/1 pcpu_alloc_mutex irq_context: 0 &type->s_umount_key#14/1 pcpu_alloc_mutex pcpu_lock irq_context: 0 &type->s_umount_key#14/1 shrinker_rwsem irq_context: 0 &type->s_umount_key#14/1 list_lrus_mutex irq_context: 0 &type->s_umount_key#14/1 sb_lock irq_context: 0 &type->s_umount_key#14/1 sb_lock unnamed_dev_ida.xa_lock irq_context: 0 &type->s_umount_key#14/1 mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#14/1 &sb->s_type->i_lock_key#13 irq_context: 0 &type->s_umount_key#14/1 &s->s_inode_list_lock irq_context: 0 &type->s_umount_key#14/1 tk_core.seq.seqcount irq_context: 0 &type->s_umount_key#14/1 &sb->s_type->i_lock_key#13 &dentry->d_lock irq_context: 0 &type->s_umount_key#14/1 &dentry->d_lock irq_context: 0 &type->s_umount_key#15/1 irq_context: 0 &type->s_umount_key#15/1 fs_reclaim irq_context: 0 &type->s_umount_key#15/1 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#15/1 pool_lock#2 irq_context: 0 &type->s_umount_key#15/1 pcpu_alloc_mutex irq_context: 0 &type->s_umount_key#15/1 pcpu_alloc_mutex pcpu_lock irq_context: 0 &type->s_umount_key#15/1 shrinker_rwsem irq_context: 0 &type->s_umount_key#15/1 list_lrus_mutex irq_context: 0 &type->s_umount_key#15/1 sb_lock irq_context: 0 &type->s_umount_key#15/1 sb_lock unnamed_dev_ida.xa_lock irq_context: 0 &type->s_umount_key#15/1 mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#15/1 &sb->s_type->i_lock_key#14 irq_context: 0 &type->s_umount_key#15/1 &s->s_inode_list_lock irq_context: 0 &type->s_umount_key#15/1 tk_core.seq.seqcount irq_context: 0 &type->s_umount_key#15/1 &sb->s_type->i_lock_key#14 &dentry->d_lock irq_context: 0 &type->s_umount_key#15/1 &dentry->d_lock irq_context: 0 &s->s_inode_list_lock irq_context: 0 &type->s_umount_key#16/1 irq_context: 0 &type->s_umount_key#16/1 fs_reclaim irq_context: 0 &type->s_umount_key#16/1 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#16/1 pool_lock#2 irq_context: 0 &type->s_umount_key#16/1 pcpu_alloc_mutex 
irq_context: 0 &type->s_umount_key#16/1 pcpu_alloc_mutex pcpu_lock irq_context: 0 &type->s_umount_key#16/1 shrinker_rwsem irq_context: 0 &type->s_umount_key#16/1 list_lrus_mutex irq_context: 0 &type->s_umount_key#16/1 sb_lock irq_context: 0 &type->s_umount_key#16/1 sb_lock unnamed_dev_ida.xa_lock irq_context: 0 &type->s_umount_key#16/1 mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#16/1 &sb->s_type->i_lock_key#15 irq_context: 0 &type->s_umount_key#16/1 &s->s_inode_list_lock irq_context: 0 &type->s_umount_key#16/1 tk_core.seq.seqcount irq_context: 0 &type->s_umount_key#16/1 &sb->s_type->i_lock_key#15 &dentry->d_lock irq_context: 0 &type->s_umount_key#16/1 &dentry->d_lock irq_context: 0 &sb->s_type->i_lock_key#15 irq_context: 0 kclist_lock irq_context: 0 kclist_lock resource_lock irq_context: 0 kclist_lock fs_reclaim irq_context: 0 kclist_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kclist_lock pool_lock#2 irq_context: 0 &type->s_umount_key#17/1 irq_context: 0 &type->s_umount_key#17/1 fs_reclaim irq_context: 0 &type->s_umount_key#17/1 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#17/1 pool_lock#2 irq_context: 0 &type->s_umount_key#17/1 pcpu_alloc_mutex irq_context: 0 &type->s_umount_key#17/1 pcpu_alloc_mutex pcpu_lock irq_context: 0 &type->s_umount_key#17/1 shrinker_rwsem irq_context: 0 &type->s_umount_key#17/1 list_lrus_mutex irq_context: 0 &type->s_umount_key#17/1 sb_lock irq_context: 0 &type->s_umount_key#17/1 sb_lock unnamed_dev_ida.xa_lock irq_context: 0 &type->s_umount_key#17/1 &pcp->lock &zone->lock irq_context: 0 &type->s_umount_key#17/1 &zone->lock irq_context: 0 &type->s_umount_key#17/1 &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &type->s_umount_key#17/1 &____s->seqcount irq_context: 0 &type->s_umount_key#17/1 &c->lock irq_context: 0 &type->s_umount_key#17/1 mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#17/1 &sb->s_type->i_lock_key#16 irq_context: 0 &type->s_umount_key#17/1 &s->s_inode_list_lock irq_context: 0 &type->s_umount_key#17/1 tk_core.seq.seqcount irq_context: 0 &type->s_umount_key#17/1 &sb->s_type->i_lock_key#16 &dentry->d_lock irq_context: 0 &type->s_umount_key#17/1 &dentry->d_lock irq_context: 0 &sb->s_type->i_mutex_key#2 &c->lock irq_context: 0 &sb->s_type->i_mutex_key#2 &pcp->lock &zone->lock irq_context: 0 &sb->s_type->i_mutex_key#2 &zone->lock irq_context: 0 &sb->s_type->i_mutex_key#2 &____s->seqcount irq_context: 0 tomoyo_ss irq_context: 0 tomoyo_ss mmu_notifier_invalidate_range_start irq_context: 0 tomoyo_ss pool_lock#2 irq_context: 0 tomoyo_ss tomoyo_policy_lock irq_context: 0 tomoyo_ss tomoyo_policy_lock mmu_notifier_invalidate_range_start irq_context: 0 tomoyo_ss tomoyo_policy_lock pool_lock#2 irq_context: 0 tomoyo_ss tomoyo_policy_lock &____s->seqcount irq_context: 0 tomoyo_ss tomoyo_policy_lock &pcp->lock &zone->lock irq_context: 0 tomoyo_ss tomoyo_policy_lock &zone->lock irq_context: 0 tomoyo_ss tomoyo_policy_lock rcu_read_lock pool_lock#2 irq_context: 0 tomoyo_ss tomoyo_policy_lock &obj_hash[i].lock irq_context: 0 tomoyo_ss (console_sem).lock irq_context: 0 tomoyo_ss console_lock console_srcu console_owner_lock irq_context: 0 tomoyo_ss console_lock console_srcu console_owner irq_context: 0 tomoyo_ss console_lock console_srcu console_owner &port_lock_key irq_context: 0 tomoyo_ss console_lock console_srcu console_owner console_owner_lock irq_context: 0 &type->s_umount_key#18/1 irq_context: 0 &type->s_umount_key#18/1 fs_reclaim irq_context: 0 
&type->s_umount_key#18/1 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#18/1 pool_lock#2 irq_context: 0 &type->s_umount_key#18/1 pcpu_alloc_mutex irq_context: 0 &type->s_umount_key#18/1 pcpu_alloc_mutex pcpu_lock irq_context: 0 &type->s_umount_key#18/1 shrinker_rwsem irq_context: 0 &type->s_umount_key#18/1 &c->lock irq_context: 0 &type->s_umount_key#18/1 &pcp->lock &zone->lock irq_context: 0 &type->s_umount_key#18/1 &zone->lock irq_context: 0 &type->s_umount_key#18/1 &____s->seqcount irq_context: 0 &type->s_umount_key#18/1 list_lrus_mutex irq_context: 0 &type->s_umount_key#18/1 sb_lock irq_context: 0 &type->s_umount_key#18/1 sb_lock unnamed_dev_ida.xa_lock irq_context: 0 &type->s_umount_key#18/1 mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#18/1 &sb->s_type->i_lock_key#17 irq_context: 0 &type->s_umount_key#18/1 &s->s_inode_list_lock irq_context: 0 &type->s_umount_key#18/1 tk_core.seq.seqcount irq_context: 0 &type->s_umount_key#18/1 &sb->s_type->i_lock_key#17 &dentry->d_lock irq_context: 0 &type->s_umount_key#18/1 &dentry->d_lock irq_context: 0 &ns->lock irq_context: 0 &ns->lock &dentry->d_lock irq_context: 0 &ns->lock pin_fs_lock irq_context: 0 &ns->lock &sb->s_type->i_mutex_key#6 irq_context: 0 &ns->lock &sb->s_type->i_mutex_key#6 &sb->s_type->i_lock_key#17 irq_context: 0 &ns->lock &sb->s_type->i_mutex_key#6 rename_lock.seqcount irq_context: 0 &ns->lock &sb->s_type->i_mutex_key#6 fs_reclaim irq_context: 0 &ns->lock &sb->s_type->i_mutex_key#6 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &ns->lock &sb->s_type->i_mutex_key#6 pool_lock#2 irq_context: 0 &ns->lock &sb->s_type->i_mutex_key#6 &dentry->d_lock irq_context: 0 &ns->lock &sb->s_type->i_mutex_key#6 rcu_read_lock rename_lock.seqcount irq_context: 0 &ns->lock &sb->s_type->i_mutex_key#6 &dentry->d_lock &wq irq_context: 0 &ns->lock &sb->s_type->i_mutex_key#6 mmu_notifier_invalidate_range_start irq_context: 0 &ns->lock &sb->s_type->i_mutex_key#6 &s->s_inode_list_lock irq_context: 0 &ns->lock &sb->s_type->i_mutex_key#6 tk_core.seq.seqcount irq_context: 0 &ns->lock &sb->s_type->i_mutex_key#6 &sb->s_type->i_lock_key#17 &dentry->d_lock irq_context: 0 &type->s_umount_key#19 irq_context: 0 &type->s_umount_key#19 sb_lock irq_context: 0 &type->s_umount_key#19 &dentry->d_lock irq_context: 0 &sb->s_type->i_mutex_key#2 rcu_read_lock &dentry->d_lock irq_context: 0 pnp_lock irq_context: 0 pnp_lock fs_reclaim irq_context: 0 pnp_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 pnp_lock pool_lock#2 irq_context: 0 &device->physical_node_lock sysfs_symlink_target_lock irq_context: 0 &device->physical_node_lock fs_reclaim irq_context: 0 &device->physical_node_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &device->physical_node_lock pool_lock#2 irq_context: 0 &device->physical_node_lock lock irq_context: 0 &device->physical_node_lock lock kernfs_idr_lock irq_context: 0 &device->physical_node_lock &root->kernfs_rwsem irq_context: 0 &device->physical_node_lock &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 fwnode_link_lock irq_context: 0 fwnode_link_lock &k->k_lock irq_context: 0 subsys mutex#19 irq_context: 0 &pool->lock/1 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 subsys mutex#20 irq_context: 0 subsys mutex#20 &k->k_lock irq_context: 0 subsys mutex#21 irq_context: 0 subsys mutex#21 &k->k_lock irq_context: 0 subsys mutex#22 irq_context: 0 subsys mutex#22 &k->k_lock irq_context: 0 sb_writers &type->i_mutex_dir_key/1 &c->lock 
irq_context: 0 sb_writers &type->i_mutex_dir_key/1 &pcp->lock &zone->lock irq_context: 0 sb_writers &type->i_mutex_dir_key/1 &zone->lock irq_context: 0 sb_writers &type->i_mutex_dir_key/1 &____s->seqcount irq_context: 0 tty_mutex irq_context: hardirq hrtimer_bases.lock fill_pool_map-wait-type-override &pcp->lock &zone->lock irq_context: hardirq hrtimer_bases.lock fill_pool_map-wait-type-override pool_lock#2 irq_context: hardirq hrtimer_bases.lock fill_pool_map-wait-type-override &zone->lock irq_context: hardirq hrtimer_bases.lock fill_pool_map-wait-type-override &____s->seqcount irq_context: hardirq hrtimer_bases.lock fill_pool_map-wait-type-override &c->lock irq_context: hardirq hrtimer_bases.lock fill_pool_map-wait-type-override pool_lock irq_context: softirq led_lock irq_context: 0 misc_mtx &obj_hash[i].lock pool_lock irq_context: 0 misc_mtx rcu_read_lock pool_lock#2 irq_context: 0 subsys mutex#23 irq_context: 0 subsys mutex#23 &k->list_lock irq_context: 0 subsys mutex#23 &k->k_lock irq_context: 0 cpu_hotplug_lock wq_pool_mutex &xa->xa_lock irq_context: 0 cpu_hotplug_lock wq_pool_mutex kthread_create_lock irq_context: 0 cpu_hotplug_lock wq_pool_mutex &p->pi_lock irq_context: 0 cpu_hotplug_lock wq_pool_mutex &p->pi_lock &rq->__lock irq_context: 0 cpu_hotplug_lock wq_pool_mutex &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cpu_hotplug_lock wq_pool_mutex &rq->__lock irq_context: 0 cpu_hotplug_lock wq_pool_mutex &x->wait irq_context: 0 cpu_hotplug_lock wq_pool_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cpu_hotplug_lock wq_pool_mutex wq_pool_attach_mutex irq_context: 0 cpu_hotplug_lock wq_pool_mutex &pool->lock/1 &p->pi_lock irq_context: 0 cpu_hotplug_lock wq_pool_mutex &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 cpu_hotplug_lock wq_pool_mutex &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 netevent_notif_chain.lock irq_context: 0 clients_rwsem irq_context: 0 clients_rwsem fs_reclaim irq_context: 0 clients_rwsem fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 clients_rwsem clients.xa_lock irq_context: 0 devices_rwsem irq_context: 0 clients_rwsem clients.xa_lock pool_lock#2 irq_context: 0 (blocking_lsm_notifier_chain).rwsem irq_context: 0 pernet_ops_rwsem fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 pernet_ops_rwsem fill_pool_map-wait-type-override pool_lock irq_context: 0 (inetaddr_chain).rwsem irq_context: 0 inet6addr_chain.lock irq_context: 0 buses_mutex irq_context: 0 offload_lock irq_context: 0 slab_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 inetsw_lock irq_context: 0 (wq_completion)events_power_efficient irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&tbl->managed_work)->work) irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&tbl->managed_work)->work) &tbl->lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&tbl->managed_work)->work) &tbl->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&tbl->managed_work)->work) &tbl->lock &base->lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&tbl->managed_work)->work) &tbl->lock &base->lock &obj_hash[i].lock irq_context: 0 ptype_lock irq_context: 0 pernet_ops_rwsem nl_table_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex batched_entropy_u32.lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &tbl->lock irq_context: 0 pernet_ops_rwsem rtnl_mutex 
sysctl_lock irq_context: 0 rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)events_power_efficient (check_lifetime_work).work irq_context: 0 (wq_completion)events_power_efficient (check_lifetime_work).work &obj_hash[i].lock irq_context: 0 (wq_completion)events_power_efficient (check_lifetime_work).work &base->lock irq_context: 0 (wq_completion)events_power_efficient (check_lifetime_work).work &base->lock &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem &net->rules_mod_lock irq_context: 0 pernet_ops_rwsem slab_mutex irq_context: 0 pernet_ops_rwsem slab_mutex fs_reclaim irq_context: 0 pernet_ops_rwsem slab_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 pernet_ops_rwsem slab_mutex pool_lock#2 irq_context: 0 pernet_ops_rwsem slab_mutex &c->lock irq_context: 0 pernet_ops_rwsem slab_mutex &n->list_lock irq_context: 0 pernet_ops_rwsem slab_mutex &____s->seqcount irq_context: 0 pernet_ops_rwsem slab_mutex pcpu_alloc_mutex irq_context: 0 pernet_ops_rwsem slab_mutex pcpu_alloc_mutex pcpu_lock irq_context: 0 pernet_ops_rwsem &pcp->lock &zone->lock irq_context: 0 pernet_ops_rwsem &zone->lock irq_context: 0 pernet_ops_rwsem rcu_read_lock pool_lock#2 irq_context: 0 pernet_ops_rwsem batched_entropy_u32.lock irq_context: 0 pernet_ops_rwsem fill_pool_map-wait-type-override &c->lock irq_context: 0 pernet_ops_rwsem fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 tcp_ulp_list_lock irq_context: 0 xfrm_state_afinfo_lock irq_context: 0 xfrm_policy_afinfo_lock irq_context: 0 xfrm_input_afinfo_lock irq_context: 0 pernet_ops_rwsem percpu_counters_lock irq_context: 0 rtnl_mutex krc.lock irq_context: 0 rtnl_mutex krc.lock &obj_hash[i].lock irq_context: 0 rtnl_mutex krc.lock hrtimer_bases.lock irq_context: 0 rtnl_mutex krc.lock hrtimer_bases.lock tk_core.seq.seqcount irq_context: 0 rtnl_mutex krc.lock hrtimer_bases.lock &obj_hash[i].lock irq_context: 0 rtnl_mutex krc.lock &base->lock irq_context: 0 rtnl_mutex krc.lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events_highpri irq_context: 0 (wq_completion)events_highpri (work_completion)(&(&krcp->page_cache_work)->work) irq_context: 0 (wq_completion)events_highpri (work_completion)(&(&krcp->page_cache_work)->work) fs_reclaim irq_context: 0 (wq_completion)events_highpri (work_completion)(&(&krcp->page_cache_work)->work) fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events_highpri (work_completion)(&(&krcp->page_cache_work)->work) &pcp->lock &zone->lock irq_context: 0 (wq_completion)events_highpri (work_completion)(&(&krcp->page_cache_work)->work) &zone->lock irq_context: 0 (wq_completion)events_highpri (work_completion)(&(&krcp->page_cache_work)->work) &____s->seqcount irq_context: 0 (wq_completion)events_highpri (work_completion)(&(&krcp->page_cache_work)->work) pool_lock#2 irq_context: 0 (wq_completion)events_highpri (work_completion)(&(&krcp->page_cache_work)->work) krc.lock irq_context: 0 &hashinfo->lock irq_context: 0 tcp_cong_list_lock irq_context: 0 pernet_ops_rwsem cache_list_lock irq_context: 0 pernet_ops_rwsem rcu_read_lock &pool->lock irq_context: 0 pernet_ops_rwsem rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 pernet_ops_rwsem rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 pernet_ops_rwsem rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (rpc_pipefs_notifier_list).rwsem irq_context: 0 svc_xprt_class_lock irq_context: 0 
xprt_list_lock irq_context: 0 xprt_list_lock (console_sem).lock irq_context: 0 xprt_list_lock console_lock console_srcu console_owner_lock irq_context: 0 xprt_list_lock console_lock console_srcu console_owner irq_context: 0 xprt_list_lock console_lock console_srcu console_owner &port_lock_key irq_context: 0 xprt_list_lock console_lock console_srcu console_owner console_owner_lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&cache_cleaner)->work) irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&cache_cleaner)->work) cache_list_lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&cache_cleaner)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&cache_cleaner)->work) &base->lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&cache_cleaner)->work) &base->lock &obj_hash[i].lock irq_context: 0 rcu_read_lock quarantine_lock irq_context: 0 &(&priv->bus_notifier)->rwsem irq_context: 0 pcibios_fwaddrmap_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) fs_reclaim irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &c->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &pcp->lock &zone->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &zone->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &____s->seqcount irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) pool_lock#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) rcu_read_lock init_fs.seq.seqcount irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) rcu_read_lock &dentry->d_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &sb->s_type->i_mutex_key irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &sb->s_type->i_mutex_key fs_reclaim irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &sb->s_type->i_mutex_key fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &sb->s_type->i_mutex_key pool_lock#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &sb->s_type->i_mutex_key &dentry->d_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &sb->s_type->i_mutex_key rcu_read_lock rename_lock.seqcount irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &sb->s_type->i_mutex_key &dentry->d_lock &wq irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dentry->d_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) sb_writers#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) sb_writers#2 mount_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) sb_writers#2 &sb->s_type->i_mutex_key/1 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) sb_writers#2 &sb->s_type->i_mutex_key/1 rename_lock.seqcount irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) sb_writers#2 
&sb->s_type->i_mutex_key/1 fs_reclaim irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) sb_writers#2 &sb->s_type->i_mutex_key/1 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) sb_writers#2 &sb->s_type->i_mutex_key/1 pool_lock#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) sb_writers#2 &sb->s_type->i_mutex_key/1 &dentry->d_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) sb_writers#2 &sb->s_type->i_mutex_key/1 &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) sb_writers#2 &sb->s_type->i_mutex_key/1 tomoyo_ss irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) sb_writers#2 &sb->s_type->i_mutex_key/1 tomoyo_ss mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) sb_writers#2 &sb->s_type->i_mutex_key/1 tomoyo_ss pool_lock#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) sb_writers#2 &sb->s_type->i_mutex_key/1 tomoyo_ss rcu_read_lock mount_lock.seqcount irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) sb_writers#2 &sb->s_type->i_mutex_key/1 tomoyo_ss rcu_read_lock rcu_read_lock rename_lock.seqcount irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) sb_writers#2 &sb->s_type->i_mutex_key/1 tomoyo_ss &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) sb_writers#2 &sb->s_type->i_mutex_key/1 tomoyo_ss rcu_read_lock &dentry->d_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) sb_writers#2 &sb->s_type->i_mutex_key/1 tomoyo_ss tomoyo_log_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) sb_writers#2 &sb->s_type->i_mutex_key/1 tomoyo_ss tomoyo_log_wait.lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) sb_writers#2 &sb->s_type->i_mutex_key/1 tomoyo_ss tomoyo_policy_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) sb_writers#2 &sb->s_type->i_mutex_key/1 tomoyo_ss tomoyo_policy_lock mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) sb_writers#2 &sb->s_type->i_mutex_key/1 tomoyo_ss tomoyo_policy_lock pool_lock#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) sb_writers#2 &sb->s_type->i_mutex_key/1 mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) sb_writers#2 &sb->s_type->i_mutex_key/1 &sb->s_type->i_lock_key#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) sb_writers#2 &sb->s_type->i_mutex_key/1 &s->s_inode_list_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) sb_writers#2 &sb->s_type->i_mutex_key/1 tk_core.seq.seqcount irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) sb_writers#2 &sb->s_type->i_mutex_key/1 &sb->s_type->i_lock_key#2 &dentry->d_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) sb_writers#2 &sb->s_type->i_mutex_key/1 rcu_read_lock &dentry->d_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) sb_writers#2 &sb->s_type->i_mutex_key irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) sb_writers#2 &sb->s_type->i_mutex_key 
tomoyo_ss irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) sb_writers#2 &sb->s_type->i_mutex_key tomoyo_ss mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) sb_writers#2 &sb->s_type->i_mutex_key tomoyo_ss &c->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) sb_writers#2 &sb->s_type->i_mutex_key tomoyo_ss &pcp->lock &zone->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) sb_writers#2 &sb->s_type->i_mutex_key tomoyo_ss &zone->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) sb_writers#2 &sb->s_type->i_mutex_key tomoyo_ss &____s->seqcount irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) sb_writers#2 &sb->s_type->i_mutex_key tomoyo_ss pool_lock#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) sb_writers#2 &sb->s_type->i_mutex_key tomoyo_ss rcu_read_lock mount_lock.seqcount irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) sb_writers#2 &sb->s_type->i_mutex_key tomoyo_ss rcu_read_lock rcu_read_lock rename_lock.seqcount irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) sb_writers#2 &sb->s_type->i_mutex_key tomoyo_ss &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) sb_writers#2 &sb->s_type->i_mutex_key tomoyo_ss rcu_read_lock &dentry->d_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) sb_writers#2 &sb->s_type->i_mutex_key tomoyo_ss tomoyo_log_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) sb_writers#2 &sb->s_type->i_mutex_key tomoyo_ss tomoyo_log_wait.lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) sb_writers#2 &sb->s_type->i_mutex_key tomoyo_ss tomoyo_policy_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) sb_writers#2 &sb->s_type->i_mutex_key tomoyo_ss tomoyo_policy_lock mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) sb_writers#2 &sb->s_type->i_mutex_key tomoyo_ss tomoyo_policy_lock pool_lock#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) sb_writers#2 &sb->s_type->i_mutex_key tomoyo_ss &pcp->lock &zone->lock &____s->seqcount irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) sb_writers#2 &sb->s_type->i_mutex_key tk_core.seq.seqcount irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) sb_writers#2 &sb->s_type->i_mutex_key &sb->s_type->i_lock_key#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) sb_writers#2 &sb->s_type->i_mutex_key &wb->list_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) sb_writers#2 &sb->s_type->i_mutex_key &wb->list_lock &sb->s_type->i_lock_key#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) rcu_read_lock &sb->s_type->i_lock_key#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) sb_writers#2 &sb->s_type->i_mutex_key/1 tomoyo_ss &pcp->lock &zone->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) sb_writers#2 &sb->s_type->i_mutex_key/1 tomoyo_ss &zone->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) sb_writers#2 &sb->s_type->i_mutex_key/1 tomoyo_ss &____s->seqcount 
irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) sb_writers#2 &sb->s_type->i_mutex_key/1 tomoyo_ss rcu_read_lock pool_lock#2 irq_context: softirq (&tcp_orphan_timer) irq_context: softirq (&tcp_orphan_timer) &obj_hash[i].lock irq_context: softirq (&tcp_orphan_timer) &base->lock irq_context: softirq (&tcp_orphan_timer) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) rcu_read_lock pool_lock#2 irq_context: 0 umhelper_sem irq_context: 0 umhelper_sem usermodehelper_disabled_waitq.lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) &sighand->siglock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) fs_reclaim irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) &c->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) &pcp->lock &zone->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) &zone->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) &____s->seqcount irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) pool_lock#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) free_vmap_area_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) vmap_area_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) init_mm.page_table_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) batched_entropy_u64.lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) init_files.file_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) init_fs.lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) init_fs.lock &dentry->d_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) &p->alloc_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) lock pidmap_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) pidmap_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) cgroup_threadgroup_rwsem irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) cgroup_threadgroup_rwsem css_set_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) cgroup_threadgroup_rwsem &p->pi_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) cgroup_threadgroup_rwsem &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) cgroup_threadgroup_rwsem &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) 
cgroup_threadgroup_rwsem tk_core.seq.seqcount irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) cgroup_threadgroup_rwsem tasklist_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) cgroup_threadgroup_rwsem tasklist_lock &p->pi_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) cgroup_threadgroup_rwsem tasklist_lock &sighand->siglock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) cgroup_threadgroup_rwsem css_set_lock &p->pi_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) cgroup_threadgroup_rwsem css_set_lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) input_pool.lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) &p->pi_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &drv->dynids.lock irq_context: 0 umh_sysctl_lock irq_context: 0 &mm->mmap_lock irq_context: 0 &mm->mmap_lock fs_reclaim irq_context: 0 &mm->mmap_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &mm->mmap_lock &pcp->lock &zone->lock irq_context: 0 &mm->mmap_lock &zone->lock irq_context: 0 &mm->mmap_lock &____s->seqcount irq_context: 0 &mm->mmap_lock pool_lock#2 irq_context: 0 &mm->mmap_lock &c->lock irq_context: 0 &mm->mmap_lock fs_reclaim irq_context: 0 &mm->mmap_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &mm->mmap_lock &pcp->lock &zone->lock irq_context: 0 &mm->mmap_lock &zone->lock irq_context: 0 &mm->mmap_lock &____s->seqcount irq_context: 0 &mm->mmap_lock pool_lock#2 irq_context: 0 &mm->mmap_lock &mm->page_table_lock irq_context: 0 &mm->mmap_lock ptlock_ptr(page) irq_context: 0 &mm->mmap_lock &c->lock irq_context: 0 &mm->mmap_lock &anon_vma->rwsem irq_context: 0 &mm->mmap_lock &anon_vma->rwsem &mm->page_table_lock irq_context: 0 &mm->mmap_lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &mm->mmap_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 &mm->mmap_lock ptlock_ptr(page)#2 irq_context: 0 &mm->mmap_lock ptlock_ptr(page)#2 lock#4 irq_context: 0 &sig->cred_guard_mutex irq_context: 0 &sig->cred_guard_mutex fs_reclaim irq_context: 0 &sig->cred_guard_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &sig->cred_guard_mutex pool_lock#2 irq_context: 0 &sig->cred_guard_mutex &fs->lock irq_context: 0 &sig->cred_guard_mutex &pcp->lock &zone->lock irq_context: 0 &sig->cred_guard_mutex &zone->lock irq_context: 0 &sig->cred_guard_mutex &____s->seqcount irq_context: 0 &sig->cred_guard_mutex &c->lock irq_context: 0 &sig->cred_guard_mutex rcu_read_lock &____s->seqcount#3 irq_context: 0 &sig->cred_guard_mutex rcu_read_lock &dentry->d_lock irq_context: 0 &sig->cred_guard_mutex &sb->s_type->i_mutex_key irq_context: 0 &sig->cred_guard_mutex &sb->s_type->i_mutex_key fs_reclaim irq_context: 0 &sig->cred_guard_mutex &sb->s_type->i_mutex_key fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &sig->cred_guard_mutex &sb->s_type->i_mutex_key &c->lock irq_context: 0 &sig->cred_guard_mutex &sb->s_type->i_mutex_key &pcp->lock &zone->lock irq_context: 0 &sig->cred_guard_mutex &sb->s_type->i_mutex_key &zone->lock irq_context: 0 &sig->cred_guard_mutex &sb->s_type->i_mutex_key &pcp->lock 
&zone->lock &____s->seqcount irq_context: 0 &sig->cred_guard_mutex &sb->s_type->i_mutex_key &____s->seqcount irq_context: 0 &sig->cred_guard_mutex &sb->s_type->i_mutex_key pool_lock#2 irq_context: 0 &sig->cred_guard_mutex &sb->s_type->i_mutex_key &dentry->d_lock irq_context: 0 &sig->cred_guard_mutex &sb->s_type->i_mutex_key rcu_read_lock rename_lock.seqcount irq_context: 0 &sig->cred_guard_mutex &sb->s_type->i_mutex_key &dentry->d_lock &wq irq_context: 0 &sig->cred_guard_mutex &dentry->d_lock irq_context: 0 &sig->cred_guard_mutex &obj_hash[i].lock irq_context: 0 &sig->cred_guard_mutex &obj_hash[i].lock pool_lock irq_context: 0 &sig->cred_guard_mutex delayed_uprobe_lock irq_context: 0 &sig->cred_guard_mutex &mm->mmap_lock irq_context: 0 &sig->cred_guard_mutex &mm->mmap_lock lock#4 irq_context: 0 &sig->cred_guard_mutex &mm->mmap_lock lock#4 &lruvec->lru_lock irq_context: 0 &sig->cred_guard_mutex &mm->mmap_lock lock#5 irq_context: 0 &sig->cred_guard_mutex &mm->mmap_lock mmu_notifier_invalidate_range_start irq_context: 0 &sig->cred_guard_mutex &mm->mmap_lock ptlock_ptr(page)#2 irq_context: 0 &sig->cred_guard_mutex &mm->mmap_lock irq_context: 0 &sig->cred_guard_mutex &mm->mmap_lock &vma->vm_lock->lock irq_context: 0 &sig->cred_guard_mutex &mm->mmap_lock &anon_vma->rwsem irq_context: 0 &sig->cred_guard_mutex &mm->mmap_lock &obj_hash[i].lock irq_context: 0 &sig->cred_guard_mutex &mm->mmap_lock pool_lock#2 irq_context: 0 &sig->cred_guard_mutex &mm->mmap_lock lock#4 irq_context: 0 &sig->cred_guard_mutex &mm->mmap_lock lock#5 irq_context: 0 &sig->cred_guard_mutex &mm->mmap_lock &lruvec->lru_lock irq_context: 0 &sig->cred_guard_mutex &mm->mmap_lock rcu_read_lock pool_lock#2 irq_context: 0 &sig->cred_guard_mutex pgd_lock irq_context: 0 &sig->cred_guard_mutex rcu_read_lock pool_lock#2 irq_context: 0 &sig->cred_guard_mutex key irq_context: 0 &sig->cred_guard_mutex pcpu_lock irq_context: 0 &sig->cred_guard_mutex percpu_counters_lock irq_context: 0 &tsk->futex_exit_mutex irq_context: 0 &tsk->futex_exit_mutex &p->pi_lock irq_context: 0 &p->alloc_lock &fs->lock irq_context: 0 &child->perf_event_mutex irq_context: 0 css_set_lock irq_context: 0 tasklist_lock irq_context: 0 tasklist_lock &pid->wait_pidfd irq_context: 0 tasklist_lock &sighand->siglock irq_context: 0 tasklist_lock &sighand->siglock &sig->wait_chldexit irq_context: 0 tasklist_lock &sighand->siglock input_pool.lock irq_context: 0 tasklist_lock &sighand->siglock &(&sig->stats_lock)->lock irq_context: 0 tasklist_lock &sighand->siglock &(&sig->stats_lock)->lock &____s->seqcount#5 irq_context: 0 tasklist_lock &sighand->siglock &(&sig->stats_lock)->lock &____s->seqcount#5 pidmap_lock irq_context: 0 tasklist_lock &sighand->siglock &(&sig->stats_lock)->lock &____s->seqcount#5 &obj_hash[i].lock irq_context: 0 tasklist_lock &sighand->siglock &(&sig->stats_lock)->lock &____s->seqcount#5 pool_lock#2 irq_context: 0 tasklist_lock &obj_hash[i].lock irq_context: 0 low_water_lock irq_context: 0 low_water_lock (console_sem).lock irq_context: 0 low_water_lock console_lock console_srcu console_owner_lock irq_context: 0 low_water_lock console_lock console_srcu console_owner irq_context: 0 low_water_lock console_lock console_srcu console_owner &port_lock_key irq_context: 0 cpu_hotplug_lock cpuhp_state_mutex &p->pi_lock irq_context: 0 cpu_hotplug_lock cpuhp_state_mutex &p->pi_lock &rq->__lock irq_context: 0 cpu_hotplug_lock cpuhp_state_mutex &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cpu_hotplug_lock cpuhp_state_mutex &rq->__lock 
irq_context: 0 cpu_hotplug_lock cpuhp_state-up fs_reclaim irq_context: 0 cpu_hotplug_lock cpuhp_state-up fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 cpu_hotplug_lock cpuhp_state-up pool_lock#2 irq_context: 0 cpu_hotplug_lock cpuhp_state_mutex &x->wait#6 irq_context: 0 cpu_hotplug_lock cpuhp_state_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cpu_hotplug_lock cpuhp_state_mutex rcu_read_lock &rq->__lock irq_context: 0 cpu_hotplug_lock cpuhp_state_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 low_water_lock console_lock console_srcu console_owner console_owner_lock irq_context: 0 &x->wait#6 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 &x->wait#6 &p->pi_lock &rq->__lock irq_context: 0 &x->wait#6 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cpu_hotplug_lock cpuhp_state_mutex &cfs_rq->removed.lock irq_context: 0 cpu_hotplug_lock cpuhp_state_mutex &obj_hash[i].lock irq_context: 0 cpu_hotplug_lock cpuhp_state_mutex pool_lock#2 irq_context: 0 vendor_module_lock irq_context: 0 vendor_module_lock slab_mutex irq_context: 0 vendor_module_lock slab_mutex fs_reclaim irq_context: 0 vendor_module_lock slab_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 vendor_module_lock slab_mutex pool_lock#2 irq_context: 0 vendor_module_lock slab_mutex &c->lock irq_context: 0 vendor_module_lock slab_mutex &n->list_lock irq_context: 0 vendor_module_lock slab_mutex pcpu_alloc_mutex irq_context: 0 vendor_module_lock slab_mutex pcpu_alloc_mutex pcpu_lock irq_context: 0 vendor_module_lock pcpu_alloc_mutex irq_context: 0 vendor_module_lock pcpu_alloc_mutex pcpu_lock irq_context: 0 vendor_module_lock &obj_hash[i].lock irq_context: 0 vendor_module_lock percpu_counters_lock irq_context: 0 vendor_module_lock fs_reclaim irq_context: 0 vendor_module_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 vendor_module_lock pool_lock#2 irq_context: 0 vendor_module_lock shrinker_rwsem irq_context: softirq &(&cache_cleaner)->timer irq_context: softirq &(&cache_cleaner)->timer rcu_read_lock &pool->lock irq_context: softirq &(&cache_cleaner)->timer rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: softirq &(&cache_cleaner)->timer rcu_read_lock &pool->lock &p->pi_lock irq_context: softirq &(&cache_cleaner)->timer rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: softirq &(&cache_cleaner)->timer rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 vendor_module_lock &____s->seqcount irq_context: 0 vendor_module_lock &pcp->lock &zone->lock irq_context: 0 vendor_module_lock &zone->lock irq_context: 0 vendor_module_lock cpu_hotplug_lock irq_context: 0 vendor_module_lock cpu_hotplug_lock static_call_mutex irq_context: 0 vendor_module_lock cpu_hotplug_lock static_call_mutex text_mutex irq_context: 0 vendor_module_lock cpu_hotplug_lock static_call_mutex text_mutex ptlock_ptr(page)#2 irq_context: 0 vendor_module_lock cpu_hotplug_lock static_call_mutex text_mutex &rq->__lock irq_context: 0 vendor_module_lock cpu_hotplug_lock static_call_mutex text_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &cfs_rq->removed.lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&(&kfence_timer)->work) cpu_hotplug_lock jump_label_mutex text_mutex text_mutex.wait_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&(&kfence_timer)->work) cpu_hotplug_lock jump_label_mutex text_mutex &rq->__lock 
irq_context: 0 (wq_completion)events_unbound (work_completion)(&(&kfence_timer)->work) cpu_hotplug_lock jump_label_mutex text_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 vendor_module_lock cpu_hotplug_lock static_call_mutex text_mutex.wait_lock irq_context: 0 vendor_module_lock cpu_hotplug_lock static_call_mutex &p->pi_lock irq_context: 0 vendor_module_lock cpu_hotplug_lock static_call_mutex &p->pi_lock &rq->__lock irq_context: 0 vendor_module_lock cpu_hotplug_lock static_call_mutex &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 vendor_module_lock timekeeper_lock irq_context: 0 vendor_module_lock timekeeper_lock pvclock_gtod_data irq_context: 0 slab_mutex batched_entropy_u8.lock irq_context: 0 slab_mutex batched_entropy_u8.lock crngs.lock irq_context: 0 slab_mutex batched_entropy_u8.lock crngs.lock base_crng.lock irq_context: 0 slab_mutex kfence_freelist_lock irq_context: hardirq timekeeper_lock tk_core.seq.seqcount pvclock_gtod_data irq_context: 0 misc_mtx rcu_read_lock &pool->lock/1 irq_context: 0 misc_mtx rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 misc_mtx rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 misc_mtx rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 misc_mtx rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 misc_mtx rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 misc_mtx rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 clocksource_mutex cpu_hotplug_lock stop_cpus_mutex &cfs_rq->removed.lock irq_context: 0 clocksource_mutex cpu_hotplug_lock stop_cpus_mutex &obj_hash[i].lock irq_context: 0 clocksource_mutex cpu_hotplug_lock stop_cpus_mutex pool_lock#2 irq_context: softirq &rcu_state.gp_wq &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 clocksource_mutex &rq->__lock irq_context: 0 jiffies_seq.seqcount irq_context: 0 cpu_hotplug_lock cpuhp_state-up &x->wait#9 irq_context: 0 cpu_hotplug_lock cpuhp_state-up &k->list_lock irq_context: 0 cpu_hotplug_lock cpuhp_state-up gdp_mutex irq_context: 0 cpu_hotplug_lock cpuhp_state-up gdp_mutex &k->list_lock irq_context: 0 cpu_hotplug_lock cpuhp_state-up gdp_mutex fs_reclaim irq_context: 0 cpu_hotplug_lock cpuhp_state-up gdp_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 cpu_hotplug_lock cpuhp_state-up gdp_mutex pool_lock#2 irq_context: 0 cpu_hotplug_lock cpuhp_state-up gdp_mutex lock irq_context: 0 cpu_hotplug_lock cpuhp_state-up gdp_mutex lock kernfs_idr_lock irq_context: 0 cpu_hotplug_lock cpuhp_state-up gdp_mutex &root->kernfs_rwsem irq_context: 0 cpu_hotplug_lock cpuhp_state-up gdp_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 cpu_hotplug_lock cpuhp_state-up lock irq_context: 0 cpu_hotplug_lock cpuhp_state-up lock kernfs_idr_lock irq_context: 0 cpu_hotplug_lock cpuhp_state-up &root->kernfs_rwsem irq_context: 0 cpu_hotplug_lock cpuhp_state-up &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 cpu_hotplug_lock cpuhp_state-up bus_type_sem irq_context: 0 cpu_hotplug_lock cpuhp_state-up sysfs_symlink_target_lock irq_context: 0 cpu_hotplug_lock cpuhp_state-up &root->kernfs_rwsem irq_context: 0 cpu_hotplug_lock cpuhp_state-up &dev->power.lock irq_context: 0 cpu_hotplug_lock cpuhp_state-up dpm_list_mtx irq_context: 0 cpu_hotplug_lock cpuhp_state-up req_lock irq_context: 0 cpu_hotplug_lock cpuhp_state-up &p->pi_lock irq_context: 0 cpu_hotplug_lock cpuhp_state-up &p->pi_lock &rq->__lock 
irq_context: 0 cpu_hotplug_lock cpuhp_state-up &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sig->cred_guard_mutex &mm->mmap_lock &rq->__lock irq_context: 0 cpu_hotplug_lock cpuhp_state-up &x->wait#11 irq_context: 0 cpu_hotplug_lock cpuhp_state-up &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &type->i_mutex_dir_key#2 irq_context: 0 &type->i_mutex_dir_key#2 fs_reclaim irq_context: 0 &type->i_mutex_dir_key#2 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &type->i_mutex_dir_key#2 pool_lock#2 irq_context: 0 &type->i_mutex_dir_key#2 &dentry->d_lock irq_context: 0 &type->i_mutex_dir_key#2 rcu_read_lock rename_lock.seqcount irq_context: 0 &type->i_mutex_dir_key#2 &dentry->d_lock &wq irq_context: 0 rcu_read_lock &sb->s_type->i_lock_key#5 irq_context: 0 &x->wait#11 &p->pi_lock irq_context: 0 &x->wait#11 &p->pi_lock &rq->__lock irq_context: 0 &x->wait#11 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cpu_hotplug_lock cpuhp_state-up uevent_sock_mutex irq_context: 0 cpu_hotplug_lock cpuhp_state-up rcu_read_lock &pool->lock/1 irq_context: 0 cpu_hotplug_lock cpuhp_state-up rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 cpu_hotplug_lock cpuhp_state-up rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 cpu_hotplug_lock cpuhp_state-up rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 cpu_hotplug_lock cpuhp_state-up rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 cpu_hotplug_lock cpuhp_state-up rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cpu_hotplug_lock cpuhp_state-up running_helpers_waitq.lock irq_context: 0 cpu_hotplug_lock cpuhp_state-up subsys mutex#24 irq_context: 0 cpu_hotplug_lock cpuhp_state-up subsys mutex#24 &k->k_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) &pcp->lock &zone->lock &____s->seqcount irq_context: 0 cpu_hotplug_lock cpuhp_state-up lock kernfs_idr_lock pool_lock#2 irq_context: 0 cpu_hotplug_lock cpuhp_state-up &c->lock irq_context: 0 cpu_hotplug_lock cpuhp_state-up &pcp->lock &zone->lock irq_context: 0 cpu_hotplug_lock cpuhp_state-up &zone->lock irq_context: 0 cpu_hotplug_lock cpuhp_state-up &____s->seqcount irq_context: 0 cpu_hotplug_lock cpuhp_state-up rcu_read_lock &pool->lock/1 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) batched_entropy_u64.lock crngs.lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &mm->mmap_lock &rq->__lock irq_context: 0 cpu_hotplug_lock cpuhp_state-up rcu_read_lock pool_lock#2 irq_context: 0 cpu_hotplug_lock cpuhp_state-up &obj_hash[i].lock pool_lock irq_context: 0 cpu_hotplug_lock cpuhp_state-up rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 cpu_hotplug_lock cpuhp_state-up rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override pool_lock irq_context: 0 &sig->cred_guard_mutex rcu_read_lock rcu_node_0 irq_context: 0 &sig->cred_guard_mutex rcu_read_lock &rq->__lock irq_context: 0 cpu_hotplug_lock cpuhp_state-up subsys mutex#25 irq_context: 0 cpu_hotplug_lock cpuhp_state-up subsys 
mutex#25 &k->k_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) fill_pool_map-wait-type-override &pcp->lock &zone->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) fill_pool_map-wait-type-override &zone->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 crypto_alg_sem irq_context: 0 crypto_alg_sem &rq->__lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) lock pidmap_lock pool_lock#2 irq_context: 0 pm_qos_lock irq_context: 0 subsys mutex#26 irq_context: 0 subsys mutex#27 irq_context: 0 subsys mutex#27 &k->list_lock irq_context: 0 subsys mutex#27 &k->k_lock irq_context: 0 &sig->cred_guard_mutex rcu_read_lock &cfs_rq->removed.lock irq_context: 0 &sig->cred_guard_mutex rcu_read_lock &obj_hash[i].lock irq_context: 0 &sig->cred_guard_mutex rcu_node_0 irq_context: 0 &sig->cred_guard_mutex &rcu_state.gp_wq irq_context: 0 &sig->cred_guard_mutex &rcu_state.gp_wq &p->pi_lock irq_context: 0 &sig->cred_guard_mutex &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 &sig->cred_guard_mutex &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sig->cred_guard_mutex &rcu_state.gp_wq &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &sig->cred_guard_mutex &rq->__lock irq_context: 0 tasklist_lock &c->lock irq_context: 0 rcu_read_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 tasklist_lock &sighand->siglock &(&sig->stats_lock)->lock &____s->seqcount#5 fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 tasklist_lock &sighand->siglock &(&sig->stats_lock)->lock &____s->seqcount#5 fill_pool_map-wait-type-override pool_lock irq_context: 0 subsys mutex#28 irq_context: 0 rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override pool_lock irq_context: 0 pcpu_alloc_mutex &rq->__lock irq_context: 0 slab_mutex pcpu_alloc_mutex pcpu_alloc_mutex.wait_lock irq_context: 0 &type->s_umount_key#24/1 wq_pool_mutex irq_context: 0 &type->s_umount_key#24/1 wq_pool_mutex &wq->mutex irq_context: 0 &type->s_umount_key#24/1 mmu_notifier_invalidate_range_start irq_context: softirq rcu_callback rcu_read_lock &pool->lock irq_context: softirq rcu_callback rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: softirq rcu_callback rcu_read_lock &pool->lock &p->pi_lock irq_context: softirq rcu_callback rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: softirq rcu_callback rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&p->wq) irq_context: 0 (wq_completion)events (work_completion)(&p->wq) vmap_area_lock irq_context: 0 (wq_completion)events (work_completion)(&p->wq) &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&p->wq) purge_vmap_area_lock irq_context: 0 (wq_completion)events (work_completion)(&p->wq) rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&p->wq) pool_lock#2 irq_context: 0 tasklist_lock &sighand->siglock &(&sig->stats_lock)->lock &____s->seqcount#5 pidmap_lock &obj_hash[i].lock irq_context: 0 tasklist_lock &sighand->siglock &(&sig->stats_lock)->lock &____s->seqcount#5 pidmap_lock 
pool_lock#2 irq_context: 0 trace_event_sem trace_event_sem.wait_lock irq_context: 0 trace_event_sem &rq->__lock irq_context: 0 trace_event_sem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 trace_event_sem rcu_read_lock &rq->__lock irq_context: 0 trace_event_sem rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq rcu_callback &obj_hash[i].lock pool_lock irq_context: softirq &(({ do { const void *__vpp_verify = (typeof((&vmstat_work) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((&vmstat_work))) *)((&vmstat_work)))); (typeof((typeof(*((&vmstat_work))) *)((&vmstat_work)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); }))->timer irq_context: softirq &(({ do { const void *__vpp_verify = (typeof((&vmstat_work) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((&vmstat_work))) *)((&vmstat_work)))); (typeof((typeof(*((&vmstat_work))) *)((&vmstat_work)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); }))->timer rcu_read_lock &pool->lock irq_context: softirq &(({ do { const void *__vpp_verify = (typeof((&vmstat_work) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((&vmstat_work))) *)((&vmstat_work)))); (typeof((typeof(*((&vmstat_work))) *)((&vmstat_work)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); }))->timer rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: softirq &(({ do { const void *__vpp_verify = (typeof((&vmstat_work) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((&vmstat_work))) *)((&vmstat_work)))); (typeof((typeof(*((&vmstat_work))) *)((&vmstat_work)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); }))->timer rcu_read_lock &pool->lock &p->pi_lock irq_context: softirq &(({ do { const void *__vpp_verify = (typeof((&vmstat_work) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((&vmstat_work))) *)((&vmstat_work)))); (typeof((typeof(*((&vmstat_work))) *)((&vmstat_work)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); }))->timer rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: softirq &(({ do { const void *__vpp_verify = (typeof((&vmstat_work) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((&vmstat_work))) *)((&vmstat_work)))); (typeof((typeof(*((&vmstat_work))) *)((&vmstat_work)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); }))->timer rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq &(({ do { const void *__vpp_verify = (typeof((&vmstat_work) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((&vmstat_work))) *)((&vmstat_work)))); (typeof((typeof(*((&vmstat_work))) *)((&vmstat_work)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); }))->timer rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: softirq mm/memcontrol.c:589 irq_context: softirq mm/memcontrol.c:589 rcu_read_lock &pool->lock/1 irq_context: softirq mm/memcontrol.c:589 rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound (stats_flush_dwork).work irq_context: 0 (wq_completion)events_unbound (stats_flush_dwork).work cgroup_rstat_lock irq_context: 0 
(wq_completion)events_unbound (stats_flush_dwork).work cgroup_rstat_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 (wq_completion)events_unbound (stats_flush_dwork).work &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound (stats_flush_dwork).work &base->lock irq_context: 0 (wq_completion)events_unbound (stats_flush_dwork).work &base->lock &obj_hash[i].lock irq_context: 0 &pool->lock/1 &base->lock irq_context: 0 &pool->lock/1 &base->lock &obj_hash[i].lock irq_context: softirq &(&group->avgs_work)->timer irq_context: softirq &(&group->avgs_work)->timer rcu_read_lock &pool->lock irq_context: softirq &(&group->avgs_work)->timer rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: softirq &(&group->avgs_work)->timer rcu_read_lock &pool->lock &p->pi_lock irq_context: softirq &(&group->avgs_work)->timer rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: softirq &(&group->avgs_work)->timer rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&(&group->avgs_work)->work) irq_context: 0 (wq_completion)events (work_completion)(&(&group->avgs_work)->work) &group->avgs_lock irq_context: 0 (wq_completion)events (work_completion)(&(&group->avgs_work)->work) &group->avgs_lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&(&group->avgs_work)->work) &group->avgs_lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&(&group->avgs_work)->work) &group->avgs_lock &base->lock irq_context: 0 (wq_completion)events (work_completion)(&(&group->avgs_work)->work) &group->avgs_lock &base->lock &obj_hash[i].lock irq_context: softirq mm/memcontrol.c:589 rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: softirq mm/memcontrol.c:589 rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: softirq mm/memcontrol.c:589 rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq &(&krcp->monitor_work)->timer irq_context: softirq &(&krcp->monitor_work)->timer rcu_read_lock &pool->lock irq_context: softirq &(&krcp->monitor_work)->timer rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: softirq &(&krcp->monitor_work)->timer rcu_read_lock &pool->lock &p->pi_lock irq_context: softirq &(&krcp->monitor_work)->timer rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: softirq &(&krcp->monitor_work)->timer rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq &(&tbl->managed_work)->timer irq_context: softirq &(&tbl->managed_work)->timer rcu_read_lock &pool->lock irq_context: softirq &(&tbl->managed_work)->timer rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: softirq &(&tbl->managed_work)->timer rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 (wq_completion)events (work_completion)(&(&krcp->monitor_work)->work) irq_context: 0 (wq_completion)events (work_completion)(&(&krcp->monitor_work)->work) krc.lock irq_context: 0 (wq_completion)events (work_completion)(&(&krcp->monitor_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&(&krcp->monitor_work)->work) rcu_callback &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&(&krcp->monitor_work)->work) rcu_callback pool_lock#2 irq_context: 0 &type->s_umount_key#24/1 &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &type->s_umount_key#24/1 &xa->xa_lock#6 irq_context: 0 &type->s_umount_key#24/1 
&xa->xa_lock#6 pool_lock#2 irq_context: 0 &type->s_umount_key#24/1 lock#4 irq_context: 0 &type->s_umount_key#24/1 &mapping->private_lock irq_context: 0 &type->s_umount_key#24/1 tk_core.seq.seqcount irq_context: 0 &type->s_umount_key#24/1 &dd->lock irq_context: 0 &type->s_umount_key#24/1 rcu_read_lock &pool->lock irq_context: 0 &type->s_umount_key#24/1 rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 &type->s_umount_key#24/1 rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 &type->s_umount_key#24/1 rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 &type->s_umount_key#24/1 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 &type->s_umount_key#24/1 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &type->s_umount_key#24/1 bit_wait_table + i irq_context: 0 (wq_completion)kblockd (work_completion)(&(&hctx->run_work)->work) rcu_read_lock &base->lock irq_context: 0 (wq_completion)kblockd (work_completion)(&(&hctx->run_work)->work) rcu_read_lock &base->lock &obj_hash[i].lock irq_context: softirq bit_wait_table + i irq_context: softirq bit_wait_table + i &p->pi_lock irq_context: softirq bit_wait_table + i &p->pi_lock &rq->__lock irq_context: softirq bit_wait_table + i &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &type->s_umount_key#27/1 ext4_li_mtx irq_context: 0 &type->s_umount_key#24/1 &wq->mutex &x->wait#10 irq_context: 0 &type->s_umount_key#24/1 wq_mayday_lock irq_context: 0 &p->alloc_lock &x->wait &p->pi_lock irq_context: 0 &p->alloc_lock &x->wait &p->pi_lock &rq->__lock irq_context: 0 &p->alloc_lock &x->wait &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &type->s_umount_key#24/1 wq_pool_mutex &wq->mutex &pool->lock irq_context: 0 &type->s_umount_key#24/1 rcu_state.exp_mutex rcu_node_0 irq_context: 0 &type->s_umount_key#24/1 rcu_state.exp_mutex &obj_hash[i].lock irq_context: 0 &type->s_umount_key#24/1 rcu_state.exp_mutex rcu_read_lock &pool->lock irq_context: 0 &type->s_umount_key#24/1 rcu_state.exp_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 &type->s_umount_key#24/1 rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 &type->s_umount_key#24/1 rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 &type->s_umount_key#24/1 rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &type->s_umount_key#24/1 rcu_state.exp_mutex &rnp->exp_wq[2] irq_context: 0 &type->s_umount_key#24/1 rcu_state.exp_mutex &rq->__lock irq_context: 0 &type->s_umount_key#24/1 &sbi->old_work_lock irq_context: 0 &type->s_umount_key#24/1 (work_completion)(&(&sbi->old_work)->work) irq_context: 0 &type->s_umount_key#24/1 shrinker_rwsem irq_context: 0 &type->s_umount_key#24/1 sb_lock irq_context: 0 &xa->xa_lock#12 irq_context: 0 sb_lock &obj_hash[i].lock irq_context: 0 sb_lock pool_lock#2 irq_context: 0 tomoyo_ss quarantine_lock irq_context: 0 tomoyo_ss tomoyo_policy_lock &c->lock irq_context: 0 &bdev->bd_fsfreeze_mutex &type->s_umount_key#25/1 irq_context: 0 &bdev->bd_fsfreeze_mutex &type->s_umount_key#25/1 fs_reclaim irq_context: 0 pmus_lock fs_reclaim irq_context: 0 pmus_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 pmus_lock &k->list_lock irq_context: 0 pmus_lock lock irq_context: 0 pmus_lock lock kernfs_idr_lock irq_context: 0 pmus_lock &root->kernfs_rwsem irq_context: 0 pmus_lock &root->kernfs_rwsem &root->kernfs_iattr_rwsem 
irq_context: 0 pmus_lock uevent_sock_mutex irq_context: 0 pmus_lock rcu_read_lock &pool->lock/1 irq_context: 0 pmus_lock rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 pmus_lock rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 pmus_lock rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 pmus_lock running_helpers_waitq.lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) batched_entropy_u64.lock crngs.lock base_crng.lock irq_context: 0 pmus_lock &x->wait#9 irq_context: 0 pmus_lock bus_type_sem irq_context: 0 pmus_lock sysfs_symlink_target_lock irq_context: 0 pmus_lock &k->k_lock irq_context: 0 pmus_lock &root->kernfs_rwsem irq_context: 0 pmus_lock &c->lock irq_context: 0 pmus_lock &____s->seqcount irq_context: 0 pmus_lock &dev->power.lock irq_context: 0 pmus_lock dpm_list_mtx irq_context: 0 pmus_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 pmus_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pmus_lock &dev->mutex &k->list_lock irq_context: 0 pmus_lock &dev->mutex &k->k_lock irq_context: 0 pmus_lock &dev->mutex &dev->power.lock irq_context: 0 pmus_lock subsys mutex#29 irq_context: 0 pmus_lock rcu_read_lock &pool->lock/1 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 pmus_lock lock kernfs_idr_lock pool_lock#2 irq_context: 0 pmus_lock &pcp->lock &zone->lock irq_context: 0 pmus_lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 key_user_lock irq_context: 0 key_serial_lock irq_context: 0 key_construction_mutex irq_context: 0 &type->lock_class irq_context: 0 &type->lock_class keyring_serialise_link_lock irq_context: 0 &type->lock_class keyring_serialise_link_lock fs_reclaim irq_context: 0 &type->lock_class keyring_serialise_link_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &type->lock_class keyring_serialise_link_lock pool_lock#2 irq_context: 0 &type->lock_class keyring_serialise_link_lock &obj_hash[i].lock irq_context: 0 keyring_serialise_link_lock irq_context: 0 &pgdat->kswapd_lock fs_reclaim irq_context: 0 &pgdat->kswapd_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &pgdat->kswapd_lock pool_lock#2 irq_context: 0 &pgdat->kswapd_lock kthread_create_lock irq_context: 0 &pgdat->kswapd_lock &p->pi_lock irq_context: 0 &pgdat->kswapd_lock &p->pi_lock &rq->__lock irq_context: 0 &pgdat->kswapd_lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &pgdat->kswapd_lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &pgdat->kswapd_lock &x->wait irq_context: 0 &pgdat->kswapd_lock &rq->__lock irq_context: 0 &pgdat->kswapd_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &bdev->bd_fsfreeze_mutex &type->s_umount_key#25/1 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &bdev->bd_fsfreeze_mutex &type->s_umount_key#25/1 pool_lock#2 irq_context: 0 &bdev->bd_fsfreeze_mutex &type->s_umount_key#25/1 pcpu_alloc_mutex irq_context: 0 &bdev->bd_fsfreeze_mutex &type->s_umount_key#25/1 pcpu_alloc_mutex pcpu_lock irq_context: 0 &bdev->bd_fsfreeze_mutex &type->s_umount_key#25/1 shrinker_rwsem irq_context: 0 &bdev->bd_fsfreeze_mutex &type->s_umount_key#25/1 list_lrus_mutex irq_context: 0 &bdev->bd_fsfreeze_mutex &type->s_umount_key#25/1 sb_lock irq_context: 0 &x->wait &p->pi_lock &cfs_rq->removed.lock irq_context: 0 &x->wait &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &pgdat->kswapd_lock &obj_hash[i].lock irq_context: 0 &pgdat->kswapd_wait 
irq_context: 0 &type->s_umount_key#25/1 irq_context: 0 &type->s_umount_key#25/1 fs_reclaim irq_context: 0 &type->s_umount_key#25/1 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#25/1 pool_lock#2 irq_context: 0 &type->s_umount_key#25/1 mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#25/1 &____s->seqcount irq_context: 0 &type->s_umount_key#25/1 &xa->xa_lock#6 irq_context: 0 &type->s_umount_key#25/1 lock#4 irq_context: 0 &type->s_umount_key#25/1 &mapping->private_lock irq_context: 0 &type->s_umount_key#25/1 tk_core.seq.seqcount irq_context: 0 &type->s_umount_key#25/1 &dd->lock irq_context: 0 &type->s_umount_key#25/1 &obj_hash[i].lock irq_context: 0 &type->s_umount_key#25/1 rcu_read_lock &pool->lock irq_context: 0 &type->s_umount_key#25/1 rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 &type->s_umount_key#25/1 rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 &type->s_umount_key#25/1 rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 &type->s_umount_key#25/1 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 &type->s_umount_key#25/1 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &type->s_umount_key#25/1 &rq->__lock irq_context: 0 &type->s_umount_key#25/1 bit_wait_table + i irq_context: 0 &type->s_umount_key#25/1 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &type->s_umount_key#25/1 &sb->s_type->i_lock_key#3 irq_context: 0 &type->s_umount_key#25/1 &sb->s_type->i_lock_key#3 &xa->xa_lock#6 irq_context: 0 &type->s_umount_key#25/1 lock#4 &lruvec->lru_lock irq_context: 0 list_lrus_mutex irq_context: 0 drivers_lock irq_context: 0 cpu_hotplug_lock cpuhp_state_mutex fs_reclaim irq_context: 0 cpu_hotplug_lock cpuhp_state_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 damon_dbgfs_lock irq_context: 0 damon_dbgfs_lock fs_reclaim irq_context: 0 damon_dbgfs_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 damon_dbgfs_lock pool_lock#2 irq_context: 0 damon_dbgfs_lock tk_core.seq.seqcount irq_context: 0 damon_dbgfs_lock damon_ops_lock irq_context: 0 damon_dbgfs_lock pin_fs_lock irq_context: 0 damon_dbgfs_lock &sb->s_type->i_mutex_key#3 irq_context: 0 damon_dbgfs_lock &sb->s_type->i_mutex_key#3 rename_lock.seqcount irq_context: 0 damon_dbgfs_lock &sb->s_type->i_mutex_key#3 fs_reclaim irq_context: 0 damon_dbgfs_lock &sb->s_type->i_mutex_key#3 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 damon_dbgfs_lock &sb->s_type->i_mutex_key#3 &pcp->lock &zone->lock irq_context: 0 damon_dbgfs_lock &sb->s_type->i_mutex_key#3 &zone->lock irq_context: 0 damon_dbgfs_lock &sb->s_type->i_mutex_key#3 &____s->seqcount irq_context: 0 damon_dbgfs_lock &sb->s_type->i_mutex_key#3 pool_lock#2 irq_context: 0 damon_dbgfs_lock &sb->s_type->i_mutex_key#3 rcu_read_lock pool_lock#2 irq_context: 0 damon_dbgfs_lock &sb->s_type->i_mutex_key#3 &obj_hash[i].lock irq_context: 0 damon_dbgfs_lock &sb->s_type->i_mutex_key#3 &dentry->d_lock irq_context: 0 damon_dbgfs_lock &sb->s_type->i_mutex_key#3 rcu_read_lock rename_lock.seqcount irq_context: 0 damon_dbgfs_lock &sb->s_type->i_mutex_key#3 &dentry->d_lock &wq irq_context: 0 damon_dbgfs_lock &sb->s_type->i_mutex_key#3 mmu_notifier_invalidate_range_start irq_context: 0 damon_dbgfs_lock &sb->s_type->i_mutex_key#3 &sb->s_type->i_lock_key#7 irq_context: 0 damon_dbgfs_lock &sb->s_type->i_mutex_key#3 &s->s_inode_list_lock irq_context: 0 damon_dbgfs_lock &sb->s_type->i_mutex_key#3 tk_core.seq.seqcount 
irq_context: 0 damon_dbgfs_lock &sb->s_type->i_mutex_key#3 &sb->s_type->i_lock_key#7 &dentry->d_lock irq_context: 0 damon_dbgfs_lock &sb->s_type->i_mutex_key#3 &c->lock irq_context: 0 &type->s_umount_key#20/1 irq_context: 0 &type->s_umount_key#20/1 fs_reclaim irq_context: 0 &type->s_umount_key#20/1 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#20/1 pool_lock#2 irq_context: 0 &type->s_umount_key#20/1 pcpu_alloc_mutex irq_context: 0 &type->s_umount_key#20/1 pcpu_alloc_mutex pcpu_lock irq_context: 0 &type->s_umount_key#20/1 shrinker_rwsem irq_context: 0 &type->s_umount_key#20/1 list_lrus_mutex irq_context: 0 &type->s_umount_key#20/1 sb_lock irq_context: 0 &type->s_umount_key#20/1 sb_lock unnamed_dev_ida.xa_lock irq_context: 0 &type->s_umount_key#20/1 mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#20/1 &sb->s_type->i_lock_key#18 irq_context: 0 &type->s_umount_key#20/1 &s->s_inode_list_lock irq_context: 0 &type->s_umount_key#20/1 tk_core.seq.seqcount irq_context: 0 &type->s_umount_key#20/1 &sb->s_type->i_lock_key#18 &dentry->d_lock irq_context: 0 &type->s_umount_key#20/1 &dentry->d_lock irq_context: 0 dq_list_lock irq_context: 0 &type->s_umount_key#21/1 irq_context: 0 &type->s_umount_key#21/1 fs_reclaim irq_context: 0 &type->s_umount_key#21/1 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#21/1 pool_lock#2 irq_context: 0 &type->s_umount_key#21/1 pcpu_alloc_mutex irq_context: 0 &type->s_umount_key#21/1 pcpu_alloc_mutex pcpu_lock irq_context: 0 &type->s_umount_key#21/1 shrinker_rwsem irq_context: 0 &type->s_umount_key#21/1 list_lrus_mutex irq_context: 0 &type->s_umount_key#21/1 sb_lock irq_context: 0 &type->s_umount_key#21/1 sb_lock unnamed_dev_ida.xa_lock irq_context: 0 &type->s_umount_key#21/1 mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#21/1 &sb->s_type->i_lock_key#19 irq_context: 0 &type->s_umount_key#21/1 &s->s_inode_list_lock irq_context: 0 &type->s_umount_key#21/1 tk_core.seq.seqcount irq_context: 0 &type->s_umount_key#21/1 &sb->s_type->i_lock_key#19 &dentry->d_lock irq_context: 0 &type->s_umount_key#21/1 &dentry->d_lock irq_context: 0 configfs_subsystem_mutex irq_context: 0 &sb->s_type->i_mutex_key#7/1 irq_context: 0 &sb->s_type->i_mutex_key#7/1 fs_reclaim irq_context: 0 &sb->s_type->i_mutex_key#7/1 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &sb->s_type->i_mutex_key#7/1 pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#7/1 &dentry->d_lock irq_context: 0 &sb->s_type->i_mutex_key#7/1 &____s->seqcount irq_context: 0 &sb->s_type->i_mutex_key#7/1 &c->lock irq_context: 0 &sb->s_type->i_mutex_key#7/1 configfs_dirent_lock irq_context: 0 &sb->s_type->i_mutex_key#7/1 mmu_notifier_invalidate_range_start irq_context: 0 &sb->s_type->i_mutex_key#7/1 &sb->s_type->i_lock_key#19 irq_context: 0 &sb->s_type->i_mutex_key#7/1 &s->s_inode_list_lock irq_context: 0 &sb->s_type->i_mutex_key#7/1 tk_core.seq.seqcount irq_context: 0 &sb->s_type->i_mutex_key#7/1 &sb->s_type->i_lock_key#19 &dentry->d_lock irq_context: 0 &sb->s_type->i_mutex_key#7/1 &default_group_class[depth - 1]/2 irq_context: softirq &(&kfence_timer)->timer rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &ecryptfs_kthread_ctl.wait irq_context: 0 ecryptfs_daemon_hash_mux irq_context: 0 ecryptfs_daemon_hash_mux fs_reclaim irq_context: 0 ecryptfs_daemon_hash_mux fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 ecryptfs_daemon_hash_mux pool_lock#2 irq_context: 0 
ecryptfs_msg_ctx_lists_mux irq_context: 0 ecryptfs_msg_ctx_lists_mux &ecryptfs_msg_ctx_arr[i].mux irq_context: 0 pernet_ops_rwsem tk_core.seq.seqcount irq_context: 0 pernet_ops_rwsem &k->list_lock irq_context: 0 pernet_ops_rwsem lock irq_context: 0 pernet_ops_rwsem lock kernfs_idr_lock irq_context: 0 pernet_ops_rwsem &root->kernfs_rwsem irq_context: 0 pernet_ops_rwsem &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 pernet_ops_rwsem uevent_sock_mutex fs_reclaim irq_context: 0 pernet_ops_rwsem uevent_sock_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 pernet_ops_rwsem uevent_sock_mutex pool_lock#2 irq_context: 0 pernet_ops_rwsem uevent_sock_mutex nl_table_lock irq_context: 0 pernet_ops_rwsem uevent_sock_mutex &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem uevent_sock_mutex nl_table_wait.lock irq_context: 0 pernet_ops_rwsem rcu_read_lock &pool->lock/1 irq_context: 0 pernet_ops_rwsem rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 pernet_ops_rwsem rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 pernet_ops_rwsem rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 pernet_ops_rwsem rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem running_helpers_waitq.lock irq_context: 0 nfs_version_lock irq_context: 0 key_types_sem irq_context: 0 key_types_sem (console_sem).lock irq_context: 0 key_types_sem console_lock console_srcu console_owner_lock irq_context: 0 key_types_sem console_lock console_srcu console_owner irq_context: 0 key_types_sem console_lock console_srcu console_owner &port_lock_key irq_context: 0 key_types_sem console_lock console_srcu console_owner console_owner_lock irq_context: 0 pnfs_spinlock irq_context: 0 pernet_ops_rwsem &sn->pipefs_sb_lock irq_context: 0 pernet_ops_rwsem krc.lock irq_context: 0 pernet_ops_rwsem krc.lock &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem krc.lock hrtimer_bases.lock irq_context: 0 pernet_ops_rwsem krc.lock hrtimer_bases.lock tk_core.seq.seqcount irq_context: 0 pernet_ops_rwsem krc.lock hrtimer_bases.lock &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem krc.lock &base->lock irq_context: 0 pernet_ops_rwsem krc.lock &base->lock &obj_hash[i].lock irq_context: hardirq rcu_read_lock &pool->lock &p->pi_lock &cfs_rq->removed.lock irq_context: 0 pernet_ops_rwsem &rq->__lock irq_context: 0 pernet_ops_rwsem batched_entropy_u32.lock crngs.lock irq_context: 0 pernet_ops_rwsem &s->s_inode_list_lock irq_context: 0 nls_lock irq_context: 0 slab_mutex rcu_read_lock pool_lock#2 irq_context: 0 slab_mutex &obj_hash[i].lock irq_context: 0 jffs2_compressor_list_lock irq_context: 0 &child->perf_event_mutex &rq->__lock irq_context: 0 next_tag_value_lock irq_context: softirq (&timer.timer) &p->pi_lock &cfs_rq->removed.lock irq_context: 0 log_redrive_lock irq_context: 0 &TxAnchor.LazyLock irq_context: 0 &TxAnchor.LazyLock jfs_commit_thread_wait.lock irq_context: 0 jfsTxnLock irq_context: 0 ocfs2_stack_lock irq_context: 0 ocfs2_stack_lock (console_sem).lock irq_context: 0 ocfs2_stack_lock console_lock console_srcu console_owner_lock irq_context: 0 ocfs2_stack_lock console_lock console_srcu console_owner irq_context: 0 ocfs2_stack_lock console_lock console_srcu console_owner &port_lock_key irq_context: 0 ocfs2_stack_lock console_lock console_srcu console_owner console_owner_lock irq_context: 0 o2hb_callback_sem irq_context: 0 o2net_handler_lock irq_context: 0 
&type->s_umount_key#22/1 irq_context: 0 &type->s_umount_key#22/1 fs_reclaim irq_context: 0 &type->s_umount_key#22/1 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#22/1 pool_lock#2 irq_context: 0 &type->s_umount_key#22/1 pcpu_alloc_mutex irq_context: 0 &type->s_umount_key#22/1 pcpu_alloc_mutex pcpu_lock irq_context: 0 &type->s_umount_key#22/1 shrinker_rwsem irq_context: 0 &type->s_umount_key#22/1 list_lrus_mutex irq_context: 0 &type->s_umount_key#22/1 sb_lock irq_context: 0 &type->s_umount_key#22/1 sb_lock unnamed_dev_ida.xa_lock irq_context: 0 &type->s_umount_key#22/1 &____s->seqcount irq_context: 0 &type->s_umount_key#22/1 &c->lock irq_context: 0 &type->s_umount_key#22/1 mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#22/1 &sb->s_type->i_lock_key#20 irq_context: 0 &type->s_umount_key#22/1 &s->s_inode_list_lock irq_context: 0 &type->s_umount_key#22/1 tk_core.seq.seqcount irq_context: 0 &type->s_umount_key#22/1 &sb->s_type->i_lock_key#20 &dentry->d_lock irq_context: 0 &type->s_umount_key#22/1 &dentry->d_lock irq_context: 0 pernet_ops_rwsem nf_hook_mutex irq_context: 0 pernet_ops_rwsem nf_hook_mutex fs_reclaim irq_context: 0 pernet_ops_rwsem nf_hook_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 pernet_ops_rwsem nf_hook_mutex &____s->seqcount irq_context: 0 pernet_ops_rwsem nf_hook_mutex &pcp->lock &zone->lock irq_context: 0 pernet_ops_rwsem nf_hook_mutex &zone->lock irq_context: 0 pernet_ops_rwsem nf_hook_mutex pool_lock#2 irq_context: 0 pernet_ops_rwsem nf_hook_mutex &c->lock irq_context: 0 pernet_ops_rwsem cpu_hotplug_lock irq_context: 0 pernet_ops_rwsem cpu_hotplug_lock jump_label_mutex irq_context: 0 pernet_ops_rwsem cpu_hotplug_lock jump_label_mutex text_mutex irq_context: 0 pernet_ops_rwsem cpu_hotplug_lock jump_label_mutex text_mutex ptlock_ptr(page)#2 irq_context: 0 alg_types_sem irq_context: 0 alg_types_sem fs_reclaim irq_context: 0 alg_types_sem fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 alg_types_sem pool_lock#2 irq_context: 0 dma_list_mutex irq_context: 0 asymmetric_key_parsers_sem irq_context: 0 asymmetric_key_parsers_sem (console_sem).lock irq_context: 0 asymmetric_key_parsers_sem console_lock console_srcu console_owner_lock irq_context: 0 asymmetric_key_parsers_sem console_lock console_srcu console_owner irq_context: 0 asymmetric_key_parsers_sem console_lock console_srcu console_owner &port_lock_key irq_context: 0 asymmetric_key_parsers_sem console_lock console_srcu console_owner console_owner_lock irq_context: 0 blkcg_pol_register_mutex irq_context: 0 blkcg_pol_register_mutex blkcg_pol_mutex irq_context: 0 blkcg_pol_register_mutex cgroup_mutex irq_context: 0 blkcg_pol_register_mutex cgroup_mutex &root->kernfs_rwsem irq_context: 0 blkcg_pol_register_mutex blkcg_pol_mutex fs_reclaim irq_context: 0 blkcg_pol_register_mutex blkcg_pol_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 blkcg_pol_register_mutex blkcg_pol_mutex pool_lock#2 irq_context: 0 blkcg_pol_register_mutex cgroup_mutex fs_reclaim irq_context: 0 blkcg_pol_register_mutex cgroup_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 blkcg_pol_register_mutex cgroup_mutex pool_lock#2 irq_context: 0 blkcg_pol_register_mutex cgroup_mutex lock irq_context: 0 blkcg_pol_register_mutex cgroup_mutex lock kernfs_idr_lock irq_context: 0 blkcg_pol_register_mutex cgroup_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 elv_list_lock irq_context: 0 crc_t10dif_mutex irq_context: 0 
crc_t10dif_mutex crypto_alg_sem irq_context: 0 crc_t10dif_mutex fs_reclaim irq_context: 0 crc_t10dif_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 crc_t10dif_mutex pool_lock#2 irq_context: 0 crc64_rocksoft_mutex irq_context: 0 crc64_rocksoft_mutex crypto_alg_sem irq_context: 0 crc64_rocksoft_mutex fs_reclaim irq_context: 0 crc64_rocksoft_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 crc64_rocksoft_mutex pool_lock#2 irq_context: 0 ts_mod_lock irq_context: 0 &sig->cred_guard_mutex fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 &sig->cred_guard_mutex fill_pool_map-wait-type-override &c->lock irq_context: 0 &sig->cred_guard_mutex fill_pool_map-wait-type-override &pcp->lock &zone->lock irq_context: 0 &sig->cred_guard_mutex fill_pool_map-wait-type-override &zone->lock irq_context: 0 &sig->cred_guard_mutex fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 &sig->cred_guard_mutex fill_pool_map-wait-type-override pool_lock irq_context: 0 &dev->mutex device_links_srcu irq_context: 0 &dev->mutex fwnode_link_lock irq_context: 0 &dev->mutex device_links_lock irq_context: 0 &dev->mutex sysfs_symlink_target_lock irq_context: 0 &dev->mutex fs_reclaim irq_context: 0 &dev->mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex pool_lock#2 irq_context: 0 &dev->mutex lock irq_context: 0 &dev->mutex lock kernfs_idr_lock irq_context: 0 &dev->mutex &root->kernfs_rwsem irq_context: 0 &dev->mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 &dev->mutex &x->wait#9 irq_context: 0 &dev->mutex &obj_hash[i].lock irq_context: 0 &dev->mutex gdp_mutex irq_context: 0 &dev->mutex gdp_mutex &k->list_lock irq_context: 0 &dev->mutex gdp_mutex fs_reclaim irq_context: 0 &dev->mutex gdp_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex gdp_mutex pool_lock#2 irq_context: 0 &dev->mutex gdp_mutex lock irq_context: 0 &dev->mutex gdp_mutex lock kernfs_idr_lock irq_context: 0 &dev->mutex gdp_mutex &root->kernfs_rwsem irq_context: 0 &dev->mutex gdp_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 &dev->mutex &c->lock irq_context: 0 &dev->mutex &____s->seqcount irq_context: 0 &dev->mutex bus_type_sem irq_context: 0 &dev->mutex lock kernfs_idr_lock &c->lock irq_context: 0 &dev->mutex lock kernfs_idr_lock &____s->seqcount irq_context: 0 &dev->mutex lock kernfs_idr_lock pool_lock#2 irq_context: 0 &dev->mutex &root->kernfs_rwsem irq_context: 0 &dev->mutex dpm_list_mtx irq_context: 0 &dev->mutex uevent_sock_mutex irq_context: 0 &dev->mutex rcu_read_lock &pool->lock/1 irq_context: 0 &dev->mutex rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 &dev->mutex rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 &dev->mutex rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 &dev->mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 &dev->mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &dev->mutex running_helpers_waitq.lock irq_context: 0 &dev->mutex &rq->__lock irq_context: 0 &dev->mutex subsys mutex#30 irq_context: 0 &dev->mutex subsys mutex#30 &k->k_lock irq_context: 0 &dev->mutex (console_sem).lock irq_context: 0 &dev->mutex console_lock console_srcu console_owner_lock irq_context: 0 &dev->mutex console_lock console_srcu console_owner irq_context: 0 &dev->mutex console_lock console_srcu console_owner &port_lock_key irq_context: 0 &dev->mutex console_lock console_srcu console_owner 
console_owner_lock irq_context: 0 &dev->mutex input_mutex irq_context: 0 &dev->mutex input_mutex fs_reclaim irq_context: 0 &dev->mutex input_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex input_mutex pool_lock#2 irq_context: 0 &dev->mutex input_mutex &dev->mutex#2 irq_context: 0 &dev->mutex input_mutex input_devices_poll_wait.lock irq_context: 0 &dev->mutex wakeup_ida.xa_lock irq_context: 0 &dev->mutex &pcp->lock &zone->lock irq_context: 0 &dev->mutex &zone->lock irq_context: 0 &dev->mutex rcu_read_lock pool_lock#2 irq_context: 0 &dev->mutex subsys mutex#15 irq_context: 0 &dev->mutex subsys mutex#15 &k->k_lock irq_context: 0 &dev->mutex events_lock irq_context: 0 &dev->mutex wakeup_srcu_srcu_usage.lock irq_context: 0 &dev->mutex wakeup_srcu_srcu_usage.lock &obj_hash[i].lock irq_context: 0 &dev->mutex &ACCESS_PRIVATE(sdp, lock) irq_context: 0 &dev->mutex wakeup_srcu irq_context: 0 &dev->mutex wakeup_srcu_srcu_usage.lock &ACCESS_PRIVATE(sdp, lock) irq_context: 0 &dev->mutex wakeup_srcu_srcu_usage.lock rcu_read_lock &pool->lock irq_context: 0 &dev->mutex wakeup_srcu_srcu_usage.lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 &dev->mutex wakeup_srcu_srcu_usage.lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 &dev->mutex wakeup_srcu_srcu_usage.lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 &dev->mutex wakeup_srcu_srcu_usage.lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)rcu_gp (work_completion)(&(&ssp->srcu_sup->work)->work) &ssp->srcu_sup->srcu_gp_mutex wakeup_srcu_srcu_usage.lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&(&ssp->srcu_sup->work)->work) wakeup_srcu_srcu_usage.lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&(&ssp->srcu_sup->work)->work) rcu_read_lock &pool->lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)rcu_gp (work_completion)(&(&ssp->srcu_sup->work)->work) rcu_read_lock &pool->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&(&ssp->srcu_sup->work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&(&ssp->srcu_sup->work)->work) &base->lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&(&ssp->srcu_sup->work)->work) &base->lock &obj_hash[i].lock irq_context: 0 &dev->mutex &x->wait#3 irq_context: 0 &dev->mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq &(&ssp->srcu_sup->work)->timer irq_context: softirq &(&ssp->srcu_sup->work)->timer rcu_read_lock &pool->lock irq_context: softirq &(&ssp->srcu_sup->work)->timer rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: softirq &(&ssp->srcu_sup->work)->timer rcu_read_lock &pool->lock &p->pi_lock irq_context: softirq &(&ssp->srcu_sup->work)->timer rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: softirq &(&ssp->srcu_sup->work)->timer rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)rcu_gp (work_completion)(&(&ssp->srcu_sup->work)->work) &ssp->srcu_sup->srcu_gp_mutex &ssp->srcu_sup->srcu_cb_mutex wakeup_srcu_srcu_usage.lock irq_context: 0 &dev->mutex (&ws->timer) irq_context: 0 &dev->mutex &base->lock irq_context: 0 &dev->mutex &k->k_lock klist_remove_lock irq_context: 0 &dev->mutex &root->kernfs_rwsem kernfs_idr_lock irq_context: 0 &dev->mutex &root->kernfs_rwsem &obj_hash[i].lock irq_context: 0 &dev->mutex &root->kernfs_rwsem 
pool_lock#2 irq_context: 0 &dev->mutex subsys mutex#15 &k->k_lock klist_remove_lock irq_context: 0 &dev->mutex deferred_probe_mutex irq_context: 0 &dev->mutex mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex kernfs_idr_lock irq_context: 0 &dev->mutex &ws->lock irq_context: 0 &dev->mutex deleted_ws.lock irq_context: 0 &dev->mutex semaphore->lock irq_context: 0 &dev->mutex *(&acpi_gbl_hardware_lock) irq_context: 0 &dev->mutex probe_waitqueue.lock irq_context: 0 &dev->mutex &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &dev->mutex &obj_hash[i].lock pool_lock irq_context: 0 &dev->mutex rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 &dev->mutex rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override &c->lock irq_context: 0 &dev->mutex rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 &dev->mutex rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override pool_lock irq_context: 0 register_count_mutex irq_context: 0 register_count_mutex &k->list_lock irq_context: 0 register_count_mutex fs_reclaim irq_context: 0 register_count_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 register_count_mutex pool_lock#2 irq_context: 0 register_count_mutex lock irq_context: 0 register_count_mutex lock kernfs_idr_lock irq_context: 0 register_count_mutex &root->kernfs_rwsem irq_context: 0 register_count_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 register_count_mutex &k->k_lock irq_context: 0 register_count_mutex uevent_sock_mutex irq_context: 0 register_count_mutex &obj_hash[i].lock irq_context: 0 register_count_mutex rcu_read_lock &pool->lock/1 irq_context: 0 register_count_mutex rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 register_count_mutex rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 register_count_mutex rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 register_count_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 register_count_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 register_count_mutex running_helpers_waitq.lock irq_context: 0 register_count_mutex &rq->__lock irq_context: 0 (cpufreq_policy_notifier_list).rwsem irq_context: 0 &dev->mutex cpu_add_remove_lock irq_context: 0 &dev->mutex tick_broadcast_lock irq_context: 0 &dev->mutex cpuidle_driver_lock irq_context: 0 &dev->mutex cpuidle_lock irq_context: 0 &dev->mutex cpuidle_lock fs_reclaim irq_context: 0 &dev->mutex cpuidle_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex cpuidle_lock pool_lock#2 irq_context: 0 &dev->mutex cpuidle_lock lock irq_context: 0 &dev->mutex cpuidle_lock lock kernfs_idr_lock irq_context: 0 &dev->mutex cpuidle_lock &root->kernfs_rwsem irq_context: 0 &dev->mutex cpuidle_lock &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 &dev->mutex cpuidle_lock lock kernfs_idr_lock pool_lock#2 irq_context: 0 &dev->mutex cpuidle_lock &c->lock irq_context: 0 &dev->mutex cpuidle_lock &____s->seqcount irq_context: 0 &dev->mutex thermal_cdev_ida.xa_lock irq_context: 0 &dev->mutex cpufreq_driver_lock irq_context: 0 &dev->mutex subsys mutex#31 irq_context: 0 &dev->mutex subsys mutex#31 &k->k_lock irq_context: 0 &dev->mutex thermal_list_lock irq_context: 0 rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override &c->lock irq_context: 0 rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 
(wq_completion)events (work_completion)(&p->wq) &obj_hash[i].lock pool_lock irq_context: 0 (x86_mce_decoder_chain).rwsem irq_context: 0 &dev->mutex &(&priv->bus_notifier)->rwsem irq_context: 0 &dev->mutex &drv->dynids.lock irq_context: 0 &dev->mutex pci_config_lock irq_context: 0 &dev->mutex *(&acpi_gbl_reference_count_lock) irq_context: 0 pool_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &k->list_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &k->k_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) lock kernfs_idr_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &root->kernfs_rwsem irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) uevent_sock_mutex irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) running_helpers_waitq.lock irq_context: 0 &dev->mutex acpi_link_lock irq_context: 0 &dev->mutex acpi_link_lock fs_reclaim irq_context: 0 &dev->mutex acpi_link_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex acpi_link_lock pool_lock#2 irq_context: 0 &dev->mutex acpi_link_lock &____s->seqcount irq_context: 0 &dev->mutex acpi_link_lock &c->lock irq_context: 0 &dev->mutex acpi_link_lock semaphore->lock irq_context: 0 &dev->mutex acpi_link_lock &obj_hash[i].lock irq_context: 0 &dev->mutex acpi_link_lock &pcp->lock &zone->lock irq_context: 0 &dev->mutex acpi_link_lock &zone->lock irq_context: 0 &dev->mutex acpi_link_lock rcu_read_lock pool_lock#2 irq_context: 0 &dev->mutex acpi_link_lock *(&acpi_gbl_reference_count_lock) irq_context: 0 &dev->mutex acpi_link_lock &rq->__lock irq_context: 0 &mm->mmap_lock batched_entropy_u8.lock irq_context: 0 &mm->mmap_lock kfence_freelist_lock irq_context: 0 &sig->cred_guard_mutex &mm->mmap_lock &meta->lock irq_context: 0 &sig->cred_guard_mutex &mm->mmap_lock kfence_freelist_lock irq_context: 0 &dev->mutex acpi_link_lock pci_config_lock irq_context: 0 &dev->mutex acpi_link_lock (console_sem).lock irq_context: 0 &dev->mutex acpi_link_lock console_lock console_srcu console_owner_lock irq_context: 0 &dev->mutex acpi_link_lock console_lock console_srcu console_owner irq_context: 0 &dev->mutex acpi_link_lock console_lock console_srcu console_owner &port_lock_key irq_context: 0 &dev->mutex acpi_link_lock console_lock console_srcu console_owner console_owner_lock irq_context: 0 &dev->mutex acpi_ioapic_lock irq_context: 0 &dev->mutex acpi_ioapic_lock ioapic_mutex irq_context: 0 &dev->mutex resource_lock irq_context: 0 &dev->mutex virtio_index_ida.xa_lock irq_context: 0 &dev->mutex lock kernfs_idr_lock &pcp->lock &zone->lock irq_context: 0 &dev->mutex lock kernfs_idr_lock &zone->lock irq_context: 0 &dev->mutex lock kernfs_idr_lock rcu_read_lock pool_lock#2 irq_context: 0 &dev->mutex lock kernfs_idr_lock 
&obj_hash[i].lock irq_context: 0 &dev->mutex rcu_read_lock &pool->lock/1 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 &dev->mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &dev->mutex &dev->mutex &dev->power.lock irq_context: 0 &dev->mutex &dev->mutex &k->list_lock irq_context: 0 &dev->mutex &dev->mutex &k->k_lock irq_context: 0 &dev->mutex subsys mutex#32 irq_context: 0 &dev->mutex fwnode_link_lock &k->k_lock irq_context: softirq rcu_callback quarantine_lock irq_context: 0 &dev->mutex &root->kernfs_rwsem &rq->__lock irq_context: 0 &dev->mutex acpi_link_lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &dev->mutex &dev->devres_lock irq_context: 0 &dev->mutex &md->mutex irq_context: 0 &dev->mutex &md->mutex fs_reclaim irq_context: 0 &dev->mutex &md->mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex &md->mutex pool_lock#2 irq_context: 0 &dev->mutex &md->mutex irq_domain_mutex irq_context: 0 &dev->mutex &md->mutex irq_domain_mutex fs_reclaim irq_context: 0 &dev->mutex &md->mutex irq_domain_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex &md->mutex irq_domain_mutex pool_lock#2 irq_context: 0 &dev->mutex &md->mutex irq_domain_mutex &obj_hash[i].lock irq_context: 0 &dev->mutex resource_lock irq_context: 0 &dev->mutex memtype_lock irq_context: 0 &dev->mutex free_vmap_area_lock irq_context: 0 &dev->mutex free_vmap_area_lock &obj_hash[i].lock irq_context: 0 &dev->mutex free_vmap_area_lock pool_lock#2 irq_context: 0 &dev->mutex vmap_area_lock irq_context: 0 &dev->mutex &md->mutex pci_config_lock irq_context: 0 &dev->mutex &md->mutex &xa->xa_lock#3 irq_context: 0 &dev->mutex &md->mutex &xa->xa_lock#3 pool_lock#2 irq_context: 0 &dev->mutex &md->mutex &domain->mutex irq_context: 0 &dev->mutex &md->mutex &domain->mutex sparse_irq_lock irq_context: 0 &dev->mutex &md->mutex &domain->mutex sparse_irq_lock fs_reclaim irq_context: 0 &dev->mutex &md->mutex &domain->mutex sparse_irq_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex &md->mutex &domain->mutex sparse_irq_lock pool_lock#2 irq_context: 0 &dev->mutex &md->mutex &domain->mutex sparse_irq_lock pcpu_alloc_mutex irq_context: 0 &dev->mutex &md->mutex &domain->mutex sparse_irq_lock pcpu_alloc_mutex pcpu_lock irq_context: 0 &dev->mutex &md->mutex &domain->mutex sparse_irq_lock &obj_hash[i].lock irq_context: 0 &dev->mutex &md->mutex &domain->mutex sparse_irq_lock lock irq_context: 0 &dev->mutex &md->mutex &domain->mutex sparse_irq_lock lock kernfs_idr_lock irq_context: 0 &dev->mutex &md->mutex &domain->mutex sparse_irq_lock &root->kernfs_rwsem irq_context: 0 &dev->mutex &md->mutex &domain->mutex sparse_irq_lock &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 &dev->mutex &md->mutex &domain->mutex sparse_irq_lock &c->lock irq_context: 0 &dev->mutex &md->mutex &domain->mutex sparse_irq_lock &____s->seqcount irq_context: 0 &dev->mutex &md->mutex &domain->mutex fs_reclaim irq_context: 0 &dev->mutex &md->mutex &domain->mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex &md->mutex &domain->mutex pool_lock#2 irq_context: 0 &dev->mutex &md->mutex &domain->mutex vector_lock irq_context: 0 &dev->mutex &md->mutex &domain->mutex &irq_desc_lock_class irq_context: 0 &dev->mutex &md->mutex &irq_desc_lock_class irq_context: 0 &dev->mutex &md->mutex vector_lock irq_context: 0 &dev->mutex &md->mutex &root->kernfs_rwsem irq_context: 0 &dev->mutex &md->mutex lock irq_context: 0 
&dev->mutex &md->mutex lock kernfs_idr_lock irq_context: 0 &dev->mutex &md->mutex &root->kernfs_rwsem irq_context: 0 &dev->mutex &md->mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 &dev->mutex &desc->request_mutex irq_context: 0 &dev->mutex &desc->request_mutex &irq_desc_lock_class irq_context: 0 &dev->mutex &desc->request_mutex &irq_desc_lock_class vector_lock irq_context: 0 &dev->mutex &desc->request_mutex &irq_desc_lock_class mask_lock irq_context: 0 &dev->mutex &desc->request_mutex &irq_desc_lock_class mask_lock tmp_mask_lock irq_context: 0 &dev->mutex &desc->request_mutex &irq_desc_lock_class mask_lock tmp_mask_lock vector_lock irq_context: 0 &dev->mutex register_lock irq_context: 0 &dev->mutex register_lock proc_subdir_lock irq_context: 0 &dev->mutex register_lock fs_reclaim irq_context: 0 &dev->mutex register_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex register_lock pool_lock#2 irq_context: 0 &dev->mutex register_lock proc_inum_ida.xa_lock irq_context: 0 &dev->mutex register_lock proc_subdir_lock irq_context: 0 &dev->mutex register_lock &c->lock irq_context: 0 &dev->mutex register_lock &____s->seqcount irq_context: 0 &dev->mutex &irq_desc_lock_class irq_context: 0 &dev->mutex proc_subdir_lock irq_context: 0 &dev->mutex proc_inum_ida.xa_lock irq_context: 0 &dev->mutex proc_subdir_lock irq_context: 0 &dev->mutex &zone->lock &____s->seqcount irq_context: 0 &dev->mutex &dev->vqs_list_lock irq_context: 0 &dev->mutex &vp_dev->lock irq_context: 0 &dev->mutex register_lock &pcp->lock &zone->lock irq_context: 0 &dev->mutex register_lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &dev->mutex register_lock &zone->lock irq_context: 0 &dev->mutex register_lock rcu_read_lock pool_lock#2 irq_context: 0 &dev->mutex register_lock &obj_hash[i].lock irq_context: 0 &dev->mutex cpu_hotplug_lock irq_context: 0 &dev->mutex &s->s_inode_list_lock irq_context: 0 &dev->mutex (oom_notify_list).rwsem irq_context: 0 &dev->mutex &dev->config_lock irq_context: 0 vdpa_dev_lock irq_context: 0 subsys mutex#33 irq_context: 0 subsys mutex#33 &k->k_lock irq_context: 0 cpu_hotplug_lock wq_pool_mutex &n->list_lock irq_context: 0 rcu_read_lock &pool->lock/1 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 &sig->cred_guard_mutex &mm->mmap_lock &rq->__lock irq_context: 0 rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &sig->cred_guard_mutex &mm->mmap_lock &cfs_rq->removed.lock irq_context: 0 &sig->cred_guard_mutex &mm->mmap_lock &obj_hash[i].lock irq_context: 0 &sig->cred_guard_mutex &mm->mmap_lock pool_lock#2 irq_context: 0 sb_writers &type->i_mutex_dir_key/1 &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &mm->mmap_lock rcu_read_lock rcu_node_0 irq_context: 0 &mm->mmap_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) lock pidmap_lock &c->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) lock pidmap_lock &pcp->lock &zone->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) lock pidmap_lock &zone->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) lock pidmap_lock &____s->seqcount irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) &rq->__lock irq_context: 0 rcu_read_lock rcu_node_0 irq_context: 0 sb_writers &type->i_mutex_dir_key/1 quarantine_lock irq_context: 0 &sig->cred_guard_mutex &mm->mmap_lock quarantine_lock 
irq_context: 0 &sig->cred_guard_mutex rcu_read_lock &rcu_state.gp_wq irq_context: 0 &sig->cred_guard_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 &sig->cred_guard_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 &sig->cred_guard_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 &sig->cred_guard_mutex &mm->mmap_lock &cfs_rq->removed.lock irq_context: 0 &sig->cred_guard_mutex &pcp->lock &zone->lock &____s->seqcount irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) rcu_read_lock &rq->__lock irq_context: 0 fs_reclaim &rq->__lock irq_context: 0 &sig->cred_guard_mutex fill_pool_map-wait-type-override &rq->__lock irq_context: 0 fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 fs_reclaim mmu_notifier_invalidate_range_start &cfs_rq->removed.lock irq_context: 0 fs_reclaim mmu_notifier_invalidate_range_start &obj_hash[i].lock irq_context: 0 &sig->cred_guard_mutex &sb->s_type->i_mutex_key rcu_read_lock &dentry->d_lock irq_context: 0 &mm->mmap_lock rcu_read_lock rcu_node_0 irq_context: 0 &mm->mmap_lock rcu_read_lock &rq->__lock irq_context: 0 &mm->mmap_lock rcu_read_lock &cfs_rq->removed.lock irq_context: 0 &mm->mmap_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 &sig->cred_guard_mutex &mm->mmap_lock mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 &sig->cred_guard_mutex pool_lock irq_context: 0 &sig->cred_guard_mutex fs_reclaim &rq->__lock irq_context: 0 &sig->cred_guard_mutex &sb->s_type->i_mutex_key &dentry->d_lock &dentry->d_lock/1 irq_context: 0 &sig->cred_guard_mutex &sb->s_type->i_mutex_key &dentry->d_lock/1 irq_context: 0 &sig->cred_guard_mutex &sb->s_type->i_mutex_key &obj_hash[i].lock irq_context: 0 &mm->mmap_lock rcu_read_lock &rcu_state.gp_wq irq_context: 0 &mm->mmap_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 &mm->mmap_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 &mm->mmap_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 tasklist_lock &sighand->siglock &(&sig->stats_lock)->lock &____s->seqcount#5 pidmap_lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 tasklist_lock &sighand->siglock &(&sig->stats_lock)->lock &____s->seqcount#5 pidmap_lock fill_pool_map-wait-type-override &c->lock irq_context: 0 tasklist_lock &sighand->siglock &(&sig->stats_lock)->lock &____s->seqcount#5 pidmap_lock fill_pool_map-wait-type-override &pcp->lock &zone->lock irq_context: 0 tasklist_lock &sighand->siglock &(&sig->stats_lock)->lock &____s->seqcount#5 pidmap_lock fill_pool_map-wait-type-override &zone->lock irq_context: 0 tasklist_lock &sighand->siglock &(&sig->stats_lock)->lock &____s->seqcount#5 pidmap_lock fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 tasklist_lock &sighand->siglock &(&sig->stats_lock)->lock &____s->seqcount#5 pidmap_lock fill_pool_map-wait-type-override pool_lock irq_context: 0 &sig->cred_guard_mutex &cfs_rq->removed.lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) lock pidmap_lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &sig->cred_guard_mutex &sb->s_type->i_mutex_key &obj_hash[i].lock pool_lock irq_context: 0 
fill_pool_map-wait-type-override &rq->__lock irq_context: 0 &mm->mmap_lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &sig->cred_guard_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 &sig->cred_guard_mutex batched_entropy_u8.lock irq_context: 0 &sig->cred_guard_mutex kfence_freelist_lock irq_context: 0 (wq_completion)events (work_completion)(&p->wq) quarantine_lock irq_context: softirq rcu_callback &meta->lock irq_context: softirq rcu_callback kfence_freelist_lock irq_context: 0 &mm->mmap_lock &rq->__lock irq_context: 0 &dev->mutex pnp_lock irq_context: 0 &dev->mutex serial_mutex irq_context: 0 &dev->mutex serial_mutex gpio_lookup_lock irq_context: 0 &dev->mutex serial_mutex port_mutex irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex fs_reclaim irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex pool_lock#2 irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex console_mutex irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex resource_lock irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex &port_lock_key irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex (console_sem).lock irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex console_lock console_srcu console_owner_lock irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex console_lock console_srcu console_owner irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex console_lock console_srcu console_owner &port_lock_key irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex console_lock console_srcu console_owner console_owner_lock irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex &rq->__lock irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex batched_entropy_u8.lock irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex kfence_freelist_lock irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex ctrl_ida.xa_lock irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex &x->wait#9 irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex &obj_hash[i].lock irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex &dev->power.lock irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex &k->list_lock irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex lock irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex lock kernfs_idr_lock irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex lock kernfs_idr_lock pool_lock#2 irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex &root->kernfs_rwsem irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex bus_type_sem irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex sysfs_symlink_target_lock irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex &k->k_lock irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex dpm_list_mtx irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex &c->lock irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex &pcp->lock &zone->lock irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex &zone->lock irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex 
&pcp->lock &zone->lock &____s->seqcount irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex &____s->seqcount irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex uevent_sock_mutex irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex rcu_read_lock &pool->lock/1 irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex running_helpers_waitq.lock irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex &dev->mutex &dev->power.lock irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex &dev->mutex &k->list_lock irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex &dev->mutex &k->k_lock irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex subsys mutex#34 irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex semaphore->lock irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex *(&acpi_gbl_reference_count_lock) irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex &root->kernfs_rwsem irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex dev_pm_qos_sysfs_mtx irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex dev_pm_qos_sysfs_mtx &root->kernfs_rwsem irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex dev_pm_qos_sysfs_mtx &root->kernfs_rwsem irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex dev_pm_qos_sysfs_mtx dev_pm_qos_mtx irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex kernfs_idr_lock irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex &k->k_lock klist_remove_lock irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex &root->kernfs_rwsem kernfs_idr_lock irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex &root->kernfs_rwsem &obj_hash[i].lock irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex &root->kernfs_rwsem pool_lock#2 irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex deferred_probe_mutex irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex device_links_lock irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex kernfs_idr_lock &obj_hash[i].lock irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex kernfs_idr_lock pool_lock#2 irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex gdp_mutex irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex gdp_mutex &k->list_lock irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex gdp_mutex fs_reclaim irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex gdp_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex gdp_mutex pool_lock#2 irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex gdp_mutex lock irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex gdp_mutex lock kernfs_idr_lock irq_context: 0 
&dev->mutex serial_mutex port_mutex &port->mutex gdp_mutex lock kernfs_idr_lock pool_lock#2 irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex gdp_mutex &root->kernfs_rwsem irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex gdp_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex req_lock irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex &p->pi_lock irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex &p->pi_lock &rq->__lock irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex &x->wait#11 irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex subsys mutex#21 irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex subsys mutex#21 &k->k_lock irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex chrdevs_lock irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex &obj_hash[i].lock pool_lock irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex &cfs_rq->removed.lock irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex rcu_read_lock &pool->lock/1 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &dev->mutex rng_index_ida.xa_lock irq_context: 0 &dev->mutex &md->mutex &domain->mutex sparse_irq_lock &pcp->lock &zone->lock irq_context: 0 &dev->mutex &md->mutex &domain->mutex sparse_irq_lock &zone->lock irq_context: 0 &dev->mutex &md->mutex &domain->mutex sparse_irq_lock &pcp->lock &zone->lock &____s->seqcount irq_context: hardirq &x->wait#12 irq_context: 0 &dev->mutex rng_mutex irq_context: 0 &dev->mutex rng_mutex &x->wait#13 irq_context: 0 &dev->mutex rng_mutex fs_reclaim irq_context: 0 &dev->mutex rng_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex rng_mutex pool_lock#2 irq_context: 0 &dev->mutex rng_mutex kthread_create_lock irq_context: 0 &dev->mutex rng_mutex &p->pi_lock irq_context: 0 &dev->mutex rng_mutex &p->pi_lock &rq->__lock irq_context: 0 &dev->mutex rng_mutex &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &dev->mutex rng_mutex &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &dev->mutex rng_mutex &rq->__lock irq_context: 0 &dev->mutex rng_mutex &x->wait irq_context: 0 &dev->mutex rng_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &dev->mutex rng_mutex &obj_hash[i].lock irq_context: 0 rng_mutex irq_context: 0 reading_mutex irq_context: 0 &dev->mutex reading_mutex irq_context: 0 &dev->mutex reading_mutex reading_mutex.wait_lock irq_context: 0 &dev->mutex reading_mutex &rq->__lock irq_context: 0 reading_mutex.wait_lock irq_context: 0 &dev->mutex reading_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &dev->mutex input_pool.lock irq_context: 0 &root->kernfs_rwsem kernfs_idr_lock irq_context: 0 &root->kernfs_rwsem kernfs_idr_lock &obj_hash[i].lock irq_context: 0 &root->kernfs_rwsem kernfs_idr_lock pool_lock#2 irq_context: 0 &root->kernfs_rwsem &obj_hash[i].lock irq_context: 0 &root->kernfs_rwsem pool_lock#2 irq_context: 0 klist_remove_lock irq_context: 0 &k->k_lock klist_remove_lock irq_context: 0 kernfs_idr_lock irq_context: 0 
slab_mutex pcpu_alloc_mutex &rq->__lock irq_context: 0 slab_mutex fs_reclaim &rq->__lock irq_context: 0 &dev->devres_lock irq_context: 0 &dev->managed.lock irq_context: 0 &type->s_umount_key#23/1 irq_context: 0 &type->s_umount_key#23/1 fs_reclaim irq_context: 0 &type->s_umount_key#23/1 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#23/1 pool_lock#2 irq_context: 0 &type->s_umount_key#23/1 pcpu_alloc_mutex irq_context: 0 &type->s_umount_key#23/1 pcpu_alloc_mutex pcpu_lock irq_context: 0 &type->s_umount_key#23/1 shrinker_rwsem irq_context: 0 &type->s_umount_key#23/1 list_lrus_mutex irq_context: 0 &type->s_umount_key#23/1 sb_lock irq_context: 0 &type->s_umount_key#23/1 sb_lock unnamed_dev_ida.xa_lock irq_context: 0 &type->s_umount_key#23/1 &c->lock irq_context: 0 &type->s_umount_key#23/1 &pcp->lock &zone->lock irq_context: 0 &type->s_umount_key#23/1 &zone->lock irq_context: 0 &type->s_umount_key#23/1 &____s->seqcount irq_context: 0 &type->s_umount_key#23/1 mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#23/1 &sb->s_type->i_lock_key#21 irq_context: 0 &type->s_umount_key#23/1 &s->s_inode_list_lock irq_context: 0 &type->s_umount_key#23/1 tk_core.seq.seqcount irq_context: 0 &type->s_umount_key#23/1 &sb->s_type->i_lock_key#21 &dentry->d_lock irq_context: 0 &type->s_umount_key#23/1 &dentry->d_lock irq_context: 0 &sb->s_type->i_lock_key#21 irq_context: 0 lock drm_minor_lock irq_context: 0 lock drm_minor_lock pool_lock#2 irq_context: 0 stack_depot_init_mutex irq_context: 0 &dev->debugfs_mutex irq_context: 0 subsys mutex#35 irq_context: 0 subsys mutex#35 &k->k_lock irq_context: 0 drm_minor_lock irq_context: 0 cpuset_rwsem &p->pi_lock &rq->__lock &rt_b->rt_runtime_lock irq_context: 0 cpuset_rwsem &p->pi_lock &rq->__lock &rt_b->rt_runtime_lock tk_core.seq.seqcount irq_context: 0 cpuset_rwsem &p->pi_lock &rq->__lock &rt_b->rt_runtime_lock hrtimer_bases.lock irq_context: 0 cpuset_rwsem &p->pi_lock &rq->__lock &rt_b->rt_runtime_lock hrtimer_bases.lock &obj_hash[i].lock irq_context: 0 cpuset_rwsem &p->pi_lock &rq->__lock &rt_b->rt_runtime_lock hrtimer_bases.lock tk_core.seq.seqcount irq_context: hardirq &rt_b->rt_runtime_lock irq_context: hardirq &rt_b->rt_runtime_lock tk_core.seq.seqcount irq_context: hardirq &rt_rq->rt_runtime_lock irq_context: 0 (worker)->lock irq_context: 0 &dev->mode_config.idr_mutex irq_context: 0 &dev->mode_config.idr_mutex fs_reclaim irq_context: 0 &dev->mode_config.idr_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->mode_config.idr_mutex pool_lock#2 irq_context: 0 crtc_ww_class_acquire irq_context: 0 crtc_ww_class_acquire crtc_ww_class_mutex irq_context: 0 crtc_ww_class_acquire crtc_ww_class_mutex reservation_ww_class_acquire irq_context: 0 crtc_ww_class_acquire crtc_ww_class_mutex reservation_ww_class_acquire reservation_ww_class_mutex irq_context: 0 &dev->mode_config.blob_lock irq_context: 0 &xa->xa_lock#4 irq_context: 0 &xa->xa_lock#5 irq_context: 0 &dev->mode_config.connector_list_lock irq_context: 0 &dev->vbl_lock irq_context: 0 drm_connector_list_iter &dev->mode_config.connector_list_lock irq_context: 0 drm_connector_list_iter fs_reclaim irq_context: 0 drm_connector_list_iter fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 drm_connector_list_iter pool_lock#2 irq_context: 0 drm_connector_list_iter &c->lock irq_context: 0 drm_connector_list_iter &____s->seqcount irq_context: 0 gdp_mutex &c->lock irq_context: 0 gdp_mutex &____s->seqcount irq_context: 0 drm_connector_list_iter 
&connector->mutex irq_context: 0 drm_connector_list_iter &connector->mutex fs_reclaim irq_context: 0 drm_connector_list_iter &connector->mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 drm_connector_list_iter &connector->mutex pool_lock#2 irq_context: 0 drm_connector_list_iter &connector->mutex &x->wait#9 irq_context: 0 drm_connector_list_iter &connector->mutex &obj_hash[i].lock irq_context: 0 drm_connector_list_iter &connector->mutex &k->list_lock irq_context: 0 drm_connector_list_iter &connector->mutex &c->lock irq_context: 0 drm_connector_list_iter &connector->mutex &____s->seqcount irq_context: 0 drm_connector_list_iter &connector->mutex lock irq_context: 0 drm_connector_list_iter &connector->mutex lock kernfs_idr_lock irq_context: 0 drm_connector_list_iter &connector->mutex &root->kernfs_rwsem irq_context: 0 drm_connector_list_iter &connector->mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 drm_connector_list_iter &connector->mutex bus_type_sem irq_context: 0 drm_connector_list_iter &connector->mutex sysfs_symlink_target_lock irq_context: 0 drm_connector_list_iter &connector->mutex &pcp->lock &zone->lock irq_context: 0 drm_connector_list_iter &connector->mutex &zone->lock irq_context: 0 drm_connector_list_iter &connector->mutex rcu_read_lock pool_lock#2 irq_context: 0 drm_connector_list_iter &connector->mutex lock kernfs_idr_lock pool_lock#2 irq_context: 0 drm_connector_list_iter &connector->mutex &root->kernfs_rwsem irq_context: 0 drm_connector_list_iter &connector->mutex &dev->power.lock irq_context: 0 drm_connector_list_iter &connector->mutex dpm_list_mtx irq_context: 0 drm_connector_list_iter &connector->mutex &pcp->lock &zone->lock &____s->seqcount irq_context: 0 drm_connector_list_iter &connector->mutex uevent_sock_mutex irq_context: 0 drm_connector_list_iter &connector->mutex rcu_read_lock &pool->lock/1 irq_context: 0 drm_connector_list_iter &connector->mutex rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 drm_connector_list_iter &connector->mutex rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 drm_connector_list_iter &connector->mutex rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 drm_connector_list_iter &connector->mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 drm_connector_list_iter &connector->mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 drm_connector_list_iter &connector->mutex running_helpers_waitq.lock irq_context: 0 drm_connector_list_iter &connector->mutex &rq->__lock irq_context: 0 drm_connector_list_iter &connector->mutex &cfs_rq->removed.lock irq_context: 0 drm_connector_list_iter &connector->mutex &k->k_lock irq_context: 0 drm_connector_list_iter &connector->mutex subsys mutex#35 irq_context: 0 drm_connector_list_iter &connector->mutex subsys mutex#35 &k->k_lock irq_context: 0 drm_connector_list_iter &connector->mutex pin_fs_lock irq_context: 0 drm_connector_list_iter &connector->mutex &sb->s_type->i_mutex_key#3 irq_context: 0 drm_connector_list_iter &connector->mutex &sb->s_type->i_mutex_key#3 rename_lock.seqcount irq_context: 0 drm_connector_list_iter &connector->mutex &sb->s_type->i_mutex_key#3 fs_reclaim irq_context: 0 drm_connector_list_iter &connector->mutex &sb->s_type->i_mutex_key#3 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 drm_connector_list_iter &connector->mutex &sb->s_type->i_mutex_key#3 pool_lock#2 irq_context: 0 drm_connector_list_iter &connector->mutex 
&sb->s_type->i_mutex_key#3 &dentry->d_lock irq_context: 0 drm_connector_list_iter &connector->mutex &sb->s_type->i_mutex_key#3 rcu_read_lock rename_lock.seqcount irq_context: 0 drm_connector_list_iter &connector->mutex &sb->s_type->i_mutex_key#3 &dentry->d_lock &wq irq_context: 0 drm_connector_list_iter &connector->mutex &sb->s_type->i_mutex_key#3 mmu_notifier_invalidate_range_start irq_context: 0 drm_connector_list_iter &connector->mutex &sb->s_type->i_mutex_key#3 &sb->s_type->i_lock_key#7 irq_context: 0 drm_connector_list_iter &connector->mutex &sb->s_type->i_mutex_key#3 &s->s_inode_list_lock irq_context: 0 drm_connector_list_iter &connector->mutex &sb->s_type->i_mutex_key#3 tk_core.seq.seqcount irq_context: 0 drm_connector_list_iter &connector->mutex &sb->s_type->i_mutex_key#3 &sb->s_type->i_lock_key#7 &dentry->d_lock irq_context: 0 drm_connector_list_iter &connector->mutex &sb->s_type->i_mutex_key#3 &c->lock irq_context: 0 drm_connector_list_iter &connector->mutex &sb->s_type->i_mutex_key#3 &____s->seqcount irq_context: 0 drm_connector_list_iter &connector->mutex &dev->mode_config.idr_mutex irq_context: 0 drm_connector_list_iter &connector->mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 drm_connector_list_iter &connector->mutex connector_list_lock irq_context: 0 &rq->__lock rcu_read_lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &dev->filelist_mutex irq_context: 0 &helper->lock irq_context: 0 &helper->lock drm_connector_list_iter &dev->mode_config.connector_list_lock irq_context: 0 &helper->lock drm_connector_list_iter fs_reclaim irq_context: 0 &helper->lock drm_connector_list_iter fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &helper->lock drm_connector_list_iter pool_lock#2 irq_context: 0 &helper->lock fs_reclaim irq_context: 0 &helper->lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &helper->lock pool_lock#2 irq_context: 0 &helper->lock &client->modeset_mutex irq_context: 0 &helper->lock &client->modeset_mutex &dev->mode_config.mutex irq_context: 0 &helper->lock &client->modeset_mutex &dev->mode_config.mutex crtc_ww_class_acquire irq_context: 0 &helper->lock &client->modeset_mutex &dev->mode_config.mutex crtc_ww_class_acquire crtc_ww_class_mutex irq_context: 0 &helper->lock &client->modeset_mutex &dev->mode_config.mutex crtc_ww_class_acquire crtc_ww_class_mutex fs_reclaim irq_context: 0 &helper->lock &client->modeset_mutex &dev->mode_config.mutex crtc_ww_class_acquire crtc_ww_class_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &helper->lock &client->modeset_mutex &dev->mode_config.mutex crtc_ww_class_acquire crtc_ww_class_mutex pool_lock#2 irq_context: 0 &helper->lock &client->modeset_mutex &dev->mode_config.mutex crtc_ww_class_acquire crtc_ww_class_mutex &c->lock irq_context: 0 &helper->lock &client->modeset_mutex &dev->mode_config.mutex crtc_ww_class_acquire crtc_ww_class_mutex &____s->seqcount irq_context: 0 &helper->lock &client->modeset_mutex &dev->mode_config.mutex fs_reclaim irq_context: 0 &helper->lock &client->modeset_mutex &dev->mode_config.mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &helper->lock &client->modeset_mutex &dev->mode_config.mutex pool_lock#2 irq_context: 0 &helper->lock &client->modeset_mutex &dev->mode_config.mutex &obj_hash[i].lock irq_context: 0 &helper->lock &client->modeset_mutex fs_reclaim irq_context: 0 &helper->lock &client->modeset_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &helper->lock 
&client->modeset_mutex pool_lock#2 irq_context: 0 &helper->lock &obj_hash[i].lock irq_context: 0 &helper->lock &client->modeset_mutex drm_connector_list_iter &dev->mode_config.connector_list_lock irq_context: 0 &helper->lock &sbinfo->stat_lock irq_context: 0 &helper->lock mmu_notifier_invalidate_range_start irq_context: 0 &helper->lock &sb->s_type->i_lock_key irq_context: 0 &helper->lock &s->s_inode_list_lock irq_context: 0 &helper->lock tk_core.seq.seqcount irq_context: 0 &helper->lock batched_entropy_u32.lock irq_context: 0 &helper->lock &c->lock irq_context: 0 &helper->lock &____s->seqcount irq_context: 0 &helper->lock &sb->s_type->i_lock_key &dentry->d_lock irq_context: 0 &helper->lock &mgr->vm_lock irq_context: 0 &helper->lock &mgr->vm_lock pool_lock#2 irq_context: 0 &helper->lock &dev->object_name_lock irq_context: 0 &helper->lock &dev->object_name_lock lock irq_context: 0 &helper->lock &dev->object_name_lock lock &file_private->table_lock irq_context: 0 &helper->lock &dev->object_name_lock lock &file_private->table_lock &c->lock irq_context: 0 &helper->lock &dev->object_name_lock lock &file_private->table_lock &____s->seqcount irq_context: 0 &helper->lock &dev->object_name_lock lock &file_private->table_lock pool_lock#2 irq_context: 0 &helper->lock &node->vm_lock irq_context: 0 &helper->lock &file_private->table_lock irq_context: 0 &helper->lock &dev->mode_config.idr_mutex irq_context: 0 &helper->lock &dev->mode_config.fb_lock irq_context: 0 &helper->lock &file->fbs_lock irq_context: 0 &helper->lock &prime_fpriv->lock irq_context: 0 &helper->lock &node->vm_lock &obj_hash[i].lock irq_context: 0 &helper->lock &node->vm_lock pool_lock#2 irq_context: 0 &helper->lock &file_private->table_lock &obj_hash[i].lock irq_context: 0 &helper->lock &file_private->table_lock pool_lock#2 irq_context: 0 &helper->lock free_vmap_area_lock irq_context: 0 &helper->lock vmap_area_lock irq_context: 0 &helper->lock &pcp->lock &zone->lock irq_context: 0 &helper->lock &zone->lock irq_context: 0 &helper->lock init_mm.page_table_lock irq_context: 0 &helper->lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &helper->lock &rq->__lock irq_context: 0 registration_lock irq_context: 0 registration_lock fs_reclaim irq_context: 0 registration_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 registration_lock pool_lock#2 irq_context: 0 registration_lock &x->wait#9 irq_context: 0 registration_lock &obj_hash[i].lock irq_context: 0 registration_lock &k->list_lock irq_context: 0 registration_lock gdp_mutex irq_context: 0 registration_lock gdp_mutex &k->list_lock irq_context: 0 registration_lock gdp_mutex fs_reclaim irq_context: 0 registration_lock gdp_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 registration_lock gdp_mutex &pcp->lock &zone->lock irq_context: 0 registration_lock gdp_mutex &zone->lock irq_context: 0 registration_lock gdp_mutex &____s->seqcount irq_context: 0 registration_lock gdp_mutex pool_lock#2 irq_context: 0 registration_lock gdp_mutex rcu_read_lock pool_lock#2 irq_context: 0 registration_lock gdp_mutex &obj_hash[i].lock irq_context: 0 registration_lock gdp_mutex lock irq_context: 0 registration_lock gdp_mutex lock kernfs_idr_lock irq_context: 0 registration_lock gdp_mutex &root->kernfs_rwsem irq_context: 0 registration_lock gdp_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 registration_lock lock irq_context: 0 registration_lock lock kernfs_idr_lock irq_context: 0 registration_lock &root->kernfs_rwsem irq_context: 0 registration_lock 
&root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 registration_lock bus_type_sem irq_context: 0 registration_lock &c->lock irq_context: 0 registration_lock &____s->seqcount irq_context: 0 registration_lock sysfs_symlink_target_lock irq_context: 0 registration_lock &root->kernfs_rwsem irq_context: 0 registration_lock &dev->power.lock irq_context: 0 registration_lock dpm_list_mtx irq_context: 0 registration_lock req_lock irq_context: 0 registration_lock &p->pi_lock irq_context: 0 registration_lock &p->pi_lock &rq->__lock irq_context: 0 registration_lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 registration_lock &rq->__lock irq_context: 0 registration_lock &x->wait#11 irq_context: 0 registration_lock uevent_sock_mutex irq_context: 0 registration_lock rcu_read_lock &pool->lock/1 irq_context: 0 registration_lock rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 registration_lock rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 registration_lock rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 registration_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 registration_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 registration_lock running_helpers_waitq.lock irq_context: 0 registration_lock &k->k_lock irq_context: 0 registration_lock subsys mutex#11 irq_context: 0 registration_lock subsys mutex#11 &k->k_lock irq_context: 0 registration_lock vt_switch_mutex irq_context: 0 registration_lock vt_switch_mutex fs_reclaim irq_context: 0 registration_lock vt_switch_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 registration_lock vt_switch_mutex pool_lock#2 irq_context: 0 registration_lock (console_sem).lock irq_context: 0 registration_lock console_lock irq_context: 0 registration_lock console_lock &fb_info->lock irq_context: 0 registration_lock console_lock fs_reclaim irq_context: 0 registration_lock console_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 registration_lock console_lock pool_lock#2 irq_context: 0 registration_lock console_lock &obj_hash[i].lock irq_context: 0 registration_lock console_lock &pcp->lock &zone->lock irq_context: 0 registration_lock console_lock &zone->lock irq_context: 0 registration_lock console_lock &____s->seqcount irq_context: 0 registration_lock console_lock rcu_read_lock pool_lock#2 irq_context: 0 registration_lock console_lock &base->lock irq_context: 0 registration_lock console_lock &base->lock &obj_hash[i].lock irq_context: 0 registration_lock console_lock &x->wait#9 irq_context: 0 registration_lock console_lock &k->list_lock irq_context: 0 registration_lock console_lock gdp_mutex irq_context: 0 registration_lock console_lock gdp_mutex &k->list_lock irq_context: 0 registration_lock console_lock lock irq_context: 0 registration_lock console_lock lock kernfs_idr_lock irq_context: 0 registration_lock console_lock &root->kernfs_rwsem irq_context: 0 registration_lock console_lock &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 registration_lock console_lock bus_type_sem irq_context: 0 registration_lock console_lock sysfs_symlink_target_lock irq_context: 0 registration_lock console_lock &root->kernfs_rwsem irq_context: 0 registration_lock console_lock &c->lock irq_context: 0 registration_lock console_lock &dev->power.lock irq_context: 0 registration_lock console_lock dpm_list_mtx irq_context: 0 registration_lock console_lock uevent_sock_mutex irq_context: 0 
registration_lock console_lock rcu_read_lock &pool->lock/1 irq_context: 0 registration_lock console_lock rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 registration_lock console_lock rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 registration_lock console_lock rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 registration_lock console_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 registration_lock console_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 registration_lock console_lock running_helpers_waitq.lock irq_context: 0 registration_lock console_lock &rq->__lock irq_context: 0 registration_lock console_lock subsys mutex#5 irq_context: 0 registration_lock console_lock subsys mutex#5 &k->k_lock irq_context: 0 registration_lock console_lock vga_lock irq_context: 0 registration_lock console_lock &helper->lock irq_context: 0 registration_lock console_lock &helper->lock &dev->master_mutex irq_context: 0 registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex irq_context: 0 registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire irq_context: 0 registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire fs_reclaim irq_context: 0 registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire pool_lock#2 irq_context: 0 registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire &c->lock irq_context: 0 registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire &____s->seqcount irq_context: 0 registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire crtc_ww_class_mutex irq_context: 0 registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire crtc_ww_class_mutex fs_reclaim irq_context: 0 registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire crtc_ww_class_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire crtc_ww_class_mutex pool_lock#2 irq_context: 0 registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire crtc_ww_class_mutex &obj_hash[i].lock irq_context: 0 registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire crtc_ww_class_mutex &dev->mode_config.idr_mutex irq_context: 0 registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire crtc_ww_class_mutex &dev->mode_config.blob_lock irq_context: 0 registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire crtc_ww_class_mutex drm_connector_list_iter &dev->mode_config.connector_list_lock irq_context: 0 registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire crtc_ww_class_mutex &crtc->commit_lock irq_context: 0 registration_lock console_lock &helper->lock &dev->master_mutex 
&client->modeset_mutex crtc_ww_class_acquire crtc_ww_class_mutex &pcp->lock &zone->lock irq_context: 0 registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire crtc_ww_class_mutex &zone->lock irq_context: 0 registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire crtc_ww_class_mutex &____s->seqcount irq_context: 0 registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire crtc_ww_class_mutex rcu_read_lock pool_lock#2 irq_context: 0 registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire crtc_ww_class_mutex reservation_ww_class_mutex irq_context: 0 registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire crtc_ww_class_mutex reservation_ww_class_mutex &shmem->vmap_lock irq_context: 0 registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire crtc_ww_class_mutex reservation_ww_class_mutex &shmem->vmap_lock &shmem->pages_lock irq_context: 0 registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire crtc_ww_class_mutex reservation_ww_class_mutex &shmem->vmap_lock &shmem->pages_lock fs_reclaim irq_context: 0 registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire crtc_ww_class_mutex reservation_ww_class_mutex &shmem->vmap_lock &shmem->pages_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire crtc_ww_class_mutex reservation_ww_class_mutex &shmem->vmap_lock &shmem->pages_lock &c->lock irq_context: 0 registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire crtc_ww_class_mutex reservation_ww_class_mutex &shmem->vmap_lock &shmem->pages_lock &____s->seqcount irq_context: 0 registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire crtc_ww_class_mutex reservation_ww_class_mutex &shmem->vmap_lock &shmem->pages_lock pool_lock#2 irq_context: 0 registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire crtc_ww_class_mutex reservation_ww_class_mutex &shmem->vmap_lock &shmem->pages_lock &pcp->lock &zone->lock irq_context: 0 registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire crtc_ww_class_mutex reservation_ww_class_mutex &shmem->vmap_lock &shmem->pages_lock &zone->lock irq_context: 0 registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire crtc_ww_class_mutex reservation_ww_class_mutex &shmem->vmap_lock &shmem->pages_lock &xa->xa_lock#6 irq_context: 0 registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire crtc_ww_class_mutex reservation_ww_class_mutex &shmem->vmap_lock &shmem->pages_lock lock#4 irq_context: 0 registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire crtc_ww_class_mutex reservation_ww_class_mutex &shmem->vmap_lock &shmem->pages_lock &info->lock irq_context: 0 registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire crtc_ww_class_mutex 
reservation_ww_class_mutex &shmem->vmap_lock &shmem->pages_lock &xa->xa_lock#6 pool_lock#2 irq_context: 0 registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire crtc_ww_class_mutex reservation_ww_class_mutex &shmem->vmap_lock &shmem->pages_lock lock#4 &lruvec->lru_lock irq_context: 0 registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire crtc_ww_class_mutex reservation_ww_class_mutex &shmem->vmap_lock &shmem->pages_lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire crtc_ww_class_mutex reservation_ww_class_mutex &shmem->vmap_lock &shmem->pages_lock &xa->xa_lock#6 &c->lock irq_context: 0 registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire crtc_ww_class_mutex reservation_ww_class_mutex &shmem->vmap_lock &shmem->pages_lock &xa->xa_lock#6 &____s->seqcount irq_context: 0 registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire crtc_ww_class_mutex reservation_ww_class_mutex &shmem->vmap_lock &shmem->pages_lock &rq->__lock irq_context: 0 registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire crtc_ww_class_mutex reservation_ww_class_mutex &shmem->vmap_lock fs_reclaim irq_context: 0 registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire crtc_ww_class_mutex reservation_ww_class_mutex &shmem->vmap_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire crtc_ww_class_mutex reservation_ww_class_mutex &shmem->vmap_lock pool_lock#2 irq_context: 0 registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire crtc_ww_class_mutex reservation_ww_class_mutex &shmem->vmap_lock free_vmap_area_lock irq_context: 0 registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire crtc_ww_class_mutex reservation_ww_class_mutex &shmem->vmap_lock vmap_area_lock irq_context: 0 registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire crtc_ww_class_mutex reservation_ww_class_mutex &shmem->vmap_lock &pcp->lock &zone->lock irq_context: 0 registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire crtc_ww_class_mutex reservation_ww_class_mutex &shmem->vmap_lock &zone->lock irq_context: 0 registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire crtc_ww_class_mutex reservation_ww_class_mutex &shmem->vmap_lock &____s->seqcount irq_context: 0 registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire crtc_ww_class_mutex reservation_ww_class_mutex &shmem->vmap_lock init_mm.page_table_lock irq_context: 0 registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire crtc_ww_class_mutex reservation_ww_class_mutex &shmem->vmap_lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire crtc_ww_class_mutex tk_core.seq.seqcount irq_context: 0 
registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire crtc_ww_class_mutex &vkms_out->lock irq_context: 0 registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire crtc_ww_class_mutex &vkms_out->lock &dev->event_lock irq_context: 0 registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire crtc_ww_class_mutex &vkms_out->lock &dev->event_lock &dev->vbl_lock irq_context: 0 registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire crtc_ww_class_mutex &vkms_out->lock &dev->event_lock &____s->seqcount#6 irq_context: 0 registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire crtc_ww_class_mutex &vkms_out->lock &dev->event_lock &x->wait#14 irq_context: 0 registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire crtc_ww_class_mutex &vkms_out->lock &dev->event_lock &obj_hash[i].lock irq_context: 0 registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire crtc_ww_class_mutex &vkms_out->lock &dev->event_lock pool_lock#2 irq_context: 0 registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire crtc_ww_class_mutex &dev->vbl_lock irq_context: 0 registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire crtc_ww_class_mutex &dev->vbl_lock &dev->vblank_time_lock irq_context: 0 registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire crtc_ww_class_mutex &dev->vbl_lock &dev->vblank_time_lock tk_core.seq.seqcount irq_context: 0 registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire crtc_ww_class_mutex &dev->vbl_lock &dev->vblank_time_lock &(&vblank->seqlock)->lock irq_context: 0 registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire crtc_ww_class_mutex &dev->vbl_lock &dev->vblank_time_lock &(&vblank->seqlock)->lock &____s->seqcount#6 irq_context: 0 registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire crtc_ww_class_mutex &x->wait#14 irq_context: 0 registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire crtc_ww_class_mutex (work_completion)(&vkms_state->composer_work) irq_context: 0 registration_lock console_lock &helper->damage_lock irq_context: 0 registration_lock console_lock rcu_read_lock &pool->lock irq_context: 0 registration_lock console_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 registration_lock console_lock rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 registration_lock console_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 registration_lock console_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 registration_lock console_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 registration_lock console_lock &helper->lock &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&helper->damage_work) irq_context: 0 (wq_completion)events (work_completion)(&helper->damage_work) &helper->damage_lock irq_context: 0 (wq_completion)events 
(work_completion)(&helper->damage_work) &helper->lock irq_context: 0 (wq_completion)events (work_completion)(&helper->damage_work) &helper->lock &lock->wait_lock irq_context: 0 (wq_completion)events (work_completion)(&helper->damage_work) &helper->lock &pool->lock irq_context: 0 (wq_completion)events (work_completion)(&helper->damage_work) &helper->lock &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&helper->damage_work) &helper->lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire crtc_ww_class_mutex drm_connector_list_iter fs_reclaim irq_context: 0 registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire crtc_ww_class_mutex drm_connector_list_iter fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire crtc_ww_class_mutex drm_connector_list_iter pool_lock#2 irq_context: 0 registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire crtc_ww_class_mutex &vkms_out->lock &dev->event_lock &dev->vbl_lock &dev->vblank_time_lock irq_context: 0 registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire crtc_ww_class_mutex &vkms_out->lock &dev->event_lock &dev->vbl_lock &dev->vblank_time_lock &obj_hash[i].lock irq_context: 0 registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire crtc_ww_class_mutex &vkms_out->lock &dev->event_lock &dev->vbl_lock &dev->vblank_time_lock hrtimer_bases.lock irq_context: 0 registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire crtc_ww_class_mutex &vkms_out->lock &dev->event_lock &dev->vbl_lock &dev->vblank_time_lock hrtimer_bases.lock tk_core.seq.seqcount irq_context: 0 registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire crtc_ww_class_mutex &vkms_out->lock &dev->event_lock &dev->vbl_lock &dev->vblank_time_lock hrtimer_bases.lock &obj_hash[i].lock irq_context: 0 registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire crtc_ww_class_mutex &vkms_out->lock &dev->event_lock &dev->vbl_lock &dev->vblank_time_lock tk_core.seq.seqcount irq_context: 0 registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire crtc_ww_class_mutex &vkms_out->lock &dev->event_lock &dev->vblank_time_lock irq_context: 0 registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire crtc_ww_class_mutex &base->lock irq_context: 0 registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire crtc_ww_class_mutex &base->lock &obj_hash[i].lock irq_context: 0 registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire crtc_ww_class_mutex &rq->__lock irq_context: 0 registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire crtc_ww_class_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: hardirq &vkms_out->lock irq_context: hardirq &vkms_out->lock &dev->event_lock irq_context: hardirq &vkms_out->lock &dev->event_lock &dev->vblank_time_lock 
irq_context: hardirq &vkms_out->lock &dev->event_lock &dev->vblank_time_lock &(&vblank->seqlock)->lock irq_context: hardirq &vkms_out->lock &dev->event_lock &dev->vblank_time_lock &(&vblank->seqlock)->lock &____s->seqcount#6 irq_context: hardirq &vkms_out->lock &dev->event_lock &vblank->queue irq_context: hardirq &vkms_out->lock &dev->event_lock &____s->seqcount#6 irq_context: hardirq &vkms_out->lock &dev->event_lock &obj_hash[i].lock irq_context: hardirq &vkms_out->lock &dev->event_lock &base->lock irq_context: hardirq &vkms_out->lock &dev->event_lock &base->lock &obj_hash[i].lock irq_context: hardirq &vkms_out->lock &dev->event_lock &x->wait#14 irq_context: hardirq &vkms_out->lock &dev->event_lock &x->wait#14 &p->pi_lock irq_context: hardirq &vkms_out->lock &dev->event_lock &x->wait#14 &p->pi_lock &rq->__lock irq_context: hardirq &vkms_out->lock &dev->event_lock &x->wait#14 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: hardirq &vkms_out->lock &dev->event_lock pool_lock#2 irq_context: 0 registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire crtc_ww_class_mutex (&timer.timer) irq_context: 0 registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire crtc_ww_class_mutex (work_completion)(&vkms_state->composer_work)#2 irq_context: 0 registration_lock console_lock &lock->wait_lock irq_context: 0 registration_lock console_lock &p->pi_lock irq_context: 0 registration_lock console_lock &p->pi_lock &rq->__lock irq_context: 0 registration_lock console_lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&helper->damage_work) &helper->lock reservation_ww_class_mutex irq_context: 0 (wq_completion)events (work_completion)(&helper->damage_work) &helper->lock reservation_ww_class_mutex &shmem->vmap_lock irq_context: 0 registration_lock console_lock vt_event_lock irq_context: 0 registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire crtc_ww_class_mutex &c->lock irq_context: 0 registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire crtc_ww_class_mutex &pcp->lock &zone->lock &____s->seqcount irq_context: hardirq &vb->stop_update_lock irq_context: hardirq &vb->stop_update_lock rcu_read_lock &pool->lock irq_context: hardirq &vb->stop_update_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: hardirq &vb->stop_update_lock rcu_read_lock &pool->lock pool_lock#2 irq_context: hardirq &vb->stop_update_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: hardirq &vb->stop_update_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: hardirq &vb->stop_update_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_freezable irq_context: 0 (wq_completion)events_freezable (work_completion)(&vb->update_balloon_stats_work) irq_context: 0 (wq_completion)events_freezable (work_completion)(&vb->update_balloon_stats_work) cpu_hotplug_lock irq_context: 0 (wq_completion)events_freezable (work_completion)(&vb->update_balloon_stats_work) &s->s_inode_list_lock irq_context: softirq &(&kfence_timer)->timer rcu_read_lock &pool->lock/1 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&(&kfence_timer)->work) rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)events_unbound 
(work_completion)(&(&kfence_timer)->work) rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 registration_lock console_lock batched_entropy_u8.lock irq_context: 0 registration_lock console_lock kfence_freelist_lock irq_context: 0 registration_lock console_lock (console_sem).lock irq_context: 0 registration_lock console_lock console_owner_lock irq_context: 0 registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire crtc_ww_class_mutex drm_connector_list_iter &c->lock irq_context: 0 registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire crtc_ww_class_mutex drm_connector_list_iter &____s->seqcount irq_context: 0 registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire crtc_ww_class_mutex &rq->__lock &cfs_rq->removed.lock irq_context: 0 registration_lock console_lock &meta->lock irq_context: 0 registration_lock console_lock console_srcu console_owner_lock irq_context: 0 registration_lock console_lock console_srcu console_owner irq_context: 0 registration_lock console_lock console_srcu console_owner &port_lock_key irq_context: 0 registration_lock console_lock console_srcu console_owner console_owner_lock irq_context: 0 kernel_fb_helper_lock irq_context: 0 &dev->clientlist_mutex irq_context: 0 &dev->queue_lock irq_context: 0 cpu_hotplug_lock cpuhp_state_mutex lock irq_context: 0 cpu_hotplug_lock cpuhp_state_mutex lock kernfs_idr_lock irq_context: 0 cpu_hotplug_lock cpuhp_state_mutex &root->kernfs_rwsem irq_context: 0 cpu_hotplug_lock cpuhp_state_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 cpu_hotplug_lock cpuhp_state_mutex &c->lock irq_context: 0 cpu_hotplug_lock cpuhp_state_mutex &____s->seqcount irq_context: 0 cpu_hotplug_lock cpuhp_state_mutex lock kernfs_idr_lock pool_lock#2 irq_context: 0 cpu_hotplug_lock cpuhp_state_mutex &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 cpu_hotplug_lock cpuhp_state-up &k->k_lock irq_context: 0 &x->wait#6 &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &sig->cred_guard_mutex &mm->mmap_lock rcu_read_lock rcu_node_0 irq_context: 0 &sig->cred_guard_mutex &mm->mmap_lock rcu_read_lock &rq->__lock irq_context: 0 &sig->cred_guard_mutex &mm->mmap_lock rcu_read_lock &cfs_rq->removed.lock irq_context: 0 &sig->cred_guard_mutex &mm->mmap_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 cpu_hotplug_lock cpuhp_state_mutex &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) cpu_hotplug_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) cpu_hotplug_lock wq_pool_mutex irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) cpu_hotplug_lock wq_pool_mutex fs_reclaim irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) cpu_hotplug_lock wq_pool_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) cpu_hotplug_lock wq_pool_mutex pool_lock#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) cpu_hotplug_lock wq_pool_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) cpu_hotplug_lock wq_pool_mutex &wq->mutex irq_context: 0 (wq_completion)events_unbound 
(work_completion)(&entry->work) cpu_hotplug_lock wq_pool_mutex &wq->mutex &pool->lock/1 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) wq_pool_mutex irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) wq_pool_mutex &wq->mutex irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &n->list_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) pcpu_alloc_mutex irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) pcpu_alloc_mutex pcpu_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) batched_entropy_u32.lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) batched_entropy_u32.lock crngs.lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) blk_queue_ida.xa_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &q->sysfs_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &q->sysfs_lock &q->unused_hctx_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &q->sysfs_lock mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &q->sysfs_lock pool_lock#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &q->sysfs_lock &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &q->sysfs_lock &obj_hash[i].lock pool_lock irq_context: 0 blk_queue_ida.xa_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &q->sysfs_lock &c->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &q->sysfs_lock &n->list_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &q->sysfs_lock &pcp->lock &zone->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &q->sysfs_lock &zone->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &q->sysfs_lock &____s->seqcount irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &q->sysfs_lock cpu_hotplug_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &q->sysfs_lock cpu_hotplug_lock cpuhp_state_mutex irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &q->sysfs_lock fs_reclaim irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &q->sysfs_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &q->sysfs_lock &xa->xa_lock#7 irq_context: 0 &sb->s_type->i_lock_key#3 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &set->tag_list_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) bio_slab_lock irq_context: 0 &xa->xa_lock#8 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) percpu_counters_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &pcp->lock &zone->lock &____s->seqcount irq_context: 0 lock &q->queue_lock irq_context: 0 lock &q->queue_lock &blkcg->lock irq_context: 0 lock &q->queue_lock &blkcg->lock pool_lock#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &sb->s_type->i_lock_key#3 irq_context: 
0 (wq_completion)events_unbound (work_completion)(&entry->work) &s->s_inode_list_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &xa->xa_lock#8 irq_context: 0 &q->queue_lock irq_context: 0 &q->queue_lock pool_lock#2 irq_context: 0 &q->queue_lock pcpu_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) lock &q->queue_lock irq_context: 0 &q->queue_lock &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) lock &q->queue_lock &blkcg->lock irq_context: 0 &q->queue_lock percpu_counters_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &q->mq_freeze_lock irq_context: 0 &q->queue_lock &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &q->mq_freeze_lock percpu_ref_switch_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &q->mq_freeze_lock percpu_ref_switch_lock rcu_read_lock &q->mq_freeze_wq irq_context: 0 &bdev->bd_size_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) percpu_ref_switch_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &q->queue_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &q->queue_lock pool_lock#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &q->mq_freeze_lock &q->mq_freeze_wq irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &q->queue_lock pcpu_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &q->queue_lock &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &q->queue_lock percpu_counters_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &x->wait#9 irq_context: 0 subsys mutex#36 irq_context: 0 subsys mutex#36 &k->k_lock irq_context: 0 dev_hotplug_mutex irq_context: 0 dev_hotplug_mutex &dev->power.lock irq_context: 0 &q->sysfs_dir_lock irq_context: 0 &q->sysfs_dir_lock fs_reclaim irq_context: 0 &q->sysfs_dir_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &q->sysfs_dir_lock pool_lock#2 irq_context: 0 &q->sysfs_dir_lock lock irq_context: 0 &q->sysfs_dir_lock lock kernfs_idr_lock irq_context: 0 &q->sysfs_dir_lock &root->kernfs_rwsem irq_context: 0 &q->sysfs_dir_lock &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 &q->sysfs_dir_lock &c->lock irq_context: 0 &q->sysfs_dir_lock &____s->seqcount irq_context: 0 &q->sysfs_dir_lock &q->sysfs_lock irq_context: 0 &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex irq_context: 0 &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex pin_fs_lock irq_context: 0 &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex &sb->s_type->i_mutex_key#3 irq_context: 0 &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex &sb->s_type->i_mutex_key#3 &sb->s_type->i_lock_key#7 irq_context: 0 &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex &sb->s_type->i_mutex_key#3 rename_lock.seqcount irq_context: 0 &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex &sb->s_type->i_mutex_key#3 fs_reclaim irq_context: 0 &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex &sb->s_type->i_mutex_key#3 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex &sb->s_type->i_mutex_key#3 pool_lock#2 irq_context: 
0 &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex &sb->s_type->i_mutex_key#3 &dentry->d_lock irq_context: 0 &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex &sb->s_type->i_mutex_key#3 rcu_read_lock rename_lock.seqcount irq_context: 0 &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex &sb->s_type->i_mutex_key#3 &dentry->d_lock &wq irq_context: 0 &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex &sb->s_type->i_mutex_key#3 mmu_notifier_invalidate_range_start irq_context: 0 &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex &sb->s_type->i_mutex_key#3 &s->s_inode_list_lock irq_context: 0 &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex &sb->s_type->i_mutex_key#3 tk_core.seq.seqcount irq_context: 0 &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex &sb->s_type->i_mutex_key#3 &sb->s_type->i_lock_key#7 &dentry->d_lock irq_context: 0 percpu_ref_switch_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &q->queue_lock &c->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &q->queue_lock &pcp->lock &zone->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &q->queue_lock &zone->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &q->queue_lock &____s->seqcount irq_context: 0 subsys mutex#37 irq_context: 0 subsys mutex#37 &k->k_lock irq_context: 0 cgwb_lock irq_context: 0 bdi_lock irq_context: 0 inode_hash_lock irq_context: 0 inode_hash_lock &sb->s_type->i_lock_key#3 irq_context: 0 bdev_lock irq_context: 0 &disk->open_mutex irq_context: 0 &disk->open_mutex fs_reclaim irq_context: 0 &disk->open_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &disk->open_mutex pool_lock#2 irq_context: 0 &disk->open_mutex free_vmap_area_lock irq_context: 0 &disk->open_mutex vmap_area_lock irq_context: 0 &disk->open_mutex &pcp->lock &zone->lock irq_context: 0 &disk->open_mutex &zone->lock irq_context: 0 &disk->open_mutex &____s->seqcount irq_context: 0 &disk->open_mutex init_mm.page_table_lock irq_context: 0 &disk->open_mutex &xa->xa_lock#6 irq_context: 0 &disk->open_mutex lock#4 irq_context: 0 &disk->open_mutex mmu_notifier_invalidate_range_start irq_context: 0 &disk->open_mutex &c->lock irq_context: 0 &disk->open_mutex &mapping->private_lock irq_context: 0 &disk->open_mutex tk_core.seq.seqcount irq_context: 0 &disk->open_mutex &ret->b_uptodate_lock irq_context: 0 &disk->open_mutex &obj_hash[i].lock irq_context: 0 &disk->open_mutex &xa->xa_lock#6 pool_lock#2 irq_context: 0 &disk->open_mutex rcu_read_lock pool_lock#2 irq_context: 0 &disk->open_mutex purge_vmap_area_lock irq_context: 0 &disk->open_mutex &sb->s_type->i_lock_key#3 irq_context: 0 &disk->open_mutex &sb->s_type->i_lock_key#3 &xa->xa_lock#6 irq_context: 0 &disk->open_mutex &sb->s_type->i_lock_key#3 &xa->xa_lock#6 &obj_hash[i].lock irq_context: 0 &disk->open_mutex &sb->s_type->i_lock_key#3 &xa->xa_lock#6 pool_lock#2 irq_context: 0 &disk->open_mutex lock#4 &lruvec->lru_lock irq_context: 0 &disk->open_mutex lock#5 irq_context: 0 &disk->open_mutex &lruvec->lru_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) major_names_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) major_names_lock fs_reclaim irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) major_names_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) major_names_lock pool_lock#2 
irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) major_names_lock major_names_spinlock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) lock kernfs_idr_lock pool_lock#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &root->kernfs_rwsem &rq->__lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) floppy_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) floppy_lock &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) floppy_lock &base->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) floppy_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) rtc_lock irq_context: softirq rcu_callback percpu_ref_switch_waitq.lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &wq->mutex irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &wq->mutex &pool->lock/1 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &wq->mutex &x->wait#10 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &desc->request_mutex irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &desc->request_mutex &irq_desc_lock_class irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &desc->request_mutex &irq_desc_lock_class vector_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &desc->request_mutex &irq_desc_lock_class ioapic_lock irq_context: 0 &q->sysfs_dir_lock &pcp->lock &zone->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &desc->request_mutex &irq_desc_lock_class mask_lock irq_context: 0 &q->sysfs_dir_lock &zone->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &desc->request_mutex &irq_desc_lock_class mask_lock tmp_mask_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &desc->request_mutex &irq_desc_lock_class mask_lock tmp_mask_lock vector_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &desc->request_mutex &irq_desc_lock_class mask_lock tmp_mask_lock ioapic_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &desc->request_mutex &irq_desc_lock_class ioapic_lock i8259A_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) register_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &irq_desc_lock_class irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) proc_subdir_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) proc_inum_ida.xa_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) proc_subdir_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) resource_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) fill_pool_map-wait-type-override &rq->__lock irq_context: 0 
(wq_completion)events_unbound (work_completion)(&entry->work) fill_pool_map-wait-type-override &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex &sb->s_type->i_mutex_key#3 &c->lock irq_context: 0 &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex &sb->s_type->i_mutex_key#3 &pcp->lock &zone->lock irq_context: 0 &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex &sb->s_type->i_mutex_key#3 &zone->lock irq_context: 0 &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex &sb->s_type->i_mutex_key#3 &____s->seqcount irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) fill_pool_map-wait-type-override &cfs_rq->removed.lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) fill_pool_map-wait-type-override &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &base->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &rq->__lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &rq->__lock &cfs_rq->removed.lock irq_context: 0 &q->queue_lock &c->lock irq_context: 0 &q->queue_lock &pcp->lock &zone->lock irq_context: 0 &q->queue_lock &zone->lock irq_context: 0 &q->queue_lock &____s->seqcount irq_context: 0 &q->queue_lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 &q->queue_lock fill_pool_map-wait-type-override pool_lock irq_context: 0 &q->sysfs_dir_lock lock kernfs_idr_lock pool_lock#2 irq_context: 0 &disk->open_mutex purge_vmap_area_lock &obj_hash[i].lock irq_context: 0 &disk->open_mutex purge_vmap_area_lock pool_lock#2 irq_context: 0 &disk->open_mutex &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &sb->s_type->i_mutex_key#3 &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&p->wq) purge_vmap_area_lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&p->wq) purge_vmap_area_lock pool_lock#2 irq_context: 0 &disk->open_mutex &xa->xa_lock#6 &c->lock irq_context: 0 &disk->open_mutex &xa->xa_lock#6 &pcp->lock &zone->lock irq_context: 0 &disk->open_mutex &xa->xa_lock#6 &zone->lock irq_context: 0 &disk->open_mutex &xa->xa_lock#6 &____s->seqcount irq_context: 0 &q->queue_lock fill_pool_map-wait-type-override &c->lock irq_context: 0 &q->queue_lock fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) (&timer.timer) irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) rcu_read_lock &pool->lock/1 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) command_done.lock irq_context: softirq rcu_callback rcu_read_lock &obj_hash[i].lock irq_context: softirq rcu_callback rcu_read_lock pool_lock#2 irq_context: softirq rcu_callback percpu_counters_lock irq_context: 0 loop_ctl_mutex irq_context: 0 loop_ctl_mutex fs_reclaim irq_context: 0 loop_ctl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 loop_ctl_mutex pool_lock#2 irq_context: 0 &q->sysfs_lock irq_context: 0 &q->sysfs_lock &q->unused_hctx_lock irq_context: 0 &q->sysfs_lock 
mmu_notifier_invalidate_range_start irq_context: 0 &q->sysfs_lock pool_lock#2 irq_context: 0 &q->sysfs_lock &obj_hash[i].lock irq_context: 0 &q->sysfs_lock &c->lock irq_context: 0 &q->sysfs_lock &n->list_lock irq_context: 0 &q->sysfs_lock &pcp->lock &zone->lock irq_context: 0 &q->sysfs_lock &zone->lock irq_context: 0 &q->sysfs_lock &____s->seqcount irq_context: 0 &q->sysfs_lock cpu_hotplug_lock irq_context: 0 &q->sysfs_lock cpu_hotplug_lock cpuhp_state_mutex irq_context: 0 &q->sysfs_lock fs_reclaim irq_context: 0 &q->sysfs_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &q->sysfs_lock &xa->xa_lock#7 irq_context: 0 &set->tag_list_lock irq_context: 0 &q->mq_freeze_lock irq_context: 0 &q->mq_freeze_lock percpu_ref_switch_lock irq_context: 0 &q->mq_freeze_lock percpu_ref_switch_lock rcu_read_lock &q->mq_freeze_wq irq_context: 0 &q->mq_freeze_lock &q->mq_freeze_wq irq_context: softirq &(&ops->cursor_work)->timer irq_context: softirq &(&ops->cursor_work)->timer rcu_read_lock &pool->lock irq_context: softirq &(&ops->cursor_work)->timer rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: softirq &(&ops->cursor_work)->timer rcu_read_lock &pool->lock pool_lock#2 irq_context: softirq &(&ops->cursor_work)->timer rcu_read_lock &pool->lock &p->pi_lock irq_context: softirq &(&ops->cursor_work)->timer rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: softirq &(&ops->cursor_work)->timer rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex &sb->s_type->i_mutex_key#3 &rq->__lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&ops->cursor_work)->work) irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&ops->cursor_work)->work) (console_sem).lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&ops->cursor_work)->work) console_lock &helper->damage_lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&ops->cursor_work)->work) console_lock rcu_read_lock &pool->lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&ops->cursor_work)->work) console_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&ops->cursor_work)->work) console_lock rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&ops->cursor_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&ops->cursor_work)->work) &base->lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&ops->cursor_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 &q->sysfs_dir_lock &q->sysfs_lock fs_reclaim irq_context: 0 &q->sysfs_dir_lock &q->sysfs_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &q->sysfs_dir_lock &q->sysfs_lock pool_lock#2 irq_context: 0 &q->sysfs_dir_lock &q->sysfs_lock pcpu_alloc_mutex irq_context: 0 &q->sysfs_dir_lock &q->sysfs_lock pcpu_alloc_mutex pcpu_lock irq_context: 0 &q->sysfs_dir_lock &q->sysfs_lock &obj_hash[i].lock irq_context: 0 &q->sysfs_dir_lock &q->sysfs_lock &q->mq_freeze_lock irq_context: 0 &q->sysfs_dir_lock &q->sysfs_lock &q->mq_freeze_lock percpu_ref_switch_lock irq_context: 0 &q->sysfs_dir_lock &q->sysfs_lock &q->mq_freeze_lock percpu_ref_switch_lock rcu_read_lock &q->mq_freeze_wq irq_context: 0 &q->sysfs_dir_lock &q->sysfs_lock percpu_ref_switch_lock 
irq_context: 0 &q->sysfs_dir_lock &q->sysfs_lock &q->queue_lock irq_context: 0 &q->sysfs_dir_lock &q->sysfs_lock &q->mq_freeze_lock &q->mq_freeze_wq irq_context: 0 &q->sysfs_dir_lock &q->sysfs_lock &stats->lock irq_context: 0 &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex &sb->s_type->i_mutex_key#3 &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &q->sysfs_dir_lock &q->sysfs_lock &obj_hash[i].lock pool_lock irq_context: 0 &q->sysfs_dir_lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &q->sysfs_dir_lock &q->sysfs_lock &c->lock irq_context: 0 &q->sysfs_dir_lock &q->sysfs_lock &____s->seqcount irq_context: 0 &q->sysfs_dir_lock &q->sysfs_lock &pcp->lock &zone->lock irq_context: 0 &q->sysfs_dir_lock &q->sysfs_lock &zone->lock irq_context: 0 &q->sysfs_dir_lock lock kernfs_idr_lock &c->lock irq_context: 0 &q->sysfs_dir_lock lock kernfs_idr_lock &pcp->lock &zone->lock irq_context: 0 &q->sysfs_dir_lock lock kernfs_idr_lock &zone->lock irq_context: 0 &q->sysfs_dir_lock lock kernfs_idr_lock &____s->seqcount irq_context: 0 &q->sysfs_lock &obj_hash[i].lock pool_lock irq_context: 0 nbd_index_mutex irq_context: 0 nbd_index_mutex fs_reclaim irq_context: 0 nbd_index_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 nbd_index_mutex pool_lock#2 irq_context: 0 set->srcu irq_context: 0 (work_completion)(&(&q->requeue_work)->work) irq_context: 0 (work_completion)(&(&hctx->run_work)->work) irq_context: 0 &q->debugfs_mutex irq_context: 0 &q->sysfs_dir_lock &q->sysfs_lock lock irq_context: 0 &q->sysfs_dir_lock &q->sysfs_lock lock kernfs_idr_lock irq_context: 0 &q->sysfs_dir_lock &q->sysfs_lock &root->kernfs_rwsem irq_context: 0 &q->sysfs_dir_lock &q->sysfs_lock &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 &q->sysfs_dir_lock &q->sysfs_lock set->srcu irq_context: 0 &q->queue_lock rcu_read_lock &pool->lock irq_context: 0 &q->queue_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 &q->queue_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 &q->queue_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 &q->queue_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex &sb->s_type->i_mutex_key#3 batched_entropy_u8.lock irq_context: 0 &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex &sb->s_type->i_mutex_key#3 kfence_freelist_lock irq_context: 0 &q->sysfs_dir_lock lock kernfs_idr_lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &q->sysfs_dir_lock &q->sysfs_lock lock kernfs_idr_lock pool_lock#2 irq_context: 0 &x->wait#11 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 &q->sysfs_dir_lock &q->sysfs_lock &n->list_lock irq_context: 0 &q->sysfs_dir_lock &q->sysfs_lock &n->list_lock &c->lock irq_context: 0 zram_index_mutex irq_context: 0 zram_index_mutex fs_reclaim irq_context: 0 zram_index_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 zram_index_mutex pool_lock#2 irq_context: 0 zram_index_mutex blk_queue_ida.xa_lock irq_context: 0 zram_index_mutex &obj_hash[i].lock irq_context: 0 zram_index_mutex pcpu_alloc_mutex irq_context: 0 zram_index_mutex pcpu_alloc_mutex pcpu_lock irq_context: 0 zram_index_mutex bio_slab_lock irq_context: 0 zram_index_mutex &____s->seqcount irq_context: 0 zram_index_mutex &pcp->lock &zone->lock irq_context: 0 zram_index_mutex &zone->lock irq_context: 0 zram_index_mutex rcu_read_lock pool_lock#2 irq_context: 0 zram_index_mutex percpu_counters_lock irq_context: 0 
zram_index_mutex &c->lock irq_context: 0 zram_index_mutex mmu_notifier_invalidate_range_start irq_context: 0 zram_index_mutex &sb->s_type->i_lock_key#3 irq_context: 0 zram_index_mutex &s->s_inode_list_lock irq_context: 0 zram_index_mutex &xa->xa_lock#8 irq_context: 0 zram_index_mutex &n->list_lock irq_context: 0 zram_index_mutex &n->list_lock &c->lock irq_context: 0 zram_index_mutex lock irq_context: 0 zram_index_mutex lock &q->queue_lock irq_context: 0 zram_index_mutex lock &q->queue_lock &blkcg->lock irq_context: 0 zram_index_mutex &q->queue_lock irq_context: 0 zram_index_mutex &q->queue_lock pool_lock#2 irq_context: 0 zram_index_mutex &obj_hash[i].lock pool_lock irq_context: 0 zram_index_mutex fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 zram_index_mutex fill_pool_map-wait-type-override &c->lock irq_context: 0 zram_index_mutex fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 zram_index_mutex fill_pool_map-wait-type-override pool_lock irq_context: 0 zram_index_mutex &q->queue_lock pcpu_lock irq_context: 0 zram_index_mutex &q->queue_lock &obj_hash[i].lock irq_context: 0 zram_index_mutex &q->queue_lock percpu_counters_lock irq_context: 0 zram_index_mutex &x->wait#9 irq_context: 0 zram_index_mutex &bdev->bd_size_lock irq_context: 0 zram_index_mutex &k->list_lock irq_context: 0 zram_index_mutex gdp_mutex irq_context: 0 zram_index_mutex gdp_mutex &k->list_lock irq_context: 0 zram_index_mutex lock kernfs_idr_lock irq_context: 0 zram_index_mutex &root->kernfs_rwsem irq_context: 0 zram_index_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 zram_index_mutex bus_type_sem irq_context: 0 zram_index_mutex sysfs_symlink_target_lock irq_context: 0 zram_index_mutex lock kernfs_idr_lock pool_lock#2 irq_context: 0 zram_index_mutex &root->kernfs_rwsem irq_context: 0 zram_index_mutex &dev->power.lock irq_context: 0 zram_index_mutex dpm_list_mtx irq_context: 0 zram_index_mutex req_lock irq_context: 0 zram_index_mutex &p->pi_lock irq_context: 0 zram_index_mutex &p->pi_lock &rq->__lock irq_context: 0 zram_index_mutex &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 zram_index_mutex &rq->__lock irq_context: 0 zram_index_mutex &cfs_rq->removed.lock irq_context: 0 zram_index_mutex &x->wait#11 irq_context: 0 zram_index_mutex subsys mutex#36 irq_context: 0 zram_index_mutex subsys mutex#36 &k->k_lock irq_context: 0 zram_index_mutex dev_hotplug_mutex irq_context: 0 zram_index_mutex dev_hotplug_mutex &dev->power.lock irq_context: 0 zram_index_mutex &q->sysfs_dir_lock irq_context: 0 zram_index_mutex &q->sysfs_dir_lock fs_reclaim irq_context: 0 zram_index_mutex &q->sysfs_dir_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 zram_index_mutex &q->sysfs_dir_lock pool_lock#2 irq_context: 0 zram_index_mutex &q->sysfs_dir_lock lock irq_context: 0 zram_index_mutex &q->sysfs_dir_lock lock kernfs_idr_lock irq_context: 0 zram_index_mutex &q->sysfs_dir_lock &root->kernfs_rwsem irq_context: 0 zram_index_mutex &q->sysfs_dir_lock &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 zram_index_mutex &q->sysfs_dir_lock &c->lock irq_context: 0 zram_index_mutex &q->sysfs_dir_lock &____s->seqcount irq_context: 0 zram_index_mutex &q->sysfs_dir_lock lock kernfs_idr_lock pool_lock#2 irq_context: 0 zram_index_mutex &q->sysfs_dir_lock &q->sysfs_lock irq_context: 0 zram_index_mutex &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex irq_context: 0 zram_index_mutex &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex pin_fs_lock irq_context: 0 
zram_index_mutex &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex &sb->s_type->i_mutex_key#3 irq_context: 0 zram_index_mutex &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex &sb->s_type->i_mutex_key#3 rename_lock.seqcount irq_context: 0 zram_index_mutex &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex &sb->s_type->i_mutex_key#3 fs_reclaim irq_context: 0 zram_index_mutex &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex &sb->s_type->i_mutex_key#3 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 zram_index_mutex &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex &sb->s_type->i_mutex_key#3 pool_lock#2 irq_context: 0 zram_index_mutex &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex &sb->s_type->i_mutex_key#3 &dentry->d_lock irq_context: 0 zram_index_mutex &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex &sb->s_type->i_mutex_key#3 rcu_read_lock rename_lock.seqcount irq_context: 0 zram_index_mutex &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex &sb->s_type->i_mutex_key#3 &dentry->d_lock &wq irq_context: 0 zram_index_mutex &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex &sb->s_type->i_mutex_key#3 mmu_notifier_invalidate_range_start irq_context: 0 zram_index_mutex &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex &sb->s_type->i_mutex_key#3 &sb->s_type->i_lock_key#7 irq_context: 0 zram_index_mutex &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex &sb->s_type->i_mutex_key#3 &s->s_inode_list_lock irq_context: 0 zram_index_mutex &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex &sb->s_type->i_mutex_key#3 tk_core.seq.seqcount irq_context: 0 zram_index_mutex &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex &sb->s_type->i_mutex_key#3 &sb->s_type->i_lock_key#7 &dentry->d_lock irq_context: 0 zram_index_mutex percpu_ref_switch_lock irq_context: 0 zram_index_mutex uevent_sock_mutex irq_context: 0 zram_index_mutex rcu_read_lock &pool->lock/1 irq_context: 0 zram_index_mutex rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 zram_index_mutex rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 zram_index_mutex rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 zram_index_mutex running_helpers_waitq.lock irq_context: 0 zram_index_mutex subsys mutex#37 irq_context: 0 zram_index_mutex subsys mutex#37 &k->k_lock irq_context: 0 zram_index_mutex cgwb_lock irq_context: 0 zram_index_mutex pin_fs_lock irq_context: 0 zram_index_mutex &sb->s_type->i_mutex_key#3 irq_context: 0 zram_index_mutex &sb->s_type->i_mutex_key#3 rename_lock.seqcount irq_context: 0 zram_index_mutex &sb->s_type->i_mutex_key#3 fs_reclaim irq_context: 0 zram_index_mutex &sb->s_type->i_mutex_key#3 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 zram_index_mutex &sb->s_type->i_mutex_key#3 pool_lock#2 irq_context: 0 zram_index_mutex &sb->s_type->i_mutex_key#3 &dentry->d_lock irq_context: 0 zram_index_mutex &sb->s_type->i_mutex_key#3 rcu_read_lock rename_lock.seqcount irq_context: 0 zram_index_mutex &sb->s_type->i_mutex_key#3 &dentry->d_lock &wq irq_context: 0 zram_index_mutex &sb->s_type->i_mutex_key#3 mmu_notifier_invalidate_range_start irq_context: 0 zram_index_mutex &sb->s_type->i_mutex_key#3 &sb->s_type->i_lock_key#7 irq_context: 0 zram_index_mutex &sb->s_type->i_mutex_key#3 &s->s_inode_list_lock irq_context: 0 zram_index_mutex &sb->s_type->i_mutex_key#3 tk_core.seq.seqcount irq_context: 0 zram_index_mutex &sb->s_type->i_mutex_key#3 &sb->s_type->i_lock_key#7 &dentry->d_lock irq_context: 0 zram_index_mutex bdi_lock irq_context: 0 zram_index_mutex inode_hash_lock irq_context: 
0 zram_index_mutex inode_hash_lock &sb->s_type->i_lock_key#3 irq_context: 0 zram_index_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 zram_index_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 zram_index_mutex (console_sem).lock irq_context: 0 zram_index_mutex console_lock console_srcu console_owner_lock irq_context: 0 zram_index_mutex console_lock console_srcu console_owner irq_context: 0 zram_index_mutex console_lock console_srcu console_owner &port_lock_key irq_context: 0 zram_index_mutex console_lock console_srcu console_owner console_owner_lock irq_context: 0 subsys mutex#38 irq_context: 0 subsys mutex#38 &k->k_lock irq_context: 0 &sb->s_type->i_mutex_key#7/1 &pcp->lock &zone->lock irq_context: 0 &sb->s_type->i_mutex_key#7/1 &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &sb->s_type->i_mutex_key#7/1 &default_group_class[depth - 1]#2 irq_context: 0 &sb->s_type->i_mutex_key#7/1 &default_group_class[depth - 1]#2 fs_reclaim irq_context: 0 &sb->s_type->i_mutex_key#7/1 &default_group_class[depth - 1]#2 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &sb->s_type->i_mutex_key#7/1 &default_group_class[depth - 1]#2 pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#7/1 &default_group_class[depth - 1]#2 configfs_dirent_lock irq_context: 0 &q->sysfs_lock &xa->xa_lock#7 pool_lock#2 irq_context: 0 &lock irq_context: 0 &lock nullb_indexes.xa_lock irq_context: 0 &q->sysfs_dir_lock &obj_hash[i].lock irq_context: 0 &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex &sb->s_type->i_mutex_key#3 rcu_read_lock pool_lock#2 irq_context: 0 &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex &sb->s_type->i_mutex_key#3 &obj_hash[i].lock irq_context: 0 &disk->open_mutex rcu_read_lock tk_core.seq.seqcount irq_context: 0 &disk->open_mutex rcu_read_lock &obj_hash[i].lock irq_context: 0 &disk->open_mutex rcu_read_lock &base->lock irq_context: 0 &disk->open_mutex rcu_read_lock &base->lock &obj_hash[i].lock irq_context: 0 &disk->open_mutex rcu_read_lock &ret->b_uptodate_lock irq_context: 0 ctx_list.lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&(&kfence_timer)->work) &cfs_rq->removed.lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&(&kfence_timer)->work) pool_lock#2 irq_context: 0 nfc_index_ida.xa_lock irq_context: 0 nfc_devlist_mutex irq_context: 0 nfc_devlist_mutex fs_reclaim irq_context: 0 nfc_devlist_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 nfc_devlist_mutex pool_lock#2 irq_context: 0 nfc_devlist_mutex &k->list_lock irq_context: 0 nfc_devlist_mutex gdp_mutex irq_context: 0 nfc_devlist_mutex gdp_mutex &k->list_lock irq_context: 0 nfc_devlist_mutex gdp_mutex fs_reclaim irq_context: 0 nfc_devlist_mutex gdp_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 nfc_devlist_mutex gdp_mutex pool_lock#2 irq_context: 0 nfc_devlist_mutex gdp_mutex lock irq_context: 0 nfc_devlist_mutex gdp_mutex lock kernfs_idr_lock irq_context: 0 nfc_devlist_mutex gdp_mutex &root->kernfs_rwsem irq_context: 0 nfc_devlist_mutex gdp_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 nfc_devlist_mutex lock irq_context: 0 nfc_devlist_mutex lock kernfs_idr_lock irq_context: 0 nfc_devlist_mutex &root->kernfs_rwsem irq_context: 0 nfc_devlist_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 nfc_devlist_mutex bus_type_sem irq_context: 0 nfc_devlist_mutex sysfs_symlink_target_lock irq_context: 0 nfc_devlist_mutex 
&root->kernfs_rwsem irq_context: 0 nfc_devlist_mutex &dev->power.lock irq_context: 0 nfc_devlist_mutex dpm_list_mtx irq_context: 0 nfc_devlist_mutex uevent_sock_mutex irq_context: 0 nfc_devlist_mutex &obj_hash[i].lock irq_context: 0 nfc_devlist_mutex rcu_read_lock &pool->lock/1 irq_context: 0 nfc_devlist_mutex rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 nfc_devlist_mutex rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 nfc_devlist_mutex rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 nfc_devlist_mutex running_helpers_waitq.lock irq_context: 0 nfc_devlist_mutex subsys mutex#39 irq_context: 0 nfc_devlist_mutex subsys mutex#39 &k->k_lock irq_context: 0 &dev->mutex rfkill_global_mutex irq_context: 0 &dev->mutex rfkill_global_mutex fs_reclaim irq_context: 0 &dev->mutex rfkill_global_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex rfkill_global_mutex pool_lock#2 irq_context: 0 &dev->mutex rfkill_global_mutex &c->lock irq_context: 0 &dev->mutex rfkill_global_mutex &____s->seqcount irq_context: 0 &dev->mutex rfkill_global_mutex &rq->__lock irq_context: 0 &dev->mutex rfkill_global_mutex &k->list_lock irq_context: 0 &dev->mutex rfkill_global_mutex lock irq_context: 0 &dev->mutex rfkill_global_mutex lock kernfs_idr_lock irq_context: 0 &dev->mutex rfkill_global_mutex &root->kernfs_rwsem irq_context: 0 &dev->mutex rfkill_global_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 &dev->mutex rfkill_global_mutex bus_type_sem irq_context: 0 &dev->mutex rfkill_global_mutex sysfs_symlink_target_lock irq_context: 0 &dev->mutex rfkill_global_mutex &root->kernfs_rwsem irq_context: 0 &dev->mutex rfkill_global_mutex &dev->power.lock irq_context: 0 &dev->mutex rfkill_global_mutex dpm_list_mtx irq_context: 0 &dev->mutex rfkill_global_mutex &rfkill->lock irq_context: 0 &dev->mutex rfkill_global_mutex uevent_sock_mutex irq_context: 0 &dev->mutex rfkill_global_mutex &obj_hash[i].lock irq_context: 0 &dev->mutex rfkill_global_mutex rcu_read_lock &pool->lock/1 irq_context: 0 &dev->mutex rfkill_global_mutex rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 &dev->mutex rfkill_global_mutex rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 &dev->mutex rfkill_global_mutex rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 &dev->mutex rfkill_global_mutex running_helpers_waitq.lock irq_context: 0 &dev->mutex rfkill_global_mutex &k->k_lock irq_context: 0 &dev->mutex rfkill_global_mutex subsys mutex#40 irq_context: 0 &dev->mutex rfkill_global_mutex subsys mutex#40 &k->k_lock irq_context: 0 &dev->mutex rfkill_global_mutex triggers_list_lock irq_context: 0 &dev->mutex rfkill_global_mutex leds_list_lock irq_context: 0 &dev->mutex rfkill_global_mutex rcu_read_lock &pool->lock irq_context: 0 &dev->mutex rfkill_global_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 &dev->mutex rfkill_global_mutex rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 &dev->mutex rfkill_global_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 &dev->mutex rfkill_global_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 &dev->mutex rfkill_global_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &dev->mutex rfkill_global_mutex rcu_read_lock &pool->lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 &dev->mutex rfkill_global_mutex rcu_read_lock &pool->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)events 
(work_completion)(&rfkill->sync_work) irq_context: 0 (wq_completion)events (work_completion)(&rfkill->sync_work) rfkill_global_mutex irq_context: 0 (wq_completion)events (work_completion)(&rfkill->sync_work) rfkill_global_mutex rfkill_global_mutex.wait_lock irq_context: 0 (wq_completion)events (work_completion)(&rfkill->sync_work) rfkill_global_mutex &pool->lock irq_context: 0 (wq_completion)events (work_completion)(&rfkill->sync_work) rfkill_global_mutex &pool->lock &p->pi_lock irq_context: 0 (wq_completion)events (work_completion)(&rfkill->sync_work) rfkill_global_mutex &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&rfkill->sync_work) rfkill_global_mutex &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&rfkill->sync_work) rfkill_global_mutex &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&rfkill->sync_work) rfkill_global_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &dev->mutex rfkill_global_mutex.wait_lock irq_context: 0 &dev->mutex &p->pi_lock irq_context: 0 &dev->mutex &p->pi_lock &rq->__lock irq_context: 0 &dev->mutex &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&rfkill->sync_work) rfkill_global_mutex &rfkill->lock irq_context: 0 &pool->lock &base->lock irq_context: 0 &pool->lock &base->lock &obj_hash[i].lock irq_context: 0 nfc_devlist_mutex &c->lock irq_context: 0 nfc_devlist_mutex &____s->seqcount irq_context: 0 nfc_devlist_mutex &pcp->lock &zone->lock irq_context: 0 nfc_devlist_mutex &zone->lock irq_context: 0 nfc_devlist_mutex rcu_read_lock pool_lock#2 irq_context: 0 &dev->mutex rfkill_global_mutex lock kernfs_idr_lock pool_lock#2 irq_context: 0 &dev->mutex rfkill_global_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 &dev->mutex rfkill_global_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&rfkill_global_led_trigger_work) rfkill_global_mutex rfkill_global_mutex.wait_lock irq_context: 0 (wq_completion)events (work_completion)(&rfkill_global_led_trigger_work) rfkill_global_mutex &pool->lock irq_context: 0 (wq_completion)events (work_completion)(&rfkill_global_led_trigger_work) rfkill_global_mutex &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&rfkill_global_led_trigger_work) rfkill_global_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&rfkill->sync_work) rfkill_global_mutex rcu_read_lock &pool->lock irq_context: 0 (wq_completion)events (work_completion)(&rfkill->sync_work) rfkill_global_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&rfkill->sync_work) rfkill_global_mutex.wait_lock irq_context: 0 (wq_completion)events (work_completion)(&rfkill->sync_work) &p->pi_lock irq_context: 0 (wq_completion)events (work_completion)(&rfkill->sync_work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&rfkill->sync_work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 misc_mtx rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 misc_mtx rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override pool_lock irq_context: 0 dma_heap_minors.xa_lock irq_context: 0 subsys mutex#41 irq_context: 0 subsys mutex#41 
&k->k_lock irq_context: 0 heap_list_lock irq_context: 0 &mm->mmap_lock &cfs_rq->removed.lock irq_context: 0 &mm->mmap_lock &obj_hash[i].lock irq_context: 0 uevent_sock_mutex &rq->__lock irq_context: 0 &dev->mutex host_index_ida.xa_lock irq_context: 0 &dev->mutex kthread_create_lock irq_context: 0 &dev->mutex &x->wait irq_context: 0 &sig->cred_guard_mutex &mm->mmap_lock lock#4 &lruvec->lru_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 &dev->mutex &cfs_rq->removed.lock irq_context: 0 &dev->mutex cpu_hotplug_lock wq_pool_mutex irq_context: 0 &dev->mutex cpu_hotplug_lock wq_pool_mutex fs_reclaim irq_context: 0 &dev->mutex cpu_hotplug_lock wq_pool_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex cpu_hotplug_lock wq_pool_mutex pool_lock#2 irq_context: 0 &dev->mutex cpu_hotplug_lock wq_pool_mutex &obj_hash[i].lock irq_context: 0 &dev->mutex cpu_hotplug_lock wq_pool_mutex &wq->mutex irq_context: 0 &dev->mutex cpu_hotplug_lock wq_pool_mutex &wq->mutex &pool->lock/1 irq_context: 0 &dev->mutex &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &dev->mutex &p->pi_lock &cfs_rq->removed.lock irq_context: 0 &dev->mutex subsys mutex#9 irq_context: 0 &dev->mutex wq_pool_mutex irq_context: 0 &dev->mutex wq_pool_mutex &wq->mutex irq_context: 0 &dev->mutex &md->mutex &c->lock irq_context: 0 &dev->mutex &md->mutex &pcp->lock &zone->lock irq_context: 0 &dev->mutex &md->mutex &zone->lock irq_context: 0 &dev->mutex &md->mutex &____s->seqcount irq_context: 0 &dev->mutex &md->mutex &domain->mutex sparse_irq_lock &n->list_lock irq_context: 0 &dev->mutex &rq->__lock &cfs_rq->removed.lock irq_context: 0 &dev->mutex &desc->request_mutex &irq_desc_lock_class tmp_mask_lock irq_context: 0 &dev->mutex &desc->request_mutex &irq_desc_lock_class tmp_mask_lock vector_lock irq_context: 0 &dev->mutex &desc->request_mutex &rq->__lock irq_context: 0 &dev->mutex scsi_sense_cache_mutex irq_context: 0 &dev->mutex scsi_sense_cache_mutex slab_mutex irq_context: 0 &dev->mutex scsi_sense_cache_mutex slab_mutex fs_reclaim irq_context: 0 &dev->mutex scsi_sense_cache_mutex slab_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex scsi_sense_cache_mutex slab_mutex pool_lock#2 irq_context: 0 &dev->mutex scsi_sense_cache_mutex slab_mutex &c->lock irq_context: 0 &dev->mutex scsi_sense_cache_mutex slab_mutex &n->list_lock irq_context: 0 &dev->mutex scsi_sense_cache_mutex slab_mutex pcpu_alloc_mutex irq_context: 0 &dev->mutex scsi_sense_cache_mutex slab_mutex pcpu_alloc_mutex pcpu_lock irq_context: 0 &dev->mutex pcpu_alloc_mutex irq_context: 0 &dev->mutex pcpu_alloc_mutex pcpu_lock irq_context: 0 &dev->mutex batched_entropy_u32.lock irq_context: 0 &dev->mutex batched_entropy_u8.lock irq_context: 0 &dev->mutex kfence_freelist_lock irq_context: 0 &dev->mutex &dev->power.lock &dev->power.lock/1 irq_context: 0 &dev->mutex subsys mutex#42 irq_context: 0 &sig->cred_guard_mutex delayed_uprobe_lock &rq->__lock irq_context: 0 &dev->mutex subsys mutex#43 irq_context: 0 &dev->mutex subsys mutex#43 &k->k_lock irq_context: 0 &dev->mutex attribute_container_mutex irq_context: 0 &dev->mutex &dev->power.lock &dev->power.wait_queue irq_context: 0 &dev->mutex &virtscsi_vq->vq_lock irq_context: 0 &dev->mutex &shost->scan_mutex irq_context: 0 &dev->mutex &shost->scan_mutex fs_reclaim irq_context: 0 &dev->mutex &shost->scan_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex &shost->scan_mutex pool_lock#2 irq_context: 0 &dev->mutex &shost->scan_mutex 
shost->host_lock irq_context: 0 &dev->mutex async_scan_lock irq_context: 0 &dev->mutex async_scan_lock &x->wait#15 irq_context: 0 &dev->mutex async_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &dev->power.lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex fs_reclaim irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex pool_lock#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &x->wait#9 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex shost->host_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex attribute_container_mutex irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &c->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &pcp->lock &zone->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &zone->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &____s->seqcount irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex blk_queue_ida.xa_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex pcpu_alloc_mutex irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex pcpu_alloc_mutex pcpu_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &q->sysfs_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &q->sysfs_lock &q->unused_hctx_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &q->sysfs_lock mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &q->sysfs_lock pool_lock#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &q->sysfs_lock &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &q->sysfs_lock cpu_hotplug_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &q->sysfs_lock cpu_hotplug_lock cpuhp_state_mutex irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &q->sysfs_lock fs_reclaim irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &q->sysfs_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &q->sysfs_lock &pcp->lock &zone->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &q->sysfs_lock &zone->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &q->sysfs_lock 
&____s->seqcount irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &q->sysfs_lock &c->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &q->sysfs_lock &xa->xa_lock#7 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &set->tag_list_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex batched_entropy_u32.lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex tk_core.seq.seqcount irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &hctx->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex rcu_read_lock &hctx->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex rcu_read_lock batched_entropy_u32.lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex rcu_read_lock &base->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex rcu_read_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex rcu_read_lock &virtscsi_vq->vq_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &x->wait#16 irq_context: hardirq &virtscsi_vq->vq_lock irq_context: softirq &x->wait#16 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &base->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &rq->__lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex (&timer.timer) irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &sdev->state_mutex irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &q->mq_freeze_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &q->mq_freeze_lock percpu_ref_switch_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &q->mq_freeze_lock percpu_ref_switch_lock rcu_read_lock &q->mq_freeze_wq irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &q->mq_freeze_wq irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex percpu_ref_switch_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex (&q->timeout) irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex (work_completion)(&q->timeout_work) irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex 
(work_completion)(&(&q->requeue_work)->work) irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex (work_completion)(&(&hctx->run_work)->work) irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex cpu_hotplug_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex cpu_hotplug_lock cpuhp_state_mutex irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &xa->xa_lock#7 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &q->unused_hctx_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex (work_completion)(&sdev->requeue_work) irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex (work_completion)(&sdev->event_work) irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex pcpu_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &sdev->inquiry_mutex irq_context: softirq &x->wait#16 &p->pi_lock irq_context: softirq &x->wait#16 &p->pi_lock &rq->__lock irq_context: softirq &x->wait#16 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex (console_sem).lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex console_owner_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex console_owner irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex console_lock console_srcu console_owner_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex console_lock console_srcu console_owner irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex console_lock console_srcu console_owner &port_lock_key irq_context: 0 major_names_lock &c->lock irq_context: 0 major_names_lock &____s->seqcount irq_context: 0 subsys mutex#44 irq_context: 0 subsys mutex#44 &k->list_lock irq_context: 0 subsys mutex#44 &k->k_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex console_lock console_srcu console_owner console_owner_lock irq_context: 0 lock kernfs_idr_lock rcu_read_lock pool_lock#2 irq_context: 0 lock kernfs_idr_lock &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &cfs_rq->removed.lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &set->tag_list_lock &q->mq_freeze_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &set->tag_list_lock &q->mq_freeze_lock percpu_ref_switch_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &set->tag_list_lock &q->mq_freeze_lock percpu_ref_switch_lock rcu_read_lock &q->mq_freeze_wq irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &set->tag_list_lock 
percpu_ref_switch_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &set->tag_list_lock &q->mq_freeze_lock &q->mq_freeze_wq irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &pcp->lock &zone->lock &____s->seqcount irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex rcu_read_lock batched_entropy_u32.lock crngs.lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex rcu_read_lock &obj_hash[i].lock pool_lock irq_context: 0 nvmf_hosts_mutex irq_context: 0 subsys mutex#45 irq_context: 0 subsys mutex#45 &k->k_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex pcpu_alloc_mutex &rq->__lock irq_context: 0 nvmf_transports_rwsem irq_context: 0 subsys mutex#46 irq_context: 0 subsys mutex#46 &k->k_lock irq_context: 0 pcpu_alloc_mutex pcpu_alloc_mutex.wait_lock irq_context: 0 pcpu_alloc_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex pcpu_alloc_mutex.wait_lock irq_context: 0 pcpu_alloc_mutex.wait_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &p->pi_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex fill_pool_map-wait-type-override pool_lock irq_context: softirq rcu_callback pcpu_lock irq_context: softirq rcu_callback percpu_ref_switch_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex quarantine_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex batched_entropy_u32.lock crngs.lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &q->sysfs_lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &sb->s_type->i_mutex_key#7/1 &default_group_class[depth - 1]/2 fs_reclaim irq_context: 0 &sb->s_type->i_mutex_key#7/1 &default_group_class[depth - 1]/2 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &sb->s_type->i_mutex_key#7/1 &default_group_class[depth - 1]/2 pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#7/1 &default_group_class[depth - 1]/2 &dentry->d_lock irq_context: 0 &sb->s_type->i_mutex_key#7/1 &default_group_class[depth - 1]/2 configfs_dirent_lock irq_context: 0 &sb->s_type->i_mutex_key#7/1 &default_group_class[depth - 1]/2 mmu_notifier_invalidate_range_start irq_context: 0 &sb->s_type->i_mutex_key#7/1 &default_group_class[depth - 1]/2 &sb->s_type->i_lock_key#19 irq_context: 0 &sb->s_type->i_mutex_key#7/1 &default_group_class[depth - 1]/2 &s->s_inode_list_lock irq_context: 0 &sb->s_type->i_mutex_key#7/1 &default_group_class[depth - 1]/2 tk_core.seq.seqcount irq_context: 0 &sb->s_type->i_mutex_key#7/1 &default_group_class[depth - 1]/2 &sb->s_type->i_lock_key#19 &dentry->d_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex rcu_read_lock 
fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#7/1 &default_group_class[depth - 1]/2 &default_group_class[depth - 1]#3/2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex rcu_read_lock fill_pool_map-wait-type-override pool_lock irq_context: 0 nvmet_config_sem irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex fill_pool_map-wait-type-override &pcp->lock &zone->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex fill_pool_map-wait-type-override &zone->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex fill_pool_map-wait-type-override &rq->__lock irq_context: 0 subsys mutex#47 irq_context: 0 subsys mutex#47 &k->k_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &rq->__lock &cfs_rq->removed.lock irq_context: 0 &sb->s_type->i_mutex_key#7/1 &default_group_class[depth - 1]/2 &default_group_class[depth - 1]#3/2 fs_reclaim irq_context: 0 &sb->s_type->i_mutex_key#7/1 &default_group_class[depth - 1]/2 &default_group_class[depth - 1]#3/2 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &sb->s_type->i_mutex_key#7/1 &default_group_class[depth - 1]/2 &default_group_class[depth - 1]#3/2 pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#7/1 &default_group_class[depth - 1]/2 &default_group_class[depth - 1]#3/2 &dentry->d_lock irq_context: 0 &sb->s_type->i_mutex_key#7/1 &default_group_class[depth - 1]/2 &default_group_class[depth - 1]#3/2 configfs_dirent_lock irq_context: 0 &sb->s_type->i_mutex_key#7/1 &default_group_class[depth - 1]/2 &default_group_class[depth - 1]#3/2 mmu_notifier_invalidate_range_start irq_context: 0 &sb->s_type->i_mutex_key#7/1 &default_group_class[depth - 1]/2 &default_group_class[depth - 1]#3/2 &sb->s_type->i_lock_key#19 irq_context: 0 &sb->s_type->i_mutex_key#7/1 &default_group_class[depth - 1]/2 &default_group_class[depth - 1]#3/2 &s->s_inode_list_lock irq_context: 0 &sb->s_type->i_mutex_key#7/1 &default_group_class[depth - 1]/2 &default_group_class[depth - 1]#3/2 tk_core.seq.seqcount irq_context: 0 &sb->s_type->i_mutex_key#7/1 &default_group_class[depth - 1]/2 &default_group_class[depth - 1]#3/2 &sb->s_type->i_lock_key#19 &dentry->d_lock irq_context: 0 &sb->s_type->i_mutex_key#7/1 &default_group_class[depth - 1]/2 &default_group_class[depth - 1]#3/2 &default_group_class[depth - 1]#4/2 irq_context: 0 &sb->s_type->i_mutex_key#7/1 &default_group_class[depth - 1]/2 &default_group_class[depth - 1]#3/2 &default_group_class[depth - 1]#4/2 fs_reclaim irq_context: 0 &sb->s_type->i_mutex_key#7/1 &default_group_class[depth - 1]/2 &default_group_class[depth - 1]#3/2 &default_group_class[depth - 1]#4/2 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &sb->s_type->i_mutex_key#7/1 &default_group_class[depth - 1]/2 &default_group_class[depth - 1]#3/2 &default_group_class[depth - 1]#4/2 pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#7/1 &default_group_class[depth - 1]/2 &default_group_class[depth - 1]#3/2 &default_group_class[depth - 1]#4/2 &dentry->d_lock irq_context: 0 &sb->s_type->i_mutex_key#7/1 
&default_group_class[depth - 1]/2 &default_group_class[depth - 1]#3/2 &default_group_class[depth - 1]#4/2 configfs_dirent_lock irq_context: 0 &sb->s_type->i_mutex_key#7/1 &default_group_class[depth - 1]/2 &default_group_class[depth - 1]#3/2 &default_group_class[depth - 1]#4/2 mmu_notifier_invalidate_range_start irq_context: 0 &sb->s_type->i_mutex_key#7/1 &default_group_class[depth - 1]/2 &default_group_class[depth - 1]#3/2 &default_group_class[depth - 1]#4/2 &sb->s_type->i_lock_key#19 irq_context: 0 &sb->s_type->i_mutex_key#7/1 &default_group_class[depth - 1]/2 &default_group_class[depth - 1]#3/2 &default_group_class[depth - 1]#4/2 &s->s_inode_list_lock irq_context: 0 &sb->s_type->i_mutex_key#7/1 &default_group_class[depth - 1]/2 &default_group_class[depth - 1]#3/2 &default_group_class[depth - 1]#4/2 tk_core.seq.seqcount irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex rcu_read_lock fill_pool_map-wait-type-override &c->lock irq_context: 0 &sb->s_type->i_mutex_key#7/1 &default_group_class[depth - 1]/2 &default_group_class[depth - 1]#3/2 &default_group_class[depth - 1]#4/2 &sb->s_type->i_lock_key#19 &dentry->d_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex rcu_read_lock fill_pool_map-wait-type-override &pcp->lock &zone->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex rcu_read_lock fill_pool_map-wait-type-override &zone->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex rcu_read_lock fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 &sb->s_type->i_mutex_key#7/1 &default_group_class[depth - 1]/2 &default_group_class[depth - 1]#3/2 &default_group_class[depth - 1]#4/2 &default_group_class[depth - 1]#5/2 irq_context: 0 &sb->s_type->i_mutex_key#7/1 &default_group_class[depth - 1]/2 &default_group_class[depth - 1]#3/2 &default_group_class[depth - 1]#4/2 &default_group_class[depth - 1]#5/2 fs_reclaim irq_context: 0 &sb->s_type->i_mutex_key#7/1 &default_group_class[depth - 1]/2 &default_group_class[depth - 1]#3/2 &default_group_class[depth - 1]#4/2 &default_group_class[depth - 1]#5/2 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &sb->s_type->i_mutex_key#7/1 &default_group_class[depth - 1]/2 &default_group_class[depth - 1]#3/2 &default_group_class[depth - 1]#4/2 &default_group_class[depth - 1]#5/2 pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#7/1 &default_group_class[depth - 1]/2 &default_group_class[depth - 1]#3/2 &default_group_class[depth - 1]#4/2 &default_group_class[depth - 1]#5/2 &dentry->d_lock irq_context: 0 &sb->s_type->i_mutex_key#7/1 &default_group_class[depth - 1]/2 &default_group_class[depth - 1]#3/2 &default_group_class[depth - 1]#4/2 &default_group_class[depth - 1]#5/2 configfs_dirent_lock irq_context: 0 &sb->s_type->i_mutex_key#7/1 &default_group_class[depth - 1]/2 &default_group_class[depth - 1]#3/2 &default_group_class[depth - 1]#4/2 &default_group_class[depth - 1]#5/2 mmu_notifier_invalidate_range_start irq_context: 0 &sb->s_type->i_mutex_key#7/1 &default_group_class[depth - 1]/2 &default_group_class[depth - 1]#3/2 &default_group_class[depth - 1]#4/2 &default_group_class[depth - 1]#5/2 &sb->s_type->i_lock_key#19 irq_context: 0 &sb->s_type->i_mutex_key#7/1 &default_group_class[depth - 1]/2 &default_group_class[depth - 1]#3/2 &default_group_class[depth - 1]#4/2 &default_group_class[depth - 1]#5/2 &s->s_inode_list_lock irq_context: 0 
&sb->s_type->i_mutex_key#7/1 &default_group_class[depth - 1]/2 &default_group_class[depth - 1]#3/2 &default_group_class[depth - 1]#4/2 &default_group_class[depth - 1]#5/2 tk_core.seq.seqcount irq_context: 0 &sb->s_type->i_mutex_key#7/1 &default_group_class[depth - 1]/2 &default_group_class[depth - 1]#3/2 &default_group_class[depth - 1]#4/2 &default_group_class[depth - 1]#5/2 &sb->s_type->i_lock_key#19 &dentry->d_lock irq_context: 0 &sb->s_type->i_mutex_key#7/1 &default_group_class[depth - 1]/2 &default_group_class[depth - 1]#3/2 &default_group_class[depth - 1]#4/2 &default_group_class[depth - 1]#5/2 &default_group_class[depth - 1]#6 irq_context: 0 &sb->s_type->i_mutex_key#7/1 &default_group_class[depth - 1]/2 &default_group_class[depth - 1]#3/2 &default_group_class[depth - 1]#4/2 &default_group_class[depth - 1]#5/2 &default_group_class[depth - 1]#6 fs_reclaim irq_context: 0 &sb->s_type->i_mutex_key#7/1 &default_group_class[depth - 1]/2 &default_group_class[depth - 1]#3/2 &default_group_class[depth - 1]#4/2 &default_group_class[depth - 1]#5/2 &default_group_class[depth - 1]#6 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &sb->s_type->i_mutex_key#7/1 &default_group_class[depth - 1]/2 &default_group_class[depth - 1]#3/2 &default_group_class[depth - 1]#4/2 &default_group_class[depth - 1]#5/2 &default_group_class[depth - 1]#6 pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#7/1 &default_group_class[depth - 1]/2 &default_group_class[depth - 1]#3/2 &default_group_class[depth - 1]#4/2 &default_group_class[depth - 1]#5/2 &default_group_class[depth - 1]#6 configfs_dirent_lock irq_context: 0 &sb->s_type->i_mutex_key#7/1 &default_group_class[depth - 1]/2 &default_group_class[depth - 1]#3/2 &default_group_class[depth - 1]#4/2 &default_group_class[depth - 1]#5/2 &default_group_class[depth - 1]#6/2 irq_context: 0 backend_mutex irq_context: 0 scsi_mib_index_lock irq_context: 0 hba_lock irq_context: 0 device_mutex irq_context: 0 device_mutex fs_reclaim irq_context: 0 device_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 device_mutex pool_lock#2 irq_context: 0 &hba->device_lock irq_context: 0 rcu_read_lock init_fs.seq.seqcount irq_context: 0 &sb->s_type->i_mutex_key irq_context: 0 &sb->s_type->i_mutex_key fs_reclaim irq_context: 0 &sb->s_type->i_mutex_key fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &sb->s_type->i_mutex_key pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key &dentry->d_lock irq_context: 0 &sb->s_type->i_mutex_key rcu_read_lock rename_lock.seqcount irq_context: 0 &sb->s_type->i_mutex_key &dentry->d_lock &wq irq_context: 0 mtd_table_mutex irq_context: 0 part_parser_lock irq_context: 0 (kmod_concurrent_max).lock irq_context: 0 &x->wait#17 irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) &sig->wait_chldexit irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) tasklist_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 tasklist_lock &sighand->siglock &sig->wait_chldexit &p->pi_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) rcu_read_lock &____s->seqcount#5 irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) &prev->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) &sighand->siglock &(&sig->stats_lock)->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) 
&sighand->siglock &(&sig->stats_lock)->lock &____s->seqcount#5 irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) css_set_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) tasklist_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) tasklist_lock &sighand->siglock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) tasklist_lock &sighand->siglock input_pool.lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) tasklist_lock &sighand->siglock &(&sig->stats_lock)->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) tasklist_lock &sighand->siglock &(&sig->stats_lock)->lock &____s->seqcount#5 irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) tasklist_lock &sighand->siglock &(&sig->stats_lock)->lock &____s->seqcount#5 pidmap_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) tasklist_lock &sighand->siglock &(&sig->stats_lock)->lock &____s->seqcount#5 &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) tasklist_lock &sighand->siglock &(&sig->stats_lock)->lock &____s->seqcount#5 pool_lock#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) tasklist_lock &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) &x->wait#17 irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) &x->wait#17 &p->pi_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) &x->wait#17 &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) &x->wait#17 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) &x->wait#17 &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &q->sysfs_lock &n->list_lock irq_context: softirq &x->wait#16 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 tasklist_lock &sighand->siglock &sig->wait_chldexit &p->pi_lock &rq->__lock irq_context: 0 tasklist_lock &sighand->siglock &sig->wait_chldexit &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) &cfs_rq->removed.lock irq_context: 0 mtd_table_mutex fs_reclaim irq_context: 0 mtd_table_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 mtd_table_mutex pool_lock#2 irq_context: 0 mtd_table_mutex &x->wait#9 irq_context: 0 mtd_table_mutex &obj_hash[i].lock irq_context: 0 mtd_table_mutex &k->list_lock irq_context: 0 mtd_table_mutex gdp_mutex irq_context: 0 mtd_table_mutex gdp_mutex &k->list_lock irq_context: 0 mtd_table_mutex gdp_mutex fs_reclaim irq_context: 0 mtd_table_mutex gdp_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 mtd_table_mutex gdp_mutex pool_lock#2 irq_context: 0 mtd_table_mutex gdp_mutex lock irq_context: 0 mtd_table_mutex gdp_mutex lock kernfs_idr_lock irq_context: 0 mtd_table_mutex gdp_mutex &root->kernfs_rwsem irq_context: 0 mtd_table_mutex gdp_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 mtd_table_mutex lock irq_context: 0 mtd_table_mutex lock kernfs_idr_lock irq_context: 0 mtd_table_mutex &root->kernfs_rwsem irq_context: 0 mtd_table_mutex 
&root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 mtd_table_mutex bus_type_sem irq_context: 0 mtd_table_mutex sysfs_symlink_target_lock irq_context: 0 mtd_table_mutex &c->lock irq_context: 0 mtd_table_mutex &____s->seqcount irq_context: 0 mtd_table_mutex &root->kernfs_rwsem irq_context: 0 mtd_table_mutex lock kernfs_idr_lock pool_lock#2 irq_context: 0 mtd_table_mutex &dev->power.lock irq_context: 0 mtd_table_mutex dpm_list_mtx irq_context: 0 mtd_table_mutex req_lock irq_context: 0 mtd_table_mutex &p->pi_lock irq_context: 0 mtd_table_mutex &p->pi_lock &rq->__lock irq_context: 0 mtd_table_mutex &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 mtd_table_mutex &x->wait#11 irq_context: 0 mtd_table_mutex &rq->__lock irq_context: 0 mtd_table_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 mtd_table_mutex uevent_sock_mutex irq_context: 0 mtd_table_mutex rcu_read_lock &pool->lock/1 irq_context: 0 mtd_table_mutex rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 mtd_table_mutex rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 mtd_table_mutex rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 mtd_table_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 mtd_table_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 mtd_table_mutex running_helpers_waitq.lock irq_context: 0 mtd_table_mutex subsys mutex#48 irq_context: 0 mtd_table_mutex subsys mutex#48 &k->k_lock irq_context: 0 mtd_table_mutex devtree_lock irq_context: 0 mtd_table_mutex nvmem_ida.xa_lock irq_context: 0 mtd_table_mutex nvmem_cell_mutex irq_context: 0 mtd_table_mutex &k->k_lock irq_context: 0 mtd_table_mutex &dev->mutex &dev->power.lock irq_context: 0 mtd_table_mutex &dev->mutex &k->list_lock irq_context: 0 mtd_table_mutex &dev->mutex &k->k_lock irq_context: 0 mtd_table_mutex subsys mutex#49 irq_context: 0 mtd_table_mutex pin_fs_lock irq_context: 0 mtd_table_mutex &sb->s_type->i_mutex_key#3 irq_context: 0 mtd_table_mutex &sb->s_type->i_mutex_key#3 rename_lock.seqcount irq_context: 0 mtd_table_mutex &sb->s_type->i_mutex_key#3 fs_reclaim irq_context: 0 mtd_table_mutex &sb->s_type->i_mutex_key#3 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 mtd_table_mutex &sb->s_type->i_mutex_key#3 pool_lock#2 irq_context: 0 mtd_table_mutex &sb->s_type->i_mutex_key#3 &dentry->d_lock irq_context: 0 mtd_table_mutex &sb->s_type->i_mutex_key#3 rcu_read_lock rename_lock.seqcount irq_context: 0 mtd_table_mutex &sb->s_type->i_mutex_key#3 &dentry->d_lock &wq irq_context: 0 mtd_table_mutex &sb->s_type->i_mutex_key#3 mmu_notifier_invalidate_range_start irq_context: 0 mtd_table_mutex &sb->s_type->i_mutex_key#3 &sb->s_type->i_lock_key#7 irq_context: 0 mtd_table_mutex &sb->s_type->i_mutex_key#3 &s->s_inode_list_lock irq_context: 0 mtd_table_mutex &sb->s_type->i_mutex_key#3 tk_core.seq.seqcount irq_context: 0 mtd_table_mutex &sb->s_type->i_mutex_key#3 &sb->s_type->i_lock_key#7 &dentry->d_lock irq_context: 0 mtd_table_mutex (console_sem).lock irq_context: 0 mtd_table_mutex console_lock console_srcu console_owner_lock irq_context: 0 mtd_table_mutex console_lock console_srcu console_owner irq_context: 0 mtd_table_mutex console_lock console_srcu console_owner &port_lock_key irq_context: 0 mtd_table_mutex console_lock console_srcu console_owner console_owner_lock irq_context: 0 mtd_table_mutex pcpu_alloc_mutex irq_context: 0 mtd_table_mutex pcpu_alloc_mutex pcpu_lock irq_context: 0 mtd_table_mutex 
cpu_hotplug_lock irq_context: 0 mtd_table_mutex &n->list_lock irq_context: 0 mtd_table_mutex &pcp->lock &zone->lock irq_context: 0 mtd_table_mutex &zone->lock irq_context: 0 mtd_table_mutex batched_entropy_u32.lock irq_context: 0 mtd_table_mutex mmu_notifier_invalidate_range_start irq_context: 0 mtd_table_mutex rcu_read_lock pool_lock#2 irq_context: 0 mtd_table_mutex blk_queue_ida.xa_lock irq_context: 0 mtd_table_mutex &q->sysfs_lock irq_context: 0 mtd_table_mutex &q->sysfs_lock &q->unused_hctx_lock irq_context: 0 mtd_table_mutex &q->sysfs_lock mmu_notifier_invalidate_range_start irq_context: 0 mtd_table_mutex &q->sysfs_lock pool_lock#2 irq_context: 0 mtd_table_mutex &q->sysfs_lock &obj_hash[i].lock irq_context: 0 mtd_table_mutex &q->sysfs_lock &c->lock irq_context: 0 mtd_table_mutex &q->sysfs_lock &pcp->lock &zone->lock irq_context: 0 mtd_table_mutex &q->sysfs_lock &zone->lock irq_context: 0 mtd_table_mutex &q->sysfs_lock &____s->seqcount irq_context: 0 mtd_table_mutex &q->sysfs_lock cpu_hotplug_lock irq_context: 0 mtd_table_mutex &q->sysfs_lock cpu_hotplug_lock cpuhp_state_mutex irq_context: 0 mtd_table_mutex &q->sysfs_lock fs_reclaim irq_context: 0 mtd_table_mutex &q->sysfs_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 mtd_table_mutex &q->sysfs_lock &xa->xa_lock#7 irq_context: 0 mtd_table_mutex &set->tag_list_lock irq_context: 0 mtd_table_mutex &obj_hash[i].lock pool_lock irq_context: 0 mtd_table_mutex bio_slab_lock irq_context: 0 mtd_table_mutex percpu_counters_lock irq_context: 0 mtd_table_mutex &sb->s_type->i_lock_key#3 irq_context: 0 mtd_table_mutex &s->s_inode_list_lock irq_context: 0 mtd_table_mutex &xa->xa_lock#8 irq_context: 0 mtd_table_mutex lock &q->queue_lock irq_context: 0 mtd_table_mutex lock &q->queue_lock &blkcg->lock irq_context: 0 mtd_table_mutex &q->mq_freeze_lock irq_context: 0 mtd_table_mutex &q->mq_freeze_lock percpu_ref_switch_lock irq_context: 0 mtd_table_mutex &q->mq_freeze_lock percpu_ref_switch_lock rcu_read_lock &q->mq_freeze_wq irq_context: 0 mtd_table_mutex set->srcu irq_context: 0 mtd_table_mutex percpu_ref_switch_lock irq_context: 0 mtd_table_mutex &q->queue_lock irq_context: 0 mtd_table_mutex &q->queue_lock pool_lock#2 irq_context: 0 mtd_table_mutex &q->mq_freeze_lock &q->mq_freeze_wq irq_context: 0 mtd_table_mutex &q->queue_lock &c->lock irq_context: 0 mtd_table_mutex &q->queue_lock &____s->seqcount irq_context: 0 mtd_table_mutex &q->queue_lock pcpu_lock irq_context: 0 mtd_table_mutex &q->queue_lock &obj_hash[i].lock irq_context: 0 mtd_table_mutex &q->queue_lock percpu_counters_lock irq_context: 0 mtd_table_mutex &q->queue_lock &obj_hash[i].lock pool_lock irq_context: 0 mtd_table_mutex &bdev->bd_size_lock irq_context: 0 mtd_table_mutex elv_list_lock irq_context: 0 mtd_table_mutex (work_completion)(&(&q->requeue_work)->work) irq_context: 0 mtd_table_mutex (work_completion)(&(&hctx->run_work)->work) irq_context: 0 mtd_table_mutex &q->debugfs_mutex irq_context: 0 mtd_table_mutex &cfs_rq->removed.lock irq_context: 0 mtd_table_mutex subsys mutex#36 irq_context: 0 mtd_table_mutex subsys mutex#36 &k->k_lock irq_context: 0 mtd_table_mutex dev_hotplug_mutex irq_context: 0 mtd_table_mutex dev_hotplug_mutex &dev->power.lock irq_context: 0 mtd_table_mutex &q->sysfs_dir_lock irq_context: 0 mtd_table_mutex &q->sysfs_dir_lock fs_reclaim irq_context: 0 mtd_table_mutex &q->sysfs_dir_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 mtd_table_mutex &q->sysfs_dir_lock pool_lock#2 irq_context: 0 mtd_table_mutex &q->sysfs_dir_lock lock 
irq_context: 0 mtd_table_mutex &q->sysfs_dir_lock lock kernfs_idr_lock irq_context: 0 mtd_table_mutex &q->sysfs_dir_lock &root->kernfs_rwsem irq_context: 0 mtd_table_mutex &q->sysfs_dir_lock &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 mtd_table_mutex &q->sysfs_dir_lock &c->lock irq_context: 0 mtd_table_mutex &q->sysfs_dir_lock &____s->seqcount irq_context: 0 mtd_table_mutex &q->sysfs_dir_lock &q->sysfs_lock irq_context: 0 mtd_table_mutex &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex irq_context: 0 mtd_table_mutex &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex pin_fs_lock irq_context: 0 mtd_table_mutex &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex &sb->s_type->i_mutex_key#3 irq_context: 0 mtd_table_mutex &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex &sb->s_type->i_mutex_key#3 rename_lock.seqcount irq_context: 0 mtd_table_mutex &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex &sb->s_type->i_mutex_key#3 fs_reclaim irq_context: 0 mtd_table_mutex &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex &sb->s_type->i_mutex_key#3 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 mtd_table_mutex &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex &sb->s_type->i_mutex_key#3 pool_lock#2 irq_context: 0 mtd_table_mutex &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex &sb->s_type->i_mutex_key#3 &dentry->d_lock irq_context: 0 mtd_table_mutex &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex &sb->s_type->i_mutex_key#3 rcu_read_lock rename_lock.seqcount irq_context: 0 mtd_table_mutex &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex &sb->s_type->i_mutex_key#3 &dentry->d_lock &wq irq_context: 0 mtd_table_mutex &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex &sb->s_type->i_mutex_key#3 mmu_notifier_invalidate_range_start irq_context: 0 mtd_table_mutex &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex &sb->s_type->i_mutex_key#3 &sb->s_type->i_lock_key#7 irq_context: 0 mtd_table_mutex &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex &sb->s_type->i_mutex_key#3 &s->s_inode_list_lock irq_context: 0 mtd_table_mutex &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex &sb->s_type->i_mutex_key#3 tk_core.seq.seqcount irq_context: 0 mtd_table_mutex &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex &sb->s_type->i_mutex_key#3 &sb->s_type->i_lock_key#7 &dentry->d_lock irq_context: 0 mtd_table_mutex &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex &sb->s_type->i_mutex_key#3 &c->lock irq_context: 0 mtd_table_mutex &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex &sb->s_type->i_mutex_key#3 &____s->seqcount irq_context: 0 mtd_table_mutex &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex &sb->s_type->i_mutex_key#3 &pcp->lock &zone->lock irq_context: 0 mtd_table_mutex &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex &sb->s_type->i_mutex_key#3 &zone->lock irq_context: 0 mtd_table_mutex &q->sysfs_dir_lock &q->sysfs_lock fs_reclaim irq_context: 0 mtd_table_mutex &q->sysfs_dir_lock &q->sysfs_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 mtd_table_mutex &q->sysfs_dir_lock &q->sysfs_lock pool_lock#2 irq_context: 0 mtd_table_mutex &q->sysfs_dir_lock &q->sysfs_lock lock irq_context: 0 mtd_table_mutex &q->sysfs_dir_lock &q->sysfs_lock lock kernfs_idr_lock irq_context: 0 mtd_table_mutex &q->sysfs_dir_lock &q->sysfs_lock &root->kernfs_rwsem irq_context: 0 mtd_table_mutex &q->sysfs_dir_lock &q->sysfs_lock &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 mtd_table_mutex &q->sysfs_dir_lock &q->sysfs_lock lock kernfs_idr_lock 
pool_lock#2 irq_context: 0 mtd_table_mutex &q->sysfs_dir_lock &q->sysfs_lock pcpu_alloc_mutex irq_context: 0 mtd_table_mutex &q->sysfs_dir_lock &q->sysfs_lock pcpu_alloc_mutex pcpu_lock irq_context: 0 mtd_table_mutex &q->sysfs_dir_lock &q->sysfs_lock &obj_hash[i].lock irq_context: 0 mtd_table_mutex &q->sysfs_dir_lock &q->sysfs_lock &q->mq_freeze_lock irq_context: 0 mtd_table_mutex &q->sysfs_dir_lock &q->sysfs_lock &q->mq_freeze_lock percpu_ref_switch_lock irq_context: 0 mtd_table_mutex &q->sysfs_dir_lock &q->sysfs_lock &q->mq_freeze_lock percpu_ref_switch_lock rcu_read_lock &q->mq_freeze_wq irq_context: 0 mtd_table_mutex &q->sysfs_dir_lock &q->sysfs_lock set->srcu irq_context: 0 mtd_table_mutex &q->sysfs_dir_lock &q->sysfs_lock percpu_ref_switch_lock irq_context: 0 mtd_table_mutex &q->sysfs_dir_lock &q->sysfs_lock &q->queue_lock irq_context: 0 mtd_table_mutex &q->sysfs_dir_lock &q->sysfs_lock &q->mq_freeze_lock &q->mq_freeze_wq irq_context: 0 mtd_table_mutex &q->sysfs_dir_lock &q->sysfs_lock &stats->lock irq_context: 0 mtd_table_mutex subsys mutex#37 irq_context: 0 mtd_table_mutex subsys mutex#37 &k->k_lock irq_context: 0 mtd_table_mutex cgwb_lock irq_context: 0 mtd_table_mutex bdi_lock irq_context: 0 mtd_table_mutex inode_hash_lock irq_context: 0 mtd_table_mutex inode_hash_lock &sb->s_type->i_lock_key#3 irq_context: 0 rtnl_mutex stack_depot_init_mutex irq_context: 0 rtnl_mutex cpu_hotplug_lock irq_context: 0 rtnl_mutex cpu_hotplug_lock wq_pool_mutex irq_context: 0 rtnl_mutex cpu_hotplug_lock wq_pool_mutex fs_reclaim irq_context: 0 rtnl_mutex cpu_hotplug_lock wq_pool_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 rtnl_mutex cpu_hotplug_lock wq_pool_mutex pool_lock#2 irq_context: 0 rtnl_mutex cpu_hotplug_lock wq_pool_mutex &obj_hash[i].lock irq_context: 0 rtnl_mutex cpu_hotplug_lock wq_pool_mutex &wq->mutex irq_context: 0 rtnl_mutex cpu_hotplug_lock wq_pool_mutex &wq->mutex &pool->lock/1 irq_context: 0 rtnl_mutex kthread_create_lock irq_context: 0 rtnl_mutex &p->pi_lock irq_context: 0 rtnl_mutex &p->pi_lock &rq->__lock irq_context: 0 rtnl_mutex &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &x->wait irq_context: 0 rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex wq_pool_mutex irq_context: 0 rtnl_mutex wq_pool_mutex &wq->mutex irq_context: 0 rtnl_mutex crngs.lock irq_context: 0 rtnl_mutex net_rwsem irq_context: 0 rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 rtnl_mutex &x->wait#9 irq_context: 0 rtnl_mutex &k->list_lock irq_context: 0 rtnl_mutex gdp_mutex irq_context: 0 rtnl_mutex gdp_mutex &k->list_lock irq_context: 0 rtnl_mutex lock irq_context: 0 rtnl_mutex lock kernfs_idr_lock irq_context: 0 rtnl_mutex &root->kernfs_rwsem irq_context: 0 rtnl_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 rtnl_mutex bus_type_sem irq_context: 0 rtnl_mutex sysfs_symlink_target_lock irq_context: 0 rtnl_mutex lock kernfs_idr_lock pool_lock#2 irq_context: 0 rtnl_mutex &root->kernfs_rwsem irq_context: 0 rtnl_mutex &dev->power.lock irq_context: 0 rtnl_mutex dpm_list_mtx irq_context: 0 rtnl_mutex uevent_sock_mutex irq_context: 0 rtnl_mutex uevent_sock_mutex fs_reclaim irq_context: 0 rtnl_mutex uevent_sock_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 rtnl_mutex uevent_sock_mutex pool_lock#2 irq_context: 0 rtnl_mutex uevent_sock_mutex nl_table_lock irq_context: 0 rtnl_mutex uevent_sock_mutex &obj_hash[i].lock irq_context: 0 rtnl_mutex uevent_sock_mutex nl_table_wait.lock 
irq_context: 0 rtnl_mutex rcu_read_lock &pool->lock/1 irq_context: 0 rtnl_mutex rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 rtnl_mutex rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 rtnl_mutex rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 rtnl_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 rtnl_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 rtnl_mutex running_helpers_waitq.lock irq_context: 0 rtnl_mutex subsys mutex#17 irq_context: 0 rtnl_mutex subsys mutex#17 &k->k_lock irq_context: 0 rtnl_mutex &dir->lock#2 irq_context: 0 rtnl_mutex uevent_sock_mutex &c->lock irq_context: 0 rtnl_mutex uevent_sock_mutex &____s->seqcount irq_context: 0 rtnl_mutex &obj_hash[i].lock pool_lock irq_context: 0 rtnl_mutex rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 rtnl_mutex rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override pool_lock irq_context: 0 rtnl_mutex uevent_sock_mutex &rq->__lock irq_context: 0 rtnl_mutex uevent_sock_mutex &cfs_rq->removed.lock irq_context: 0 rtnl_mutex &cfs_rq->removed.lock irq_context: 0 rtnl_mutex dev_hotplug_mutex irq_context: 0 rtnl_mutex dev_hotplug_mutex &dev->power.lock irq_context: 0 rtnl_mutex dev_base_lock irq_context: 0 rtnl_mutex input_pool.lock irq_context: 0 rtnl_mutex pcpu_alloc_mutex irq_context: 0 rtnl_mutex pcpu_alloc_mutex pcpu_lock irq_context: 0 rtnl_mutex batched_entropy_u32.lock irq_context: 0 rtnl_mutex &tbl->lock irq_context: 0 rtnl_mutex sysctl_lock irq_context: 0 (wq_completion)gid-cache-wq irq_context: 0 (wq_completion)gid-cache-wq (work_completion)(&ndev_work->work) irq_context: 0 (wq_completion)gid-cache-wq (work_completion)(&ndev_work->work) devices_rwsem irq_context: 0 rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)gid-cache-wq (work_completion)(&ndev_work->work) &obj_hash[i].lock irq_context: 0 (wq_completion)gid-cache-wq (work_completion)(&ndev_work->work) pool_lock#2 irq_context: 0 rtnl_mutex nl_table_wait.lock irq_context: 0 rtnl_mutex rcu_read_lock &bond->stats_lock irq_context: 0 rtnl_mutex lweventlist_lock irq_context: 0 rtnl_mutex lweventlist_lock pool_lock#2 irq_context: 0 rtnl_mutex lweventlist_lock &dir->lock#2 irq_context: 0 rtnl_mutex rcu_read_lock &pool->lock irq_context: 0 rtnl_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 rtnl_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 rtnl_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (linkwatch_work).work irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rtnl_mutex.wait_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &pool->lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex.wait_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex lweventlist_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex lweventlist_lock pool_lock#2 irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex 
lweventlist_lock &dir->lock#2 irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex dev_base_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex proc_subdir_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex proc_inum_ida.xa_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex proc_subdir_lock irq_context: 0 once_lock irq_context: 0 once_lock crngs.lock irq_context: 0 rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&w->work) irq_context: 0 (wq_completion)events (work_completion)(&w->work) cpu_hotplug_lock irq_context: 0 (wq_completion)events (work_completion)(&w->work) cpu_hotplug_lock jump_label_mutex irq_context: 0 (wq_completion)events (work_completion)(&w->work) cpu_hotplug_lock jump_label_mutex text_mutex irq_context: 0 (wq_completion)events (work_completion)(&w->work) cpu_hotplug_lock jump_label_mutex text_mutex ptlock_ptr(page)#2 irq_context: 0 (wq_completion)events (work_completion)(&w->work) &obj_hash[i].lock irq_context: 0 (inet6addr_validator_chain).rwsem irq_context: 0 (inetaddr_validator_chain).rwsem irq_context: 0 pernet_ops_rwsem rtnl_mutex stack_depot_init_mutex irq_context: 0 pernet_ops_rwsem rtnl_mutex crngs.lock irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_read_lock pool_lock#2 irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_read_lock &pool->lock/1 irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex rcu_read_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &q->sysfs_lock &rq->__lock irq_context: 0 pernet_ops_rwsem rtnl_mutex uevent_sock_mutex &pcp->lock &zone->lock irq_context: 0 pernet_ops_rwsem rtnl_mutex uevent_sock_mutex &zone->lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &rq->__lock irq_context: 0 pernet_ops_rwsem rtnl_mutex batched_entropy_u32.lock crngs.lock irq_context: 0 pernet_ops_rwsem rtnl_mutex batched_entropy_u8.lock irq_context: 0 pernet_ops_rwsem rtnl_mutex kfence_freelist_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex pcpu_alloc_mutex pcpu_alloc_mutex.wait_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex pcpu_alloc_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &q->sysfs_lock &obj_hash[i].lock pool_lock irq_context: 0 subsys mutex#50 irq_context: 0 subsys mutex#50 &k->k_lock irq_context: 0 gpio_lookup_lock irq_context: 0 mdio_board_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) &rq->__lock &cfs_rq->removed.lock irq_context: 0 uevent_sock_mutex &cfs_rq->removed.lock irq_context: 0 uevent_sock_mutex &obj_hash[i].lock irq_context: 0 
mode_list_lock irq_context: 0 misc_mtx &cfs_rq->removed.lock irq_context: 0 &dev->mutex stack_depot_init_mutex irq_context: 0 &dev->mutex napi_hash_lock irq_context: 0 &dev->mutex &md->mutex lock kernfs_idr_lock pool_lock#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &set->tag_list_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &set->tag_list_lock rcu_read_lock &rq->__lock irq_context: 0 &dev->mutex cpu_hotplug_lock &md->mutex irq_context: 0 &dev->mutex cpu_hotplug_lock &irq_desc_lock_class irq_context: 0 &dev->mutex cpu_hotplug_lock xps_map_mutex irq_context: 0 &dev->mutex cpu_hotplug_lock xps_map_mutex fs_reclaim irq_context: 0 &dev->mutex cpu_hotplug_lock xps_map_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex cpu_hotplug_lock xps_map_mutex pool_lock#2 irq_context: 0 &dev->mutex cpu_hotplug_lock xps_map_mutex jump_label_mutex irq_context: 0 &dev->mutex cpu_hotplug_lock xps_map_mutex jump_label_mutex text_mutex irq_context: 0 &dev->mutex cpu_hotplug_lock xps_map_mutex jump_label_mutex text_mutex ptlock_ptr(page)#2 irq_context: 0 &dev->mutex cpu_hotplug_lock xps_map_mutex &obj_hash[i].lock irq_context: 0 &dev->mutex cpu_hotplug_lock xps_map_mutex krc.lock irq_context: 0 &dev->mutex rtnl_mutex irq_context: 0 &dev->mutex rtnl_mutex &pcp->lock &zone->lock irq_context: 0 &dev->mutex rtnl_mutex &zone->lock irq_context: 0 &dev->mutex rtnl_mutex &____s->seqcount irq_context: 0 &dev->mutex rtnl_mutex pool_lock#2 irq_context: 0 &dev->mutex rtnl_mutex rcu_read_lock pool_lock#2 irq_context: 0 &dev->mutex rtnl_mutex &obj_hash[i].lock irq_context: 0 &dev->mutex rtnl_mutex fs_reclaim irq_context: 0 &dev->mutex rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex rtnl_mutex &c->lock irq_context: 0 &dev->mutex rtnl_mutex net_rwsem irq_context: 0 &dev->mutex rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 &dev->mutex rtnl_mutex &x->wait#9 irq_context: 0 &dev->mutex rtnl_mutex &k->list_lock irq_context: 0 &dev->mutex rtnl_mutex gdp_mutex irq_context: 0 &dev->mutex rtnl_mutex gdp_mutex &k->list_lock irq_context: 0 &dev->mutex rtnl_mutex gdp_mutex fs_reclaim irq_context: 0 &dev->mutex rtnl_mutex gdp_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex rtnl_mutex gdp_mutex pool_lock#2 irq_context: 0 &dev->mutex rtnl_mutex gdp_mutex lock irq_context: 0 &dev->mutex rtnl_mutex gdp_mutex lock kernfs_idr_lock irq_context: 0 &dev->mutex rtnl_mutex gdp_mutex &root->kernfs_rwsem irq_context: 0 &dev->mutex rtnl_mutex gdp_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 &dev->mutex rtnl_mutex gdp_mutex kobj_ns_type_lock irq_context: 0 &dev->mutex rtnl_mutex lock irq_context: 0 &dev->mutex rtnl_mutex lock kernfs_idr_lock irq_context: 0 &dev->mutex rtnl_mutex &root->kernfs_rwsem irq_context: 0 &dev->mutex rtnl_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 &dev->mutex rtnl_mutex bus_type_sem irq_context: 0 &dev->mutex rtnl_mutex sysfs_symlink_target_lock irq_context: 0 &dev->mutex rtnl_mutex lock kernfs_idr_lock pool_lock#2 irq_context: 0 &dev->mutex rtnl_mutex &root->kernfs_rwsem irq_context: 0 &dev->mutex rtnl_mutex &dev->power.lock irq_context: 0 &dev->mutex rtnl_mutex dpm_list_mtx irq_context: 0 &dev->mutex rtnl_mutex uevent_sock_mutex irq_context: 0 &dev->mutex rtnl_mutex uevent_sock_mutex fs_reclaim irq_context: 0 &dev->mutex rtnl_mutex uevent_sock_mutex 
fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex rtnl_mutex uevent_sock_mutex pool_lock#2 irq_context: 0 &dev->mutex rtnl_mutex uevent_sock_mutex nl_table_lock irq_context: 0 &dev->mutex rtnl_mutex uevent_sock_mutex &obj_hash[i].lock irq_context: 0 &dev->mutex rtnl_mutex uevent_sock_mutex nl_table_wait.lock irq_context: 0 &dev->mutex rtnl_mutex rcu_read_lock &pool->lock/1 irq_context: 0 &dev->mutex rtnl_mutex rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 &dev->mutex rtnl_mutex rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 &dev->mutex rtnl_mutex rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 &dev->mutex rtnl_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 &dev->mutex rtnl_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &dev->mutex rtnl_mutex running_helpers_waitq.lock irq_context: 0 &dev->mutex rtnl_mutex &rq->__lock irq_context: 0 &dev->mutex rtnl_mutex &k->k_lock irq_context: 0 &dev->mutex rtnl_mutex subsys mutex#17 irq_context: 0 &dev->mutex rtnl_mutex subsys mutex#17 &k->k_lock irq_context: 0 &dev->mutex rtnl_mutex &dir->lock#2 irq_context: 0 &dev->mutex rtnl_mutex dev_hotplug_mutex irq_context: 0 &dev->mutex rtnl_mutex dev_hotplug_mutex &dev->power.lock irq_context: 0 &dev->mutex rtnl_mutex dev_base_lock irq_context: 0 &dev->mutex rtnl_mutex input_pool.lock irq_context: 0 &dev->mutex rtnl_mutex pcpu_alloc_mutex irq_context: 0 &dev->mutex rtnl_mutex pcpu_alloc_mutex pcpu_lock irq_context: 0 &dev->mutex rtnl_mutex batched_entropy_u32.lock irq_context: 0 &dev->mutex rtnl_mutex &tbl->lock irq_context: 0 &dev->mutex rtnl_mutex sysctl_lock irq_context: 0 &dev->mutex rtnl_mutex nl_table_lock irq_context: 0 &dev->mutex rtnl_mutex nl_table_wait.lock irq_context: 0 &dev->mutex rtnl_mutex quarantine_lock irq_context: 0 &dev->mutex cpu_hotplug_lock cpuhp_state_mutex irq_context: hardirq &irq_desc_lock_class tmp_mask_lock irq_context: hardirq &irq_desc_lock_class tmp_mask_lock vector_lock irq_context: hardirq|softirq &irq_desc_lock_class vector_lock irq_context: 0 &dev->mutex rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &dev->mutex rtnl_mutex &cfs_rq->removed.lock irq_context: 0 &dev->mutex lweventlist_lock irq_context: 0 &dev->mutex lweventlist_lock pool_lock#2 irq_context: 0 &dev->mutex lweventlist_lock &dir->lock#2 irq_context: 0 &dev->mutex &base->lock &obj_hash[i].lock irq_context: 0 &dev->mutex rcu_read_lock &pool->lock irq_context: 0 &dev->mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 &dev->mutex rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 &dev->mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 &dev->mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 &dev->mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &dev->mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)events (work_completion)(&vi->config_work) irq_context: 0 l3mdev_lock irq_context: 0 &sb->s_type->i_mutex_key#3 rcu_read_lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#3 &obj_hash[i].lock irq_context: 0 
(wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex batched_entropy_u8.lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex kfence_freelist_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &meta->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &set->tag_list_lock &q->mq_freeze_lock &rq->__lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &x->wait#15 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &k->list_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex lock kernfs_idr_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &root->kernfs_rwsem irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex bus_type_sem irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex lock kernfs_idr_lock pool_lock#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex sysfs_symlink_target_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &k->k_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &root->kernfs_rwsem irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex dpm_list_mtx irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex uevent_sock_mutex irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex running_helpers_waitq.lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &dev->mutex &dev->power.lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &dev->mutex &k->list_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &dev->mutex &k->k_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex subsys mutex#42 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex 
device_links_srcu irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &dev->power.lock &dev->power.lock/1 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &dev->mutex device_links_srcu irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &dev->mutex fwnode_link_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &dev->mutex device_links_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &dev->mutex sysfs_symlink_target_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &dev->mutex fs_reclaim irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &dev->mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &dev->mutex pool_lock#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &dev->mutex &c->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &dev->mutex &pcp->lock &zone->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &dev->mutex &zone->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &dev->mutex &____s->seqcount irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &dev->mutex lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &dev->mutex lock kernfs_idr_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &dev->mutex &root->kernfs_rwsem irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &dev->mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &dev->mutex &root->kernfs_rwsem &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &dev->mutex &root->kernfs_rwsem pool_lock#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &dev->mutex &root->kernfs_rwsem kernfs_idr_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &dev->mutex probe_waitqueue.lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex async_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex subsys mutex#42 &rq->__lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex bio_slab_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex pcpu_alloc_mutex irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex pcpu_alloc_mutex pcpu_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex percpu_counters_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex mmu_notifier_invalidate_range_start irq_context: 0 
(wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &sb->s_type->i_lock_key#3 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &s->s_inode_list_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &xa->xa_lock#8 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex lock &q->queue_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex lock &q->queue_lock &blkcg->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->mq_freeze_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->mq_freeze_lock percpu_ref_switch_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->mq_freeze_lock percpu_ref_switch_lock rcu_read_lock &q->mq_freeze_wq irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex percpu_ref_switch_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->queue_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->queue_lock pool_lock#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->mq_freeze_lock &q->mq_freeze_wq irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->queue_lock pcpu_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->queue_lock &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->queue_lock percpu_counters_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->queue_lock &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex sd_index_ida.xa_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex lock kernfs_idr_lock pool_lock#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex subsys mutex#51 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex subsys mutex#51 &k->k_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex tk_core.seq.seqcount irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &hctx->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex rcu_read_lock &hctx->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex rcu_read_lock &virtscsi_vq->vq_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &x->wait#16 irq_context: 0 
(wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &base->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &rq->__lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex (&timer.timer) irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex (console_sem).lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex console_owner_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex console_owner irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex console_lock console_srcu console_owner_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex console_lock console_srcu console_owner irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex console_lock console_srcu console_owner &port_lock_key irq_context: 0 console_owner_lock irq_context: 0 console_owner irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex console_lock console_srcu console_owner console_owner_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex gdp_mutex irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex gdp_mutex &k->list_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex gdp_mutex fs_reclaim irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex gdp_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex gdp_mutex pool_lock#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex gdp_mutex lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex gdp_mutex lock kernfs_idr_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex gdp_mutex &root->kernfs_rwsem irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex gdp_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex subsys mutex#44 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex subsys mutex#44 &k->k_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex subsys mutex#44 fs_reclaim irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex subsys mutex#44 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex subsys mutex#44 pool_lock#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex subsys mutex#44 lock irq_context: 0 (wq_completion)events_unbound 
(work_completion)(&entry->work) &shost->scan_mutex subsys mutex#44 lock sg_index_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex subsys mutex#44 lock sg_index_lock pool_lock#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex subsys mutex#44 chrdevs_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex subsys mutex#44 &x->wait#9 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex subsys mutex#44 &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex subsys mutex#44 &k->list_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex subsys mutex#44 gdp_mutex irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex subsys mutex#44 gdp_mutex &k->list_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex subsys mutex#44 gdp_mutex fs_reclaim irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex subsys mutex#44 gdp_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex subsys mutex#44 gdp_mutex pool_lock#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex subsys mutex#44 gdp_mutex lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex subsys mutex#44 gdp_mutex lock kernfs_idr_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex subsys mutex#44 gdp_mutex &root->kernfs_rwsem irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex subsys mutex#44 gdp_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex subsys mutex#44 lock kernfs_idr_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex subsys mutex#44 &root->kernfs_rwsem irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex subsys mutex#44 &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex subsys mutex#44 bus_type_sem irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex subsys mutex#44 sysfs_symlink_target_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex subsys mutex#44 &root->kernfs_rwsem irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex subsys mutex#44 &c->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex subsys mutex#44 &pcp->lock &zone->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex subsys mutex#44 &zone->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex subsys mutex#44 &____s->seqcount irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex subsys mutex#44 &dev->power.lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) 
&shost->scan_mutex subsys mutex#44 dpm_list_mtx irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex subsys mutex#44 req_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex subsys mutex#44 &p->pi_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex subsys mutex#44 &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex subsys mutex#44 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex subsys mutex#44 &x->wait#11 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex subsys mutex#44 &rq->__lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex subsys mutex#44 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &cfs_rq->removed.lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex subsys mutex#44 uevent_sock_mutex irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex subsys mutex#44 rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex subsys mutex#44 rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex subsys mutex#44 rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex subsys mutex#44 rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex subsys mutex#44 rcu_read_lock &pool->lock/1 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex subsys mutex#44 rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex subsys mutex#44 rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex subsys mutex#44 running_helpers_waitq.lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex subsys mutex#44 subsys mutex#52 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex subsys mutex#44 subsys mutex#52 &k->k_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex subsys mutex#44 (console_sem).lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex subsys mutex#44 console_owner_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex subsys mutex#44 console_owner irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex subsys mutex#44 console_lock console_srcu console_owner_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex subsys mutex#44 console_lock console_srcu console_owner irq_context: 0 (wq_completion)events_unbound 
(work_completion)(&entry->work) &shost->scan_mutex subsys mutex#44 console_lock console_srcu console_owner &port_lock_key irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex subsys mutex#44 console_lock console_srcu console_owner console_owner_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex subsys mutex#44 &cfs_rq->removed.lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex bsg_minor_ida.xa_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex chrdevs_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex req_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &x->wait#11 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex subsys mutex#53 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex subsys mutex#53 &k->k_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) async_scan_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->power.lock irq_context: 0 subsys mutex#54 irq_context: 0 subsys mutex#54 &k->k_lock irq_context: 0 compressor_list_lock irq_context: 0 compressor_list_lock pool_lock#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &bdev->bd_size_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex elv_list_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex (work_completion)(&(&q->requeue_work)->work) irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex (work_completion)(&(&hctx->run_work)->work) irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex batched_entropy_u32.lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &zone->lock &____s->seqcount irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &pcp->lock &zone->lock &____s->seqcount irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->debugfs_mutex irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex req_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &p->pi_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &x->wait#11 irq_context: 0 sb_writers &type->i_mutex_dir_key/1 batched_entropy_u32.lock crngs.lock base_crng.lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex subsys mutex#36 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex subsys mutex#36 &k->k_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex dev_hotplug_mutex irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex dev_hotplug_mutex &dev->power.lock irq_context: 0 (wq_completion)events_unbound 
(work_completion)(&entry->work) &dev->mutex &q->sysfs_dir_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->sysfs_dir_lock fs_reclaim irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->sysfs_dir_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->sysfs_dir_lock pool_lock#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->sysfs_dir_lock lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->sysfs_dir_lock lock kernfs_idr_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->sysfs_dir_lock &root->kernfs_rwsem irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->sysfs_dir_lock &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->sysfs_dir_lock &c->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->sysfs_dir_lock &pcp->lock &zone->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->sysfs_dir_lock &zone->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->sysfs_dir_lock &____s->seqcount irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->sysfs_dir_lock &q->sysfs_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex pin_fs_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex &sb->s_type->i_mutex_key#3 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex &sb->s_type->i_mutex_key#3 rename_lock.seqcount irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex &sb->s_type->i_mutex_key#3 fs_reclaim irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex &sb->s_type->i_mutex_key#3 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex &sb->s_type->i_mutex_key#3 pool_lock#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex &sb->s_type->i_mutex_key#3 &dentry->d_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex &sb->s_type->i_mutex_key#3 rcu_read_lock rename_lock.seqcount irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex &sb->s_type->i_mutex_key#3 &dentry->d_lock &wq irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex 
&sb->s_type->i_mutex_key#3 mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex &sb->s_type->i_mutex_key#3 &sb->s_type->i_lock_key#7 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex &sb->s_type->i_mutex_key#3 &s->s_inode_list_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex &sb->s_type->i_mutex_key#3 tk_core.seq.seqcount irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex &sb->s_type->i_mutex_key#3 &sb->s_type->i_lock_key#7 &dentry->d_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex &sb->s_type->i_mutex_key#3 &c->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex &sb->s_type->i_mutex_key#3 &pcp->lock &zone->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex &sb->s_type->i_mutex_key#3 &zone->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex &sb->s_type->i_mutex_key#3 &____s->seqcount irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->sysfs_dir_lock &q->sysfs_lock fs_reclaim irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->sysfs_dir_lock &q->sysfs_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->sysfs_dir_lock &q->sysfs_lock pool_lock#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->sysfs_dir_lock &q->sysfs_lock lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->sysfs_dir_lock &q->sysfs_lock lock kernfs_idr_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->sysfs_dir_lock &q->sysfs_lock &root->kernfs_rwsem irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->sysfs_dir_lock &q->sysfs_lock &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->sysfs_dir_lock &q->sysfs_lock &c->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->sysfs_dir_lock &q->sysfs_lock &pcp->lock &zone->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->sysfs_dir_lock &q->sysfs_lock &zone->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->sysfs_dir_lock &q->sysfs_lock &____s->seqcount irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->sysfs_dir_lock &q->sysfs_lock lock kernfs_idr_lock pool_lock#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->sysfs_dir_lock &q->sysfs_lock rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex 
&q->sysfs_dir_lock &q->sysfs_lock &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->sysfs_dir_lock &q->sysfs_lock pcpu_alloc_mutex irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->sysfs_dir_lock &q->sysfs_lock pcpu_alloc_mutex pcpu_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->sysfs_dir_lock &q->sysfs_lock &q->mq_freeze_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->sysfs_dir_lock &q->sysfs_lock &q->mq_freeze_lock percpu_ref_switch_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->sysfs_dir_lock &q->sysfs_lock &q->mq_freeze_lock percpu_ref_switch_lock rcu_read_lock &q->mq_freeze_wq irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->sysfs_dir_lock &q->sysfs_lock percpu_ref_switch_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->sysfs_dir_lock &q->sysfs_lock &q->queue_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->sysfs_dir_lock &q->sysfs_lock &q->mq_freeze_lock &q->mq_freeze_wq irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex &sb->s_type->i_mutex_key#3 &pcp->lock &zone->lock &____s->seqcount irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->sysfs_dir_lock &q->sysfs_lock &stats->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex subsys mutex#37 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex subsys mutex#37 &k->k_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex cgwb_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex bdi_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex inode_hash_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex inode_hash_lock &sb->s_type->i_lock_key#3 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex bdev_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex fs_reclaim irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex pool_lock#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex tk_core.seq.seqcount irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex 
&disk->open_mutex &hctx->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex rcu_read_lock &hctx->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex rcu_read_lock batched_entropy_u32.lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex rcu_read_lock &virtscsi_vq->vq_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex &x->wait#16 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex &base->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex &rq->__lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex (&timer.timer) irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex &pcp->lock &zone->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex &zone->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex &____s->seqcount irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex &q->sysfs_dir_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex &q->sysfs_dir_lock &q->sysfs_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex &c->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex &bdev->bd_size_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex free_vmap_area_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex vmap_area_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex init_mm.page_table_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex &xa->xa_lock#6 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) 
&dev->mutex &disk->open_mutex lock#4 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex &mapping->private_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex &dd->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex rcu_read_lock &pool->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex &folio_wait_table[i] irq_context: 0 (wq_completion)kblockd irq_context: 0 (wq_completion)kblockd (work_completion)(&(&hctx->run_work)->work) irq_context: 0 (wq_completion)kblockd (work_completion)(&(&hctx->run_work)->work) rcu_read_lock &dd->lock irq_context: 0 (wq_completion)kblockd (work_completion)(&(&hctx->run_work)->work) rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)kblockd (work_completion)(&(&hctx->run_work)->work) rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)kblockd (work_completion)(&(&hctx->run_work)->work) rcu_read_lock &virtscsi_vq->vq_lock irq_context: softirq &ret->b_uptodate_lock irq_context: softirq &folio_wait_table[i] irq_context: softirq &folio_wait_table[i] &p->pi_lock irq_context: softirq &folio_wait_table[i] &p->pi_lock &rq->__lock irq_context: softirq &folio_wait_table[i] &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex (console_sem).lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex console_owner_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex console_owner irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex console_lock console_srcu console_owner_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex console_lock console_srcu console_owner irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex console_lock console_srcu console_owner &port_lock_key irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex console_lock console_srcu console_owner console_owner_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex &sb->s_type->i_lock_key#3 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex &s->s_inode_list_lock irq_context: 0 (wq_completion)events_unbound 
(work_completion)(&entry->work) &dev->mutex &disk->open_mutex pcpu_alloc_mutex irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex pcpu_alloc_mutex pcpu_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex &x->wait#9 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex &k->list_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex lock kernfs_idr_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex &root->kernfs_rwsem irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex bus_type_sem irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex sysfs_symlink_target_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex &root->kernfs_rwsem irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex &dev->power.lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex dpm_list_mtx irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex req_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex &p->pi_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex &x->wait#11 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex &k->k_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex subsys mutex#36 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex subsys mutex#36 &k->k_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex &xa->xa_lock#8 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex &xa->xa_lock#8 pool_lock#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex inode_hash_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex inode_hash_lock &sb->s_type->i_lock_key#3 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex purge_vmap_area_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex &sb->s_type->i_lock_key#3 &xa->xa_lock#6 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex lock#4 
&lruvec->lru_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex lock#5 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex &lruvec->lru_lock irq_context: 0 batched_entropy_u8.lock crngs.lock irq_context: 0 pernet_ops_rwsem hwsim_netgroup_ida.xa_lock irq_context: 0 hwsim_radio_lock irq_context: 0 subsys mutex#55 irq_context: 0 subsys mutex#55 &k->k_lock irq_context: 0 deferred_probe_mutex irq_context: 0 rtnl_mutex param_lock irq_context: 0 rtnl_mutex param_lock rate_ctrl_mutex irq_context: 0 rtnl_mutex (console_sem).lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx irq_context: 0 rtnl_mutex &rdev->wiphy.mtx fs_reclaim irq_context: 0 &tsk->futex_exit_mutex &rq->__lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &c->lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &____s->seqcount irq_context: 0 rtnl_mutex &rdev->wiphy.mtx pool_lock#2 irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &k->list_lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx gdp_mutex irq_context: 0 rtnl_mutex &rdev->wiphy.mtx gdp_mutex &k->list_lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx gdp_mutex fs_reclaim irq_context: 0 rtnl_mutex &rdev->wiphy.mtx gdp_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 rtnl_mutex &rdev->wiphy.mtx gdp_mutex pool_lock#2 irq_context: 0 rtnl_mutex &rdev->wiphy.mtx gdp_mutex lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx gdp_mutex lock kernfs_idr_lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx gdp_mutex &root->kernfs_rwsem irq_context: 0 rtnl_mutex &rdev->wiphy.mtx gdp_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 rtnl_mutex &rdev->wiphy.mtx gdp_mutex kobj_ns_type_lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &pcp->lock &zone->lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &zone->lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx lock kernfs_idr_lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &root->kernfs_rwsem irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 rtnl_mutex &rdev->wiphy.mtx bus_type_sem irq_context: 0 rtnl_mutex &rdev->wiphy.mtx sysfs_symlink_target_lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx lock kernfs_idr_lock pool_lock#2 irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &root->kernfs_rwsem irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &dev->power.lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx dpm_list_mtx irq_context: 0 rtnl_mutex &rdev->wiphy.mtx uevent_sock_mutex irq_context: 0 rtnl_mutex &rdev->wiphy.mtx uevent_sock_mutex fs_reclaim irq_context: 0 rtnl_mutex &rdev->wiphy.mtx uevent_sock_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 rtnl_mutex &rdev->wiphy.mtx uevent_sock_mutex pool_lock#2 irq_context: 0 rtnl_mutex &rdev->wiphy.mtx uevent_sock_mutex nl_table_lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx uevent_sock_mutex &obj_hash[i].lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx uevent_sock_mutex nl_table_wait.lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &obj_hash[i].lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx rcu_read_lock &pool->lock/1 irq_context: 0 rtnl_mutex &rdev->wiphy.mtx rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 rtnl_mutex &rdev->wiphy.mtx rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 rtnl_mutex 
&rdev->wiphy.mtx rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 &tsk->futex_exit_mutex &cfs_rq->removed.lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &tsk->futex_exit_mutex &obj_hash[i].lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &tsk->futex_exit_mutex pool_lock#2 irq_context: 0 rtnl_mutex &rdev->wiphy.mtx running_helpers_waitq.lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &k->k_lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx subsys mutex#56 irq_context: 0 rtnl_mutex &rdev->wiphy.mtx subsys mutex#56 &k->k_lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx pin_fs_lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 &sb->s_type->i_lock_key#7 irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 rename_lock.seqcount irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 fs_reclaim irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 pool_lock#2 irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 &dentry->d_lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 rcu_read_lock rename_lock.seqcount irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 &dentry->d_lock &wq irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 mmu_notifier_invalidate_range_start irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 &s->s_inode_list_lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 tk_core.seq.seqcount irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 &sb->s_type->i_lock_key#7 &dentry->d_lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 &c->lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 &pcp->lock &zone->lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 &zone->lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 &____s->seqcount irq_context: 0 rtnl_mutex &rdev->wiphy.mtx nl_table_lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx nl_table_wait.lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx reg_requests_lock irq_context: 0 rtnl_mutex &base->lock irq_context: 0 rtnl_mutex &base->lock &obj_hash[i].lock irq_context: 0 rfkill_global_mutex irq_context: 0 rfkill_global_mutex fs_reclaim irq_context: 0 rfkill_global_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 rfkill_global_mutex pool_lock#2 irq_context: 0 rfkill_global_mutex &k->list_lock irq_context: 0 rfkill_global_mutex lock irq_context: 0 rfkill_global_mutex lock kernfs_idr_lock irq_context: 0 rfkill_global_mutex &root->kernfs_rwsem irq_context: 0 rfkill_global_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 rfkill_global_mutex bus_type_sem irq_context: 0 rfkill_global_mutex sysfs_symlink_target_lock irq_context: 0 rfkill_global_mutex &c->lock irq_context: 0 rfkill_global_mutex &pcp->lock &zone->lock irq_context: 0 rfkill_global_mutex &zone->lock irq_context: 0 rfkill_global_mutex &____s->seqcount irq_context: 0 rfkill_global_mutex &root->kernfs_rwsem irq_context: 0 rfkill_global_mutex &dev->power.lock irq_context: 0 
rfkill_global_mutex dpm_list_mtx irq_context: 0 rfkill_global_mutex &rfkill->lock irq_context: 0 rfkill_global_mutex uevent_sock_mutex irq_context: 0 rfkill_global_mutex &obj_hash[i].lock irq_context: 0 rfkill_global_mutex rcu_read_lock &pool->lock/1 irq_context: 0 rfkill_global_mutex rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 rfkill_global_mutex rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 rfkill_global_mutex rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 rfkill_global_mutex running_helpers_waitq.lock irq_context: 0 rfkill_global_mutex &k->k_lock irq_context: 0 rfkill_global_mutex subsys mutex#40 irq_context: 0 rfkill_global_mutex subsys mutex#40 &k->k_lock irq_context: 0 rfkill_global_mutex triggers_list_lock irq_context: 0 rfkill_global_mutex leds_list_lock irq_context: 0 rfkill_global_mutex rcu_read_lock &pool->lock irq_context: 0 rfkill_global_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 rfkill_global_mutex rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 rfkill_global_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 rfkill_global_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 rfkill_global_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rfkill_global_mutex &rq->__lock irq_context: 0 rfkill_global_mutex &cfs_rq->removed.lock irq_context: 0 rfkill_global_mutex.wait_lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx stack_depot_init_mutex irq_context: 0 rtnl_mutex &rdev->wiphy.mtx pcpu_alloc_mutex irq_context: 0 rtnl_mutex &rdev->wiphy.mtx pcpu_alloc_mutex pcpu_lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx rcu_read_lock pool_lock#2 irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &local->iflist_mtx irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &obj_hash[i].lock pool_lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 rcu_read_lock pool_lock#2 irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 &obj_hash[i].lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 &pcp->lock &zone->lock &____s->seqcount irq_context: 0 rtnl_mutex &rdev->wiphy.mtx net_rwsem irq_context: 0 rtnl_mutex &rdev->wiphy.mtx net_rwsem &list->lock#2 irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &x->wait#9 irq_context: 0 rtnl_mutex &rdev->wiphy.mtx subsys mutex#17 irq_context: 0 rtnl_mutex &rdev->wiphy.mtx subsys mutex#17 &k->k_lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &dir->lock#2 irq_context: 0 rtnl_mutex &rdev->wiphy.mtx dev_hotplug_mutex irq_context: 0 rtnl_mutex &rdev->wiphy.mtx dev_hotplug_mutex &dev->power.lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx dev_base_lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx input_pool.lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx batched_entropy_u32.lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &tbl->lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx sysctl_lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &fq->lock irq_context: 0 hwsim_radio_lock rcu_read_lock rhashtable_bucket irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &rq->__lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &n->list_lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx batched_entropy_u32.lock crngs.lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &cfs_rq->removed.lock irq_context: 0 rfkill_global_mutex &pcp->lock &zone->lock &____s->seqcount irq_context: 0 rfkill_global_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 
&dev->mutex fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 &dev->mutex fill_pool_map-wait-type-override &c->lock irq_context: 0 &dev->mutex fill_pool_map-wait-type-override &pcp->lock &zone->lock irq_context: 0 &dev->mutex fill_pool_map-wait-type-override &zone->lock irq_context: 0 &dev->mutex fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 &dev->mutex fill_pool_map-wait-type-override pool_lock irq_context: 0 &dev->mutex crngs.lock irq_context: 0 &dev->mutex cpu_hotplug_lock wq_pool_mutex &____s->seqcount irq_context: 0 &dev->mutex cpu_hotplug_lock wq_pool_mutex &pcp->lock &zone->lock irq_context: 0 &dev->mutex cpu_hotplug_lock wq_pool_mutex &zone->lock irq_context: 0 &dev->mutex cpu_hotplug_lock wq_pool_mutex rcu_read_lock pool_lock#2 irq_context: 0 &dev->mutex rcu_read_lock &rq->__lock irq_context: 0 &dev->mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &dev->mutex rtnl_mutex subsys mutex#57 irq_context: 0 &dev->mutex rtnl_mutex subsys mutex#57 &k->k_lock irq_context: 0 &dev->mutex rtnl_mutex stack_depot_init_mutex irq_context: 0 &dev->mutex rtnl_mutex crngs.lock irq_context: 0 &dev->mutex rtnl_mutex &sdata->sec_mtx irq_context: 0 &dev->mutex rtnl_mutex &sdata->sec_mtx &sec->lock irq_context: 0 &dev->mutex rtnl_mutex &obj_hash[i].lock pool_lock irq_context: 0 &dev->mutex rtnl_mutex &local->iflist_mtx#2 irq_context: 0 &dev->mutex hwsim_phys_lock irq_context: 0 &dev->mutex nl_table_lock irq_context: 0 &dev->mutex nl_table_wait.lock irq_context: 0 &dev->mutex cpu_hotplug_lock wq_pool_mutex &c->lock irq_context: 0 &dev->mutex cpu_hotplug_lock wq_pool_mutex &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &dev->mutex hwsim_phys_lock fs_reclaim irq_context: 0 &dev->mutex hwsim_phys_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex hwsim_phys_lock pool_lock#2 irq_context: softirq &(&tbl->managed_work)->timer rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: softirq &(&tbl->managed_work)->timer rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 xdomain_lock irq_context: 0 xdomain_lock fs_reclaim irq_context: 0 xdomain_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 xdomain_lock pool_lock#2 irq_context: 0 ioctl_mutex irq_context: 0 address_handler_list_lock irq_context: 0 card_mutex irq_context: 0 subsys mutex#58 irq_context: 0 subsys mutex#58 &k->k_lock irq_context: 0 &x->wait#18 irq_context: 0 &x->wait#18 &p->pi_lock irq_context: 0 &x->wait#18 &p->pi_lock &rq->__lock irq_context: 0 &x->wait#18 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &txlock irq_context: 0 &txlock &list->lock#3 irq_context: 0 &txlock &txwq irq_context: 0 &iocq[i].lock irq_context: 0 &iocq[i].lock &ktiowq[i] irq_context: 0 rcu_read_lock &c->lock irq_context: 0 rcu_read_lock &pcp->lock &zone->lock irq_context: 0 rcu_read_lock &zone->lock irq_context: 0 rcu_read_lock &____s->seqcount irq_context: 0 &txwq irq_context: 0 &txwq &p->pi_lock irq_context: 0 &txwq &p->pi_lock &rq->__lock irq_context: 0 &txwq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rcu_read_lock_bh noop_qdisc.q.lock irq_context: 0 rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 rcu_read_lock_bh pool_lock#2 irq_context: 0 subsys mutex#59 irq_context: 0 subsys mutex#59 &k->k_lock irq_context: 0 usb_bus_idr_lock irq_context: 0 usb_bus_idr_lock (usb_notifier_list).rwsem irq_context: 0 table_lock irq_context: 0 table_lock 
&k->list_lock irq_context: 0 table_lock fs_reclaim irq_context: 0 table_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 table_lock pool_lock#2 irq_context: 0 table_lock &c->lock irq_context: 0 table_lock &pcp->lock &zone->lock irq_context: 0 table_lock &zone->lock irq_context: 0 table_lock &____s->seqcount irq_context: 0 table_lock lock irq_context: 0 table_lock lock kernfs_idr_lock irq_context: 0 table_lock &root->kernfs_rwsem irq_context: 0 table_lock &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 table_lock &k->k_lock irq_context: 0 table_lock uevent_sock_mutex irq_context: 0 table_lock &obj_hash[i].lock irq_context: 0 table_lock rcu_read_lock &pool->lock/1 irq_context: 0 table_lock rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 table_lock rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 table_lock rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 table_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 table_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 table_lock running_helpers_waitq.lock irq_context: 0 table_lock &rq->__lock irq_context: 0 table_lock (console_sem).lock irq_context: 0 table_lock console_lock console_srcu console_owner_lock irq_context: 0 table_lock console_lock console_srcu console_owner irq_context: 0 table_lock console_lock console_srcu console_owner &port_lock_key irq_context: 0 table_lock console_lock console_srcu console_owner console_owner_lock irq_context: 0 table_lock &pcp->lock &zone->lock &____s->seqcount irq_context: softirq net/core/link_watch.c:31 irq_context: softirq net/core/link_watch.c:31 rcu_read_lock &pool->lock irq_context: softirq net/core/link_watch.c:31 rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: softirq net/core/link_watch.c:31 rcu_read_lock &pool->lock &p->pi_lock irq_context: softirq net/core/link_watch.c:31 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: softirq net/core/link_watch.c:31 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 table_lock batched_entropy_u8.lock irq_context: 0 table_lock kfence_freelist_lock irq_context: hardirq &vb->stop_update_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: hardirq allocation_wait.lock &p->pi_lock &cfs_rq->removed.lock irq_context: 0 table_lock lock kernfs_idr_lock &c->lock irq_context: 0 table_lock lock kernfs_idr_lock &pcp->lock &zone->lock irq_context: 0 table_lock lock kernfs_idr_lock &zone->lock irq_context: 0 table_lock lock kernfs_idr_lock &____s->seqcount irq_context: 0 table_lock lock kernfs_idr_lock pool_lock#2 irq_context: softirq rcu_callback rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 table_lock &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)mm_percpu_wq (work_completion)(&(({ do { const void *__vpp_verify = (typeof((&vmstat_work) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((&vmstat_work))) *)((&vmstat_work)))); (typeof((typeof(*((&vmstat_work))) *)((&vmstat_work)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); }))->work) &pcp->lock irq_context: 0 (wq_completion)mm_percpu_wq (work_completion)(&(({ do { const void *__vpp_verify = (typeof((&vmstat_work) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((&vmstat_work))) 
*)((&vmstat_work)))); (typeof((typeof(*((&vmstat_work))) *)((&vmstat_work)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); }))->work) &pcp->lock &zone->lock irq_context: 0 (wq_completion)mm_percpu_wq (work_completion)(&(({ do { const void *__vpp_verify = (typeof((&vmstat_work) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((&vmstat_work))) *)((&vmstat_work)))); (typeof((typeof(*((&vmstat_work))) *)((&vmstat_work)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); }))->work) &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &dev->mutex devtree_lock irq_context: 0 &dev->mutex usb_bus_idr_lock irq_context: 0 &dev->mutex usb_bus_idr_lock fs_reclaim irq_context: 0 &dev->mutex usb_bus_idr_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex usb_bus_idr_lock pool_lock#2 irq_context: 0 &dev->mutex (usb_notifier_list).rwsem irq_context: 0 &dev->mutex (usb_notifier_list).rwsem fs_reclaim irq_context: 0 &dev->mutex (usb_notifier_list).rwsem fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex (usb_notifier_list).rwsem pool_lock#2 irq_context: 0 &dev->mutex (usb_notifier_list).rwsem pin_fs_lock irq_context: 0 &dev->mutex (usb_notifier_list).rwsem &sb->s_type->i_mutex_key#3 irq_context: 0 &dev->mutex (usb_notifier_list).rwsem &sb->s_type->i_mutex_key#3 rename_lock.seqcount irq_context: 0 &dev->mutex (usb_notifier_list).rwsem &sb->s_type->i_mutex_key#3 fs_reclaim irq_context: 0 &dev->mutex (usb_notifier_list).rwsem &sb->s_type->i_mutex_key#3 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex (usb_notifier_list).rwsem &sb->s_type->i_mutex_key#3 pool_lock#2 irq_context: 0 &dev->mutex (usb_notifier_list).rwsem &sb->s_type->i_mutex_key#3 &dentry->d_lock irq_context: 0 &dev->mutex (usb_notifier_list).rwsem &sb->s_type->i_mutex_key#3 rcu_read_lock rename_lock.seqcount irq_context: 0 &dev->mutex (usb_notifier_list).rwsem &sb->s_type->i_mutex_key#3 &dentry->d_lock &wq irq_context: 0 &dev->mutex (usb_notifier_list).rwsem &sb->s_type->i_mutex_key#3 mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex (usb_notifier_list).rwsem &sb->s_type->i_mutex_key#3 &sb->s_type->i_lock_key#7 irq_context: 0 &dev->mutex (usb_notifier_list).rwsem &sb->s_type->i_mutex_key#3 &s->s_inode_list_lock irq_context: 0 &dev->mutex (usb_notifier_list).rwsem &sb->s_type->i_mutex_key#3 tk_core.seq.seqcount irq_context: 0 &dev->mutex (usb_notifier_list).rwsem &sb->s_type->i_mutex_key#3 &sb->s_type->i_lock_key#7 &dentry->d_lock irq_context: 0 &dev->mutex (usb_notifier_list).rwsem &c->lock irq_context: 0 &dev->mutex (usb_notifier_list).rwsem &pcp->lock &zone->lock irq_context: 0 &dev->mutex (usb_notifier_list).rwsem &zone->lock irq_context: 0 &dev->mutex (usb_notifier_list).rwsem &____s->seqcount irq_context: 0 &dev->mutex (usb_notifier_list).rwsem &x->wait#9 irq_context: 0 &dev->mutex (usb_notifier_list).rwsem &obj_hash[i].lock irq_context: 0 &dev->mutex (usb_notifier_list).rwsem rcu_read_lock pool_lock#2 irq_context: 0 &dev->mutex (usb_notifier_list).rwsem &k->list_lock irq_context: 0 &dev->mutex (usb_notifier_list).rwsem gdp_mutex irq_context: 0 &dev->mutex (usb_notifier_list).rwsem gdp_mutex &k->list_lock irq_context: 0 &dev->mutex (usb_notifier_list).rwsem gdp_mutex fs_reclaim irq_context: 0 &dev->mutex (usb_notifier_list).rwsem gdp_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex (usb_notifier_list).rwsem gdp_mutex pool_lock#2 irq_context: 0 &dev->mutex 
(usb_notifier_list).rwsem gdp_mutex lock irq_context: 0 &dev->mutex (usb_notifier_list).rwsem gdp_mutex lock kernfs_idr_lock irq_context: 0 &dev->mutex (usb_notifier_list).rwsem gdp_mutex &root->kernfs_rwsem irq_context: 0 &dev->mutex (usb_notifier_list).rwsem gdp_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 &dev->mutex (usb_notifier_list).rwsem lock irq_context: 0 &dev->mutex (usb_notifier_list).rwsem lock kernfs_idr_lock irq_context: 0 &dev->mutex (usb_notifier_list).rwsem &root->kernfs_rwsem irq_context: 0 &dev->mutex (usb_notifier_list).rwsem &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 &dev->mutex (usb_notifier_list).rwsem bus_type_sem irq_context: 0 &dev->mutex (usb_notifier_list).rwsem sysfs_symlink_target_lock irq_context: 0 &dev->mutex (usb_notifier_list).rwsem &root->kernfs_rwsem irq_context: 0 &dev->mutex (usb_notifier_list).rwsem &dev->power.lock irq_context: 0 &dev->mutex (usb_notifier_list).rwsem dpm_list_mtx irq_context: 0 &dev->mutex (usb_notifier_list).rwsem req_lock irq_context: 0 &dev->mutex (usb_notifier_list).rwsem &p->pi_lock irq_context: 0 &dev->mutex (usb_notifier_list).rwsem &p->pi_lock &rq->__lock irq_context: 0 &dev->mutex (usb_notifier_list).rwsem &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &dev->mutex (usb_notifier_list).rwsem &rq->__lock irq_context: 0 &dev->mutex (usb_notifier_list).rwsem &x->wait#11 irq_context: 0 &dev->mutex (usb_notifier_list).rwsem uevent_sock_mutex irq_context: 0 &dev->mutex (usb_notifier_list).rwsem rcu_read_lock &pool->lock/1 irq_context: 0 &dev->mutex (usb_notifier_list).rwsem rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 &dev->mutex (usb_notifier_list).rwsem rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 &dev->mutex (usb_notifier_list).rwsem rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 &dev->mutex (usb_notifier_list).rwsem rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 &dev->mutex (usb_notifier_list).rwsem rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &dev->mutex (usb_notifier_list).rwsem running_helpers_waitq.lock irq_context: 0 &dev->mutex (usb_notifier_list).rwsem &k->k_lock irq_context: 0 &dev->mutex (usb_notifier_list).rwsem subsys mutex#59 irq_context: 0 &dev->mutex (usb_notifier_list).rwsem subsys mutex#59 &k->k_lock irq_context: 0 &dev->mutex (usb_notifier_list).rwsem mon_lock irq_context: 0 &dev->mutex usb_port_peer_mutex irq_context: 0 &dev->mutex device_state_lock irq_context: 0 &dev->mutex usb_bus_idr_lock mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex usb_bus_idr_lock &c->lock irq_context: 0 &dev->mutex usb_bus_idr_lock &pcp->lock &zone->lock irq_context: 0 &dev->mutex usb_bus_idr_lock &zone->lock irq_context: 0 &dev->mutex usb_bus_idr_lock &____s->seqcount irq_context: 0 &dev->mutex usb_bus_idr_lock hcd_root_hub_lock irq_context: 0 &dev->mutex usb_bus_idr_lock hcd_root_hub_lock hcd_urb_list_lock irq_context: 0 &dev->mutex usb_bus_idr_lock &obj_hash[i].lock irq_context: 0 &dev->mutex usb_bus_idr_lock hcd_root_hub_lock &bh->lock irq_context: 0 &dev->mutex usb_bus_idr_lock hcd_root_hub_lock &p->pi_lock irq_context: 0 &dev->mutex usb_bus_idr_lock hcd_root_hub_lock &p->pi_lock &rq->__lock irq_context: 0 &dev->mutex usb_bus_idr_lock hcd_root_hub_lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &dev->mutex usb_bus_idr_lock &rq->__lock irq_context: softirq &bh->lock irq_context: softirq lock#6 irq_context: 
softirq lock#6 kcov_remote_lock irq_context: softirq &x->wait#19 irq_context: 0 &dev->mutex usb_bus_idr_lock &x->wait#19 irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->power.lock irq_context: 0 &dev->mutex usb_bus_idr_lock device_links_srcu irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->power.lock &dev->power.lock/1 irq_context: 0 &dev->mutex usb_bus_idr_lock rcu_read_lock pool_lock#2 irq_context: 0 &dev->mutex usb_bus_idr_lock (console_sem).lock irq_context: 0 &dev->mutex usb_bus_idr_lock console_lock console_srcu console_owner_lock irq_context: 0 &dev->mutex usb_bus_idr_lock console_lock console_srcu console_owner irq_context: 0 &dev->mutex usb_bus_idr_lock console_lock console_srcu console_owner &port_lock_key irq_context: 0 &dev->mutex usb_bus_idr_lock console_lock console_srcu console_owner console_owner_lock irq_context: 0 &dev->mutex usb_bus_idr_lock input_pool.lock irq_context: 0 &dev->mutex usb_bus_idr_lock batched_entropy_u8.lock irq_context: 0 &dev->mutex usb_bus_idr_lock kfence_freelist_lock irq_context: 0 &dev->mutex usb_bus_idr_lock &k->list_lock irq_context: 0 &dev->mutex usb_bus_idr_lock lock irq_context: 0 &dev->mutex usb_bus_idr_lock lock kernfs_idr_lock irq_context: 0 &dev->mutex usb_bus_idr_lock &root->kernfs_rwsem irq_context: 0 &dev->mutex usb_bus_idr_lock &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 &dev->mutex usb_bus_idr_lock bus_type_sem irq_context: 0 &dev->mutex usb_bus_idr_lock lock kernfs_idr_lock pool_lock#2 irq_context: 0 &dev->mutex usb_bus_idr_lock sysfs_symlink_target_lock irq_context: 0 &dev->mutex usb_bus_idr_lock &k->k_lock irq_context: 0 &dev->mutex usb_bus_idr_lock &root->kernfs_rwsem irq_context: 0 &dev->mutex usb_bus_idr_lock dpm_list_mtx irq_context: 0 &dev->mutex usb_bus_idr_lock req_lock irq_context: 0 &dev->mutex usb_bus_idr_lock &p->pi_lock irq_context: 0 &dev->mutex usb_bus_idr_lock &x->wait#11 irq_context: 0 &dev->mutex usb_bus_idr_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &type->i_mutex_dir_key#2 &c->lock irq_context: 0 &type->i_mutex_dir_key#2 &pcp->lock &zone->lock irq_context: 0 &type->i_mutex_dir_key#2 &zone->lock irq_context: 0 &type->i_mutex_dir_key#2 &____s->seqcount irq_context: 0 &dev->mutex usb_bus_idr_lock &(&priv->bus_notifier)->rwsem irq_context: 0 &dev->mutex usb_bus_idr_lock &(&priv->bus_notifier)->rwsem fs_reclaim irq_context: 0 &dev->mutex usb_bus_idr_lock &(&priv->bus_notifier)->rwsem fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex usb_bus_idr_lock &(&priv->bus_notifier)->rwsem pool_lock#2 irq_context: 0 &dev->mutex usb_bus_idr_lock &(&priv->bus_notifier)->rwsem lock irq_context: 0 &dev->mutex usb_bus_idr_lock &(&priv->bus_notifier)->rwsem lock kernfs_idr_lock irq_context: 0 &dev->mutex usb_bus_idr_lock &(&priv->bus_notifier)->rwsem &root->kernfs_rwsem irq_context: 0 &dev->mutex usb_bus_idr_lock &(&priv->bus_notifier)->rwsem &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 &dev->mutex usb_bus_idr_lock &(&priv->bus_notifier)->rwsem &root->kernfs_rwsem irq_context: 0 &dev->mutex usb_bus_idr_lock &(&priv->bus_notifier)->rwsem &c->lock irq_context: 0 &dev->mutex usb_bus_idr_lock &(&priv->bus_notifier)->rwsem &pcp->lock &zone->lock irq_context: 0 &dev->mutex usb_bus_idr_lock &(&priv->bus_notifier)->rwsem &zone->lock irq_context: 0 &dev->mutex usb_bus_idr_lock &(&priv->bus_notifier)->rwsem &____s->seqcount irq_context: 0 &dev->mutex usb_bus_idr_lock uevent_sock_mutex irq_context: 0 &dev->mutex usb_bus_idr_lock rcu_read_lock &pool->lock/1 
irq_context: 0 &dev->mutex usb_bus_idr_lock rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 &dev->mutex usb_bus_idr_lock rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 &dev->mutex usb_bus_idr_lock rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 &dev->mutex usb_bus_idr_lock running_helpers_waitq.lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->power.lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &k->list_lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &k->k_lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex device_links_srcu irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex fwnode_link_lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex device_links_lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &(&priv->bus_notifier)->rwsem irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex sysfs_symlink_target_lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex fs_reclaim irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex pool_lock#2 irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex lock kernfs_idr_lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &root->kernfs_rwsem irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &____s->seqcount irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &pcp->lock &zone->lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &zone->lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex rcu_read_lock pool_lock#2 irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &obj_hash[i].lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex set_config_lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex hcd->bandwidth_mutex irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex hcd->bandwidth_mutex devtree_lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex hcd->bandwidth_mutex &obj_hash[i].lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex hcd->bandwidth_mutex &x->wait#9 irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex hcd->bandwidth_mutex &dev->power.lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex hcd->bandwidth_mutex fs_reclaim irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex hcd->bandwidth_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex hcd->bandwidth_mutex pool_lock#2 irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex hcd->bandwidth_mutex mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex hcd->bandwidth_mutex hcd_root_hub_lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex hcd->bandwidth_mutex hcd_root_hub_lock hcd_urb_list_lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex hcd->bandwidth_mutex hcd_root_hub_lock &bh->lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex hcd->bandwidth_mutex hcd_root_hub_lock &p->pi_lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex hcd->bandwidth_mutex hcd_root_hub_lock &p->pi_lock &rq->__lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex hcd->bandwidth_mutex hcd_root_hub_lock &p->pi_lock &rq->__lock 
&per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex hcd->bandwidth_mutex &rq->__lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex hcd->bandwidth_mutex &x->wait#19 irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex device_state_lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex bus_type_sem irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &c->lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex dpm_list_mtx irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex uevent_sock_mutex irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex rcu_read_lock &pool->lock/1 irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex running_helpers_waitq.lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex &dev->power.lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex &k->list_lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex &k->k_lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex &new_driver->dynids.lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex device_links_srcu irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex fwnode_link_lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex device_links_lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex &(&priv->bus_notifier)->rwsem irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex sysfs_symlink_target_lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex fs_reclaim irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex pool_lock#2 irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex lock kernfs_idr_lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex &root->kernfs_rwsem irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex lock kernfs_idr_lock pool_lock#2 irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex &dev->power.lock &dev->power.lock/1 irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex (console_sem).lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex console_lock console_srcu console_owner_lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex console_lock console_srcu console_owner irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex console_lock console_srcu console_owner &port_lock_key irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex console_lock console_srcu console_owner console_owner_lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex &obj_hash[i].lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex hcd_root_hub_lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex 
&dev->mutex hcd_root_hub_lock hcd_urb_list_lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex &dum_hcd->dum->lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex hcd_root_hub_lock &bh->lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex hcd_root_hub_lock &p->pi_lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex hcd_root_hub_lock &p->pi_lock &rq->__lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex hcd_root_hub_lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex &rq->__lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex &x->wait#19 irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex &____s->seqcount irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex &pcp->lock &zone->lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex &zone->lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex rcu_read_lock pool_lock#2 irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex &hub->status_mutex irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex &hub->status_mutex mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex &hub->status_mutex pool_lock#2 irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex &hub->status_mutex hcd_root_hub_lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex &hub->status_mutex hcd_root_hub_lock hcd_urb_list_lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex &hub->status_mutex fs_reclaim irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex &hub->status_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex &hub->status_mutex &dum_hcd->dum->lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex &hub->status_mutex &obj_hash[i].lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex &hub->status_mutex hcd_root_hub_lock &bh->lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex &hub->status_mutex hcd_root_hub_lock &p->pi_lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex &hub->status_mutex hcd_root_hub_lock &p->pi_lock &rq->__lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex &hub->status_mutex hcd_root_hub_lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex &hub->status_mutex &rq->__lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex &hub->status_mutex &x->wait#19 irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex fs_reclaim irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex pool_lock#2 irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex &x->wait#9 irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex &obj_hash[i].lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex &k->list_lock irq_context: 0 
&dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex lock kernfs_idr_lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex &root->kernfs_rwsem irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex bus_type_sem irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex &root->kernfs_rwsem irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex &c->lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex &pcp->lock &zone->lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex &zone->lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex &____s->seqcount irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex &dev->power.lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex dpm_list_mtx irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex &k->k_lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex dev_pm_qos_mtx irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex dev_pm_qos_mtx fs_reclaim irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex dev_pm_qos_mtx fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex dev_pm_qos_mtx pool_lock#2 irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex dev_pm_qos_mtx &dev->power.lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex dev_pm_qos_mtx pm_qos_lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex component_mutex irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex device_links_srcu irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex &dev->power.lock &dev->power.lock/1 irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex dev_pm_qos_sysfs_mtx irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex dev_pm_qos_sysfs_mtx dev_pm_qos_mtx irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex dev_pm_qos_sysfs_mtx &root->kernfs_rwsem irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex dev_pm_qos_sysfs_mtx fs_reclaim irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex dev_pm_qos_sysfs_mtx fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex dev_pm_qos_sysfs_mtx pool_lock#2 irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex dev_pm_qos_sysfs_mtx lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex dev_pm_qos_sysfs_mtx lock kernfs_idr_lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex dev_pm_qos_sysfs_mtx &root->kernfs_rwsem irq_context: 0 
&dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex dev_pm_qos_sysfs_mtx &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex &dev->power.lock rcu_read_lock &pool->lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex &dev->power.lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex &dev->power.lock rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex &dev->power.lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex &dev->power.lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex &dev->power.lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex &rq->__lock irq_context: 0 (wq_completion)pm irq_context: 0 (wq_completion)pm (work_completion)(&dev->power.work) irq_context: 0 (wq_completion)pm (work_completion)(&dev->power.work) &dev->power.lock irq_context: 0 (wq_completion)pm (work_completion)(&dev->power.work) &dev->power.lock &dev->power.wait_queue irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex rcu_read_lock &pool->lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&hub->init_work)->work) irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&hub->init_work)->work) &dev->mutex &lock->wait_lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&hub->init_work)->work) &dev->mutex &pool->lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&hub->init_work)->work) &dev->mutex &rq->__lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&hub->init_work)->work) &dev->mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex deferred_probe_mutex irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex &c->lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex uevent_sock_mutex irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex rcu_read_lock &pool->lock/1 irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex running_helpers_waitq.lock irq_context: 0 
&dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex probe_waitqueue.lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex subsys mutex#60 irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &x->wait#9 irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &root->kernfs_rwsem irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex (usb_notifier_list).rwsem irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex deferred_probe_mutex irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex probe_waitqueue.lock irq_context: 0 &dev->mutex usb_bus_idr_lock &lock->wait_lock irq_context: 0 &dev->mutex usb_bus_idr_lock &p->pi_lock &rq->__lock irq_context: 0 &dev->mutex usb_bus_idr_lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&hub->init_work)->work) &dev->mutex &hub->status_mutex irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&hub->init_work)->work) &dev->mutex &hub->status_mutex mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&hub->init_work)->work) &dev->mutex &hub->status_mutex pool_lock#2 irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&hub->init_work)->work) &dev->mutex &hub->status_mutex hcd_root_hub_lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&hub->init_work)->work) &dev->mutex &hub->status_mutex hcd_root_hub_lock hcd_urb_list_lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&hub->init_work)->work) &dev->mutex &hub->status_mutex fs_reclaim irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&hub->init_work)->work) &dev->mutex &hub->status_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&hub->init_work)->work) &dev->mutex &hub->status_mutex &dum_hcd->dum->lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&hub->init_work)->work) &dev->mutex &hub->status_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&hub->init_work)->work) &dev->mutex &hub->status_mutex hcd_root_hub_lock &bh->lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&hub->init_work)->work) &dev->mutex &hub->status_mutex hcd_root_hub_lock &p->pi_lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&hub->init_work)->work) &dev->mutex &hub->status_mutex hcd_root_hub_lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&hub->init_work)->work) &dev->mutex &hub->status_mutex hcd_root_hub_lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&hub->init_work)->work) &dev->mutex &hub->status_mutex &x->wait#19 irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&hub->init_work)->work) &dev->mutex &hub->status_mutex &base->lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&hub->init_work)->work) &dev->mutex &hub->status_mutex &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events_power_efficient 
(work_completion)(&(&hub->init_work)->work) &dev->mutex &hub->status_mutex &pool->lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&hub->init_work)->work) &dev->mutex &hub->status_mutex &rq->__lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&hub->init_work)->work) &dev->mutex &hub->status_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq &x->wait#19 &p->pi_lock irq_context: softirq &x->wait#19 &p->pi_lock &rq->__lock irq_context: softirq &x->wait#19 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&hub->init_work)->work) &dev->mutex &hub->status_mutex (&timer.timer) irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&hub->init_work)->work) &dev->mutex hcd_root_hub_lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&hub->init_work)->work) &dev->mutex hcd_root_hub_lock hcd_urb_list_lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&hub->init_work)->work) &dev->mutex rcu_read_lock &pool->lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&hub->init_work)->work) &dev->mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&hub->init_work)->work) &dev->mutex rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 (wq_completion)usb_hub_wq irq_context: 0 (wq_completion)usb_hub_wq (work_completion)(&hub->events) irq_context: 0 (wq_completion)usb_hub_wq (work_completion)(&hub->events) lock#6 irq_context: 0 (wq_completion)usb_hub_wq (work_completion)(&hub->events) lock#6 kcov_remote_lock irq_context: 0 (wq_completion)usb_hub_wq (work_completion)(&hub->events) &dev->mutex &dev->power.lock irq_context: 0 (wq_completion)usb_hub_wq (work_completion)(&hub->events) &dev->power.lock irq_context: 0 (wq_completion)usb_hub_wq (work_completion)(&hub->events) &dev->power.lock &dev->power.wait_queue irq_context: 0 &dev->mutex usb_bus_idr_lock subsys mutex#60 irq_context: 0 &dev->mutex usb_bus_idr_lock &x->wait#9 irq_context: 0 &dev->mutex usb_bus_idr_lock &hub->irq_urb_lock irq_context: 0 &dev->mutex usb_bus_idr_lock (&hub->irq_urb_retry) irq_context: 0 &dev->mutex usb_bus_idr_lock &base->lock irq_context: 0 &dev->mutex usb_bus_idr_lock hcd_urb_unlink_lock irq_context: softirq usb_kill_urb_queue.lock irq_context: 0 &dev->mutex usb_bus_idr_lock &cfs_rq->removed.lock irq_context: 0 &dev->mutex usb_bus_idr_lock (work_completion)(&hub->tt.clear_work) irq_context: 0 &dev->mutex usb_bus_idr_lock &dum_hcd->dum->lock irq_context: 0 &dev->mutex usb_bus_idr_lock device_state_lock irq_context: 0 &dev->mutex usb_bus_idr_lock hcd_urb_list_lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->power.lock &dev->power.wait_queue irq_context: 0 &dev->mutex (usb_notifier_list).rwsem &sb->s_type->i_mutex_key#3 &c->lock irq_context: 0 &dev->mutex (usb_notifier_list).rwsem &sb->s_type->i_mutex_key#3 &pcp->lock &zone->lock irq_context: 0 &dev->mutex (usb_notifier_list).rwsem &sb->s_type->i_mutex_key#3 &zone->lock irq_context: 0 &dev->mutex (usb_notifier_list).rwsem &sb->s_type->i_mutex_key#3 &____s->seqcount irq_context: 0 &dev->mutex (usb_notifier_list).rwsem gdp_mutex &c->lock irq_context: 0 &dev->mutex (usb_notifier_list).rwsem gdp_mutex &pcp->lock &zone->lock irq_context: 0 &dev->mutex (usb_notifier_list).rwsem gdp_mutex &zone->lock irq_context: 0 &dev->mutex (usb_notifier_list).rwsem gdp_mutex 
&____s->seqcount irq_context: 0 &dev->mutex (usb_notifier_list).rwsem lock kernfs_idr_lock pool_lock#2 irq_context: 0 &dev->mutex usb_bus_idr_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 &dev->mutex usb_bus_idr_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &rq->__lock irq_context: softirq drivers/block/floppy.c:640 irq_context: softirq drivers/block/floppy.c:640 rcu_read_lock &pool->lock/1 irq_context: softirq drivers/block/floppy.c:640 rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: softirq drivers/block/floppy.c:640 rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: softirq drivers/block/floppy.c:640 rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: softirq drivers/block/floppy.c:640 rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq drivers/block/floppy.c:640 rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: softirq rcu_callback pool_lock irq_context: softirq rcu_callback rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&blkg->free_work) irq_context: 0 (wq_completion)events (work_completion)(&blkg->free_work) &q->blkcg_mutex irq_context: 0 (wq_completion)events (work_completion)(&blkg->free_work) &q->blkcg_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&blkg->free_work) &q->blkcg_mutex pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&blkg->free_work) &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&blkg->free_work) pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&blkg->free_work) &xa->xa_lock#7 irq_context: 0 (wq_completion)events (work_completion)(&blkg->free_work) pcpu_lock irq_context: 0 (wq_completion)events (work_completion)(&blkg->free_work) blk_queue_ida.xa_lock irq_context: 0 (wq_completion)events (work_completion)(&blkg->free_work) percpu_ref_switch_lock irq_context: 0 (wq_completion)events (work_completion)(&blkg->free_work) &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) (console_sem).lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) console_owner_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) console_owner irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) console_lock console_srcu console_owner_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) console_lock console_srcu console_owner irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) console_lock console_srcu console_owner &port_lock_key irq_context: 0 &dev->mutex usb_bus_idr_lock console_owner_lock irq_context: 0 &dev->mutex usb_bus_idr_lock console_owner irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) console_lock console_srcu console_owner console_owner_lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex hcd->bandwidth_mutex &c->lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex hcd->bandwidth_mutex &pcp->lock &zone->lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex hcd->bandwidth_mutex &zone->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &desc->request_mutex proc_subdir_lock irq_context: 0 (wq_completion)events_unbound 
(work_completion)(&entry->work) &desc->request_mutex &ent->pde_unload_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &desc->request_mutex proc_inum_ida.xa_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &desc->request_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &desc->request_mutex pool_lock#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &root->kernfs_rwsem kernfs_idr_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &root->kernfs_rwsem &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &root->kernfs_rwsem pool_lock#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) klist_remove_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &k->k_lock klist_remove_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) sysfs_symlink_target_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) kernfs_idr_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) wq_pool_mutex &wq->mutex &pool->lock/1 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &pool->lock/1 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &pool->lock/1 rcu_read_lock &pool->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &pool->lock/1 rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &pool->lock/1 rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &pool->lock/1 rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &pool->lock/1 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &pool->lock/1 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&pwq->unbound_release_work) rcu_state.exp_mutex rcu_node_0 irq_context: 0 (wq_completion)events (work_completion)(&pwq->unbound_release_work) rcu_state.exp_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&pwq->unbound_release_work) rcu_state.exp_mutex rcu_read_lock &pool->lock irq_context: 0 (wq_completion)events (work_completion)(&pwq->unbound_release_work) rcu_state.exp_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&pwq->unbound_release_work) rcu_state.exp_mutex &rnp->exp_wq[0] irq_context: 0 (wq_completion)events (work_completion)(&pwq->unbound_release_work) rcu_state.exp_mutex &pool->lock irq_context: 0 (wq_completion)events (work_completion)(&pwq->unbound_release_work) rcu_state.exp_mutex &pool->lock &p->pi_lock irq_context: 0 (wq_completion)events (work_completion)(&pwq->unbound_release_work) rcu_state.exp_mutex &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&pwq->unbound_release_work) rcu_state.exp_mutex &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&pwq->unbound_release_work) rcu_state.exp_mutex &rq->__lock irq_context: 0 (wq_completion)events 
(work_completion)(&pwq->unbound_release_work) rcu_state.exp_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) (&motor_off_timer[drive]) irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) cpu_hotplug_lock cpuhp_state_mutex irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &xa->xa_lock#7 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &q->unused_hctx_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &q->queue_lock &blkcg->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &q->queue_lock &blkcg->lock percpu_ref_switch_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &q->queue_lock &blkcg->lock percpu_ref_switch_lock &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &q->queue_lock &blkcg->lock percpu_ref_switch_lock pool_lock#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) (&sq->pending_timer) irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) (work_completion)(&td->dispatch_work) irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &q->blkcg_mutex irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &q->blkcg_mutex &q->queue_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &q->blkcg_mutex &q->queue_lock &blkcg->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &q->blkcg_mutex &q->queue_lock &blkcg->lock (&sq->pending_timer) irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &q->blkcg_mutex &q->queue_lock &blkcg->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &q->blkcg_mutex &q->queue_lock &blkcg->lock &base->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &q->blkcg_mutex &q->queue_lock &blkcg->lock percpu_counters_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &q->blkcg_mutex &q->queue_lock &blkcg->lock pcpu_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &q->blkcg_mutex &q->queue_lock &blkcg->lock pool_lock#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) pcpu_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &xa->xa_lock#6 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &fsnotify_mark_srcu irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &q->blkcg_mutex &q->queue_lock &blkcg->lock pool_lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&rew->rew_work) &pool->lock irq_context: 0 &dev->mutex usb_bus_idr_lock rcu_node_0 irq_context: 0 &dev->mutex usb_bus_idr_lock &rcu_state.expedited_wq irq_context: 0 &dev->mutex usb_bus_idr_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex lock kernfs_idr_lock pool_lock#2 irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex &obj_hash[i].lock pool_lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex hcd->bandwidth_mutex &____s->seqcount irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex lock kernfs_idr_lock pool_lock#2 irq_context: 0 &dev->mutex (usb_notifier_list).rwsem &pcp->lock 
&zone->lock &____s->seqcount irq_context: 0 &dev->mutex (usb_notifier_list).rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &obj_hash[i].lock pool_lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &cfs_rq->removed.lock irq_context: 0 &dev->mutex usb_bus_idr_lock hcd_root_hub_lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &sig->cred_guard_mutex &sb->s_type->i_mutex_key &rq->__lock irq_context: 0 &dev->mutex (usb_notifier_list).rwsem batched_entropy_u8.lock irq_context: 0 &dev->mutex (usb_notifier_list).rwsem kfence_freelist_lock irq_context: 0 &dev->mutex (usb_notifier_list).rwsem gdp_mutex lock kernfs_idr_lock pool_lock#2 irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &dev->mutex udc_lock irq_context: 0 &dev->mutex gdp_mutex &____s->seqcount irq_context: 0 &dev->mutex gdp_mutex &pcp->lock &zone->lock irq_context: 0 &dev->mutex gdp_mutex &zone->lock irq_context: 0 &dev->mutex gdp_mutex rcu_read_lock pool_lock#2 irq_context: 0 &dev->mutex gdp_mutex &obj_hash[i].lock irq_context: 0 &dev->mutex subsys mutex#61 irq_context: 0 &dev->mutex subsys mutex#61 &k->k_lock irq_context: 0 &dev->mutex gadget_id_numbers.xa_lock irq_context: 0 (wq_completion)events (work_completion)(&gadget->work) irq_context: 0 (wq_completion)events (work_completion)(&gadget->work) &root->kernfs_rwsem irq_context: 0 (wq_completion)events (work_completion)(&gadget->work) kernfs_notify_lock irq_context: 0 (wq_completion)events (work_completion)(&gadget->work) kernfs_notify_lock rcu_read_lock &pool->lock irq_context: 0 (wq_completion)events (work_completion)(&gadget->work) kernfs_notify_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events kernfs_notify_work irq_context: 0 (wq_completion)events kernfs_notify_work kernfs_notify_lock irq_context: 0 (wq_completion)events kernfs_notify_work &root->kernfs_supers_rwsem irq_context: 0 &dev->mutex subsys mutex#62 irq_context: 0 func_lock irq_context: 0 g_tf_lock irq_context: 0 &dev->mutex usb_bus_idr_lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex hcd->bandwidth_mutex rcu_read_lock pool_lock#2 irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex &vhci_hcd->vhci->lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex &hub->status_mutex &____s->seqcount irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex &hub->status_mutex &pcp->lock &zone->lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex &hub->status_mutex &zone->lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex &hub->status_mutex rcu_read_lock pool_lock#2 irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex &hub->status_mutex &vhci_hcd->vhci->lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex &obj_hash[i].lock pool_lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex dev_pm_qos_sysfs_mtx &c->lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex dev_pm_qos_sysfs_mtx &____s->seqcount irq_context: 0 
&dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex &pcp->lock &zone->lock &____s->seqcount irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&hub->init_work)->work) &dev->mutex &hub->status_mutex &vhci_hcd->vhci->lock irq_context: 0 &dev->mutex usb_bus_idr_lock &vhci_hcd->vhci->lock irq_context: 0 &dev->mutex (usb_notifier_list).rwsem &sb->s_type->i_mutex_key#3 &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex sysfs_symlink_target_lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex batched_entropy_u8.lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex kfence_freelist_lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex &dev->power.lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex dev_pm_qos_sysfs_mtx &pcp->lock &zone->lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex dev_pm_qos_sysfs_mtx &zone->lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex dev_pm_qos_mtx &c->lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex dev_pm_qos_mtx &pcp->lock &zone->lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex dev_pm_qos_mtx &zone->lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex dev_pm_qos_mtx &____s->seqcount irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex dev_pm_qos_sysfs_mtx lock kernfs_idr_lock pool_lock#2 irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex dev_pm_qos_sysfs_mtx lock kernfs_idr_lock &c->lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex dev_pm_qos_sysfs_mtx lock kernfs_idr_lock &pcp->lock &zone->lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex dev_pm_qos_sysfs_mtx lock kernfs_idr_lock &zone->lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex dev_pm_qos_sysfs_mtx lock kernfs_idr_lock &____s->seqcount irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex &pcp->lock &zone->lock &____s->seqcount irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&hub->init_work)->work) &dev->mutex &hub->status_mutex &c->lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&hub->init_work)->work) &dev->mutex &hub->status_mutex &pcp->lock &zone->lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&hub->init_work)->work) &dev->mutex &hub->status_mutex &zone->lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&hub->init_work)->work) &dev->mutex &hub->status_mutex &____s->seqcount irq_context: softirq &(&krcp->monitor_work)->timer rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &dev->mutex fs_reclaim &rq->__lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex lock kernfs_idr_lock &c->lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex lock kernfs_idr_lock &pcp->lock &zone->lock irq_context: 0 &dev->mutex 
usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex lock kernfs_idr_lock &zone->lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex lock kernfs_idr_lock &____s->seqcount irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex hcd->bandwidth_mutex &obj_hash[i].lock pool_lock irq_context: 0 &dev->mutex usb_bus_idr_lock &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)usb_hub_wq (work_completion)(&hub->events) &dev->power.lock rcu_read_lock &pool->lock irq_context: 0 (wq_completion)usb_hub_wq (work_completion)(&hub->events) &dev->power.lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)usb_hub_wq (work_completion)(&hub->events) &dev->power.lock rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 (wq_completion)pm (work_completion)(&dev->power.work) &hub->irq_urb_lock irq_context: 0 (wq_completion)pm (work_completion)(&dev->power.work) (&hub->irq_urb_retry) irq_context: 0 (wq_completion)pm (work_completion)(&dev->power.work) &obj_hash[i].lock irq_context: 0 (wq_completion)pm (work_completion)(&dev->power.work) &base->lock irq_context: 0 (wq_completion)pm (work_completion)(&dev->power.work) hcd_urb_unlink_lock irq_context: 0 (wq_completion)pm (work_completion)(&dev->power.work) hcd_root_hub_lock irq_context: 0 (wq_completion)pm (work_completion)(&dev->power.work) hcd_root_hub_lock hcd_urb_list_lock irq_context: 0 (wq_completion)pm (work_completion)(&dev->power.work) hcd_root_hub_lock &bh->lock irq_context: 0 (wq_completion)pm (work_completion)(&dev->power.work) hcd_root_hub_lock &p->pi_lock irq_context: 0 (wq_completion)pm (work_completion)(&dev->power.work) hcd_root_hub_lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)pm (work_completion)(&dev->power.work) hcd_root_hub_lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)pm (work_completion)(&dev->power.work) usb_kill_urb_queue.lock irq_context: 0 (wq_completion)pm (work_completion)(&dev->power.work) &pool->lock irq_context: 0 (wq_completion)pm (work_completion)(&dev->power.work) &rq->__lock irq_context: 0 (wq_completion)pm (work_completion)(&dev->power.work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq usb_kill_urb_queue.lock &p->pi_lock irq_context: softirq usb_kill_urb_queue.lock &p->pi_lock &rq->__lock irq_context: softirq usb_kill_urb_queue.lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)pm (work_completion)(&dev->power.work) (work_completion)(&hub->tt.clear_work) irq_context: 0 (wq_completion)pm (work_completion)(&dev->power.work) &vhci_hcd->vhci->lock irq_context: 0 (wq_completion)pm (work_completion)(&dev->power.work) device_state_lock irq_context: 0 (wq_completion)pm (work_completion)(&dev->power.work) hcd_urb_list_lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex dev_pm_qos_mtx &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &dev->mutex &n->list_lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex &hub->status_mutex &c->lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex batched_entropy_u8.lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex kfence_freelist_lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &meta->lock irq_context: 0 &dev->mutex usb_bus_idr_lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex fill_pool_map-wait-type-override 
pool_lock#2 irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex fill_pool_map-wait-type-override pool_lock irq_context: 0 &dev->mutex (usb_notifier_list).rwsem &obj_hash[i].lock pool_lock irq_context: 0 &dev->mutex (usb_notifier_list).rwsem fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 &dev->mutex (usb_notifier_list).rwsem fill_pool_map-wait-type-override pool_lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override pool_lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex lock kernfs_idr_lock &c->lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex lock kernfs_idr_lock &pcp->lock &zone->lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex lock kernfs_idr_lock &zone->lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex lock kernfs_idr_lock &____s->seqcount irq_context: 0 &dev->mutex (usb_notifier_list).rwsem &p->pi_lock &cfs_rq->removed.lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex &dev->power.lock rcu_read_lock &pool->lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex &dev->power.lock rcu_read_lock &pool->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&hub->init_work)->work) &dev->mutex &hub->status_mutex &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&hub->init_work)->work) &dev->mutex &hub->status_mutex fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&hub->init_work)->work) &dev->mutex &hub->status_mutex fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&hub->init_work)->work) &dev->mutex &hub->status_mutex fill_pool_map-wait-type-override &pcp->lock &zone->lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&hub->init_work)->work) &dev->mutex &hub->status_mutex fill_pool_map-wait-type-override &zone->lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&hub->init_work)->work) &dev->mutex &hub->status_mutex fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&hub->init_work)->work) &dev->mutex &hub->status_mutex fill_pool_map-wait-type-override pool_lock irq_context: 0 &dev->mutex usb_bus_idr_lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 &dev->mutex usb_bus_idr_lock fill_pool_map-wait-type-override pool_lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex batched_entropy_u8.lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex kfence_freelist_lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex &meta->lock irq_context: 0 &dev->mutex (usb_notifier_list).rwsem rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 &dev->mutex (usb_notifier_list).rwsem rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override pool_lock irq_context: softirq (&vblank->disable_timer) irq_context: softirq (&vblank->disable_timer) &dev->vbl_lock irq_context: softirq (&vblank->disable_timer) 
&dev->vbl_lock &dev->vblank_time_lock irq_context: softirq (&vblank->disable_timer) &dev->vbl_lock &dev->vblank_time_lock hrtimer_bases.lock irq_context: softirq (&vblank->disable_timer) &dev->vbl_lock &dev->vblank_time_lock hrtimer_bases.lock &obj_hash[i].lock irq_context: softirq (&vblank->disable_timer) &dev->vbl_lock &dev->vblank_time_lock hrtimer_bases.lock tk_core.seq.seqcount irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex fill_pool_map-wait-type-override &c->lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex fill_pool_map-wait-type-override &pcp->lock &zone->lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex fill_pool_map-wait-type-override &zone->lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex rcu_read_lock &pool->lock/1 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex hcd_root_hub_lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&hub->init_work)->work) &dev->mutex &hub->status_mutex &base->lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&hub->init_work)->work) &dev->mutex &hub->status_mutex &base->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override &c->lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override &pcp->lock &zone->lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override &zone->lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 &dev->mutex (usb_notifier_list).rwsem &sb->s_type->i_mutex_key#3 batched_entropy_u8.lock irq_context: 0 &dev->mutex (usb_notifier_list).rwsem &sb->s_type->i_mutex_key#3 kfence_freelist_lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex hcd->bandwidth_mutex fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex hcd->bandwidth_mutex fill_pool_map-wait-type-override pool_lock irq_context: 0 &dev->mutex usb_bus_idr_lock lock kernfs_idr_lock &c->lock irq_context: 0 &dev->mutex usb_bus_idr_lock lock kernfs_idr_lock &pcp->lock &zone->lock irq_context: 0 &dev->mutex usb_bus_idr_lock lock kernfs_idr_lock &zone->lock irq_context: 0 &dev->mutex usb_bus_idr_lock lock kernfs_idr_lock &____s->seqcount irq_context: softirq (&q->timeout) irq_context: softirq (&q->timeout) rcu_read_lock &pool->lock irq_context: softirq (&q->timeout) rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: softirq (&q->timeout) rcu_read_lock &pool->lock pool_lock#2 irq_context: softirq (&q->timeout) rcu_read_lock &pool->lock &p->pi_lock irq_context: softirq (&q->timeout) rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: softirq (&q->timeout) rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq (&q->timeout) rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 
(wq_completion)kblockd (work_completion)(&q->timeout_work) irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex fill_pool_map-wait-type-override &c->lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex fill_pool_map-wait-type-override &pcp->lock &zone->lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex fill_pool_map-wait-type-override &zone->lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex fill_pool_map-wait-type-override pool_lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex &dev->power.lock rcu_read_lock &pool->lock fill_pool_map-wait-type-override &c->lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex &dev->power.lock rcu_read_lock &pool->lock fill_pool_map-wait-type-override &pcp->lock &zone->lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex &dev->power.lock rcu_read_lock &pool->lock fill_pool_map-wait-type-override &zone->lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex &dev->power.lock rcu_read_lock &pool->lock fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex &root->kernfs_rwsem &rq->__lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex fill_pool_map-wait-type-override &c->lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex fill_pool_map-wait-type-override &pcp->lock &zone->lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex fill_pool_map-wait-type-override &zone->lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex fill_pool_map-wait-type-override pool_lock irq_context: 0 &dev->mutex (usb_notifier_list).rwsem &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 reading_mutex &x->wait#12 irq_context: 0 (wq_completion)pm (work_completion)(&dev->power.work) mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)pm (work_completion)(&dev->power.work) &c->lock irq_context: 0 (wq_completion)pm (work_completion)(&dev->power.work) &pcp->lock &zone->lock irq_context: 0 (wq_completion)pm (work_completion)(&dev->power.work) &zone->lock irq_context: 0 (wq_completion)pm (work_completion)(&dev->power.work) &____s->seqcount irq_context: 0 (wq_completion)pm (work_completion)(&dev->power.work) pool_lock#2 irq_context: 0 (wq_completion)pm (work_completion)(&dev->power.work) rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)pm (work_completion)(&dev->power.work) fs_reclaim irq_context: 0 (wq_completion)pm (work_completion)(&dev->power.work) fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)pm (work_completion)(&dev->power.work) &x->wait#19 irq_context: 0 (wq_completion)pm (work_completion)(&dev->power.work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)pm (work_completion)(&dev->power.work) (&timer.timer) irq_context: 0 &dev->mutex &dev->mutex probe_waitqueue.lock irq_context: 0 &dev->mutex gdp_mutex &c->lock irq_context: 0 i8042_lock 
irq_context: 0 &dev->mutex i8042_lock irq_context: 0 &dev->mutex i8042_lock (console_sem).lock irq_context: 0 &dev->mutex i8042_lock console_lock console_srcu console_owner_lock irq_context: 0 &dev->mutex i8042_lock console_lock console_srcu console_owner irq_context: 0 &dev->mutex i8042_lock console_lock console_srcu console_owner &port_lock_key irq_context: 0 &dev->mutex i8042_lock console_lock console_srcu console_owner console_owner_lock irq_context: 0 &dev->mutex &desc->request_mutex &irq_desc_lock_class ioapic_lock irq_context: 0 &dev->mutex &desc->request_mutex &irq_desc_lock_class mask_lock tmp_mask_lock ioapic_lock irq_context: 0 &dev->mutex &desc->request_mutex &irq_desc_lock_class ioapic_lock i8259A_lock irq_context: 0 &dev->mutex &x->wait#20 irq_context: hardirq i8042_lock &x->wait#20 irq_context: hardirq i8042_lock &x->wait#20 &p->pi_lock irq_context: 0 &dev->mutex (&timer.timer) irq_context: 0 &dev->mutex &desc->request_mutex proc_subdir_lock irq_context: 0 &dev->mutex &desc->request_mutex &ent->pde_unload_lock irq_context: 0 &dev->mutex &desc->request_mutex proc_inum_ida.xa_lock irq_context: 0 &dev->mutex &desc->request_mutex &obj_hash[i].lock irq_context: 0 &dev->mutex &desc->request_mutex pool_lock#2 irq_context: 0 &dev->mutex serio_event_lock irq_context: 0 &dev->mutex serio_event_lock pool_lock#2 irq_context: 0 &dev->mutex serio_event_lock rcu_read_lock &pool->lock irq_context: 0 &dev->mutex serio_event_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 &dev->mutex serio_event_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 &dev->mutex serio_event_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 &dev->mutex serio_event_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_long irq_context: 0 (wq_completion)events_long serio_event_work irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex serio_event_lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex i8042_lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex fs_reclaim irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex pool_lock#2 irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &k->list_lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex lock kernfs_idr_lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &root->kernfs_rwsem irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &c->lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &pcp->lock &zone->lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &zone->lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &____s->seqcount irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &device->physical_node_lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &device->physical_node_lock sysfs_symlink_target_lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &device->physical_node_lock 
fs_reclaim irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &device->physical_node_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &device->physical_node_lock pool_lock#2 irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &device->physical_node_lock lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &device->physical_node_lock lock kernfs_idr_lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &device->physical_node_lock &root->kernfs_rwsem irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &device->physical_node_lock &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex semaphore->lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex sysfs_symlink_target_lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &k->k_lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &root->kernfs_rwsem irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->power.lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex dpm_list_mtx irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex uevent_sock_mutex irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex running_helpers_waitq.lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &dev->power.lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &k->list_lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &k->k_lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex subsys mutex#63 irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex bus_type_sem irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 misc_mtx &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 input_ida.xa_lock irq_context: 0 input_ida.xa_lock pool_lock#2 irq_context: 0 subsys mutex#30 irq_context: 0 subsys mutex#30 &k->k_lock irq_context: 0 input_mutex input_ida.xa_lock irq_context: 0 input_mutex fs_reclaim irq_context: 0 input_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 input_mutex pool_lock#2 irq_context: 0 input_mutex &x->wait#9 irq_context: 0 input_mutex &obj_hash[i].lock irq_context: 0 input_mutex &dev->mutex#2 irq_context: 0 input_mutex chrdevs_lock irq_context: 0 input_mutex &k->list_lock irq_context: 0 input_mutex lock irq_context: 0 
input_mutex lock kernfs_idr_lock irq_context: 0 input_mutex &root->kernfs_rwsem irq_context: 0 input_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 input_mutex bus_type_sem irq_context: 0 input_mutex sysfs_symlink_target_lock irq_context: 0 input_mutex &pcp->lock &zone->lock irq_context: 0 input_mutex &zone->lock irq_context: 0 input_mutex &____s->seqcount irq_context: 0 input_mutex rcu_read_lock pool_lock#2 irq_context: 0 input_mutex &root->kernfs_rwsem irq_context: 0 input_mutex &c->lock irq_context: 0 input_mutex &dev->power.lock irq_context: 0 input_mutex dpm_list_mtx irq_context: 0 input_mutex req_lock irq_context: 0 input_mutex &p->pi_lock irq_context: 0 input_mutex &p->pi_lock &rq->__lock irq_context: 0 input_mutex &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 input_mutex &x->wait#11 irq_context: 0 input_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 input_mutex uevent_sock_mutex irq_context: 0 input_mutex rcu_read_lock &pool->lock/1 irq_context: 0 input_mutex rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 input_mutex rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 input_mutex rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 input_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 input_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 input_mutex running_helpers_waitq.lock irq_context: 0 input_mutex &k->k_lock irq_context: 0 input_mutex subsys mutex#30 irq_context: 0 input_mutex subsys mutex#30 &k->k_lock irq_context: 0 serio_event_lock irq_context: 0 serio_event_lock pool_lock#2 irq_context: 0 serio_event_lock rcu_read_lock &pool->lock irq_context: 0 serio_event_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 serio_event_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 serio_event_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 serio_event_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex device_links_srcu irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex fwnode_link_lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex device_links_lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &(&priv->bus_notifier)->rwsem irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex sysfs_symlink_target_lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex fs_reclaim irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex pool_lock#2 irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex lock kernfs_idr_lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &root->kernfs_rwsem irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex 
&serio->drv_mutex fs_reclaim irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex pool_lock#2 irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex &x->wait#9 irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex &c->lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex &pcp->lock &zone->lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex &zone->lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex &____s->seqcount irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex &serio->lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex i8042_mutex irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex i8042_mutex &serio->lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex i8042_mutex i8042_lock irq_context: hardirq &serio->lock irq_context: 0 &new_driver->dynids.lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex i8042_mutex &ps2dev->wait irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex i8042_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex i8042_mutex &base->lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex i8042_mutex &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex i8042_mutex &pool->lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex i8042_mutex &rq->__lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex i8042_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &dev->mutex rtc_ida.xa_lock irq_context: 0 &dev->mutex rtc_lock irq_context: 0 &dev->mutex &rtc->ops_lock irq_context: 0 &dev->mutex &rtc->ops_lock rtc_lock irq_context: 0 &dev->mutex chrdevs_lock irq_context: 0 &dev->mutex req_lock irq_context: 0 &dev->mutex &x->wait#11 irq_context: 0 &dev->mutex subsys mutex#27 irq_context: 0 &dev->mutex subsys mutex#27 &k->k_lock irq_context: 0 &dev->mutex subsys mutex#27 fs_reclaim irq_context: 0 &dev->mutex subsys mutex#27 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex subsys mutex#27 pool_lock#2 irq_context: 0 &dev->mutex subsys mutex#27 &x->wait#9 irq_context: 0 &dev->mutex subsys mutex#27 &obj_hash[i].lock irq_context: 0 &dev->mutex subsys mutex#27 platform_devid_ida.xa_lock irq_context: 0 &dev->mutex subsys mutex#27 &k->list_lock irq_context: 0 &dev->mutex subsys mutex#27 lock irq_context: 0 &dev->mutex subsys mutex#27 lock kernfs_idr_lock irq_context: 0 &dev->mutex subsys mutex#27 &root->kernfs_rwsem irq_context: 0 &dev->mutex subsys mutex#27 &root->kernfs_rwsem 
&root->kernfs_iattr_rwsem irq_context: 0 &dev->mutex subsys mutex#27 bus_type_sem irq_context: 0 &dev->mutex subsys mutex#27 sysfs_symlink_target_lock irq_context: 0 &dev->mutex subsys mutex#27 &pcp->lock &zone->lock irq_context: 0 &dev->mutex subsys mutex#27 &zone->lock irq_context: 0 &dev->mutex subsys mutex#27 &____s->seqcount irq_context: 0 &dev->mutex subsys mutex#27 &root->kernfs_rwsem irq_context: 0 &dev->mutex subsys mutex#27 &dev->power.lock irq_context: 0 &dev->mutex subsys mutex#27 dpm_list_mtx irq_context: 0 &dev->mutex subsys mutex#27 &(&priv->bus_notifier)->rwsem irq_context: 0 &dev->mutex subsys mutex#27 &c->lock irq_context: 0 &dev->mutex subsys mutex#27 uevent_sock_mutex irq_context: 0 &dev->mutex subsys mutex#27 rcu_read_lock &pool->lock/1 irq_context: 0 &dev->mutex subsys mutex#27 rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 &dev->mutex subsys mutex#27 rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 &dev->mutex subsys mutex#27 rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 &dev->mutex subsys mutex#27 rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 &dev->mutex subsys mutex#27 rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &dev->mutex subsys mutex#27 running_helpers_waitq.lock irq_context: 0 &dev->mutex subsys mutex#27 &rq->__lock irq_context: 0 &dev->mutex subsys mutex#27 &dev->mutex &dev->power.lock irq_context: 0 &dev->mutex subsys mutex#27 &dev->mutex &k->list_lock irq_context: 0 &dev->mutex subsys mutex#27 &dev->mutex &k->k_lock irq_context: 0 &dev->mutex subsys mutex#27 &dev->mutex device_links_srcu irq_context: 0 &dev->mutex subsys mutex#27 &dev->mutex fwnode_link_lock irq_context: 0 &dev->mutex subsys mutex#27 &dev->mutex device_links_lock irq_context: 0 &dev->mutex subsys mutex#27 &dev->mutex &(&priv->bus_notifier)->rwsem irq_context: 0 &dev->mutex subsys mutex#27 &dev->mutex sysfs_symlink_target_lock irq_context: 0 &dev->mutex subsys mutex#27 &dev->mutex fs_reclaim irq_context: 0 &dev->mutex subsys mutex#27 &dev->mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex subsys mutex#27 &dev->mutex pool_lock#2 irq_context: 0 &dev->mutex subsys mutex#27 &dev->mutex &c->lock irq_context: 0 &dev->mutex subsys mutex#27 &dev->mutex &____s->seqcount irq_context: 0 &dev->mutex subsys mutex#27 &dev->mutex lock irq_context: 0 &dev->mutex subsys mutex#27 &dev->mutex lock kernfs_idr_lock irq_context: 0 &dev->mutex subsys mutex#27 &dev->mutex &root->kernfs_rwsem irq_context: 0 &dev->mutex subsys mutex#27 &dev->mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 &dev->mutex subsys mutex#27 &dev->mutex deferred_probe_mutex irq_context: 0 &dev->mutex subsys mutex#27 &dev->mutex uevent_sock_mutex irq_context: 0 &dev->mutex subsys mutex#27 &dev->mutex &obj_hash[i].lock irq_context: 0 &dev->mutex subsys mutex#27 &dev->mutex rcu_read_lock &pool->lock/1 irq_context: 0 &dev->mutex subsys mutex#27 &dev->mutex rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 &dev->mutex subsys mutex#27 &dev->mutex rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 &dev->mutex subsys mutex#27 &dev->mutex rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 &dev->mutex subsys mutex#27 &dev->mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 &dev->mutex subsys mutex#27 &dev->mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &dev->mutex subsys mutex#27 &dev->mutex 
running_helpers_waitq.lock irq_context: 0 &dev->mutex subsys mutex#27 &dev->mutex &rq->__lock irq_context: 0 &dev->mutex subsys mutex#27 &dev->mutex probe_waitqueue.lock irq_context: 0 &dev->mutex subsys mutex#27 subsys mutex#3 irq_context: softirq rcu_callback fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 &dev->mutex subsys mutex#27 wakeup_ida.xa_lock irq_context: 0 &dev->mutex subsys mutex#27 &pcp->lock &zone->lock &____s->seqcount irq_context: softirq rcu_callback fill_pool_map-wait-type-override pool_lock irq_context: 0 &dev->mutex subsys mutex#27 gdp_mutex irq_context: 0 &dev->mutex subsys mutex#27 gdp_mutex &k->list_lock irq_context: 0 &dev->mutex subsys mutex#27 gdp_mutex fs_reclaim irq_context: 0 &dev->mutex subsys mutex#27 gdp_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex subsys mutex#27 gdp_mutex pool_lock#2 irq_context: 0 &dev->mutex subsys mutex#27 gdp_mutex lock irq_context: 0 &dev->mutex subsys mutex#27 gdp_mutex lock kernfs_idr_lock irq_context: 0 &dev->mutex subsys mutex#27 gdp_mutex &root->kernfs_rwsem irq_context: 0 &dev->mutex subsys mutex#27 gdp_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 &dev->mutex subsys mutex#27 subsys mutex#15 irq_context: 0 &dev->mutex subsys mutex#27 subsys mutex#15 &k->k_lock irq_context: 0 &dev->mutex subsys mutex#27 events_lock irq_context: 0 &dev->mutex subsys mutex#27 rtcdev_lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex i8042_mutex (&timer.timer) irq_context: hardirq &serio->lock &ps2dev->wait irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex &k->list_lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex gdp_mutex irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex gdp_mutex &k->list_lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex gdp_mutex fs_reclaim irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex gdp_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex gdp_mutex pool_lock#2 irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex gdp_mutex lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex gdp_mutex lock kernfs_idr_lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex gdp_mutex &root->kernfs_rwsem irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex gdp_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex lock kernfs_idr_lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex &root->kernfs_rwsem irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex bus_type_sem irq_context: 0 (wq_completion)events_long 
serio_event_work serio_mutex &dev->mutex &serio->drv_mutex sysfs_symlink_target_lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex &root->kernfs_rwsem irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex &dev->power.lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex dpm_list_mtx irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex uevent_sock_mutex irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex running_helpers_waitq.lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex &k->k_lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex subsys mutex#30 irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex subsys mutex#30 &k->k_lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex (console_sem).lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex console_lock console_srcu console_owner_lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex console_lock console_srcu console_owner irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex console_lock console_srcu console_owner &port_lock_key irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex console_lock console_srcu console_owner console_owner_lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex &rq->__lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex fs_reclaim irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex pool_lock#2 irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex 
input_mutex &dev->mutex#2 irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex &k->list_lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex &k->k_lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex &led_cdev->led_access irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex &led_cdev->led_access fs_reclaim irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex &led_cdev->led_access fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex &led_cdev->led_access pool_lock#2 irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex &led_cdev->led_access &x->wait#9 irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex &led_cdev->led_access &obj_hash[i].lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex &led_cdev->led_access &k->list_lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex &led_cdev->led_access lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex &led_cdev->led_access lock kernfs_idr_lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex &led_cdev->led_access lock kernfs_idr_lock pool_lock#2 irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex &led_cdev->led_access &root->kernfs_rwsem irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex &led_cdev->led_access &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex &led_cdev->led_access bus_type_sem irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex &led_cdev->led_access sysfs_symlink_target_lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex &led_cdev->led_access &c->lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex &led_cdev->led_access &pcp->lock &zone->lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex &led_cdev->led_access &zone->lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex &led_cdev->led_access &____s->seqcount irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex &led_cdev->led_access &root->kernfs_rwsem irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex &led_cdev->led_access &dev->power.lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex &led_cdev->led_access dpm_list_mtx irq_context: 0 (wq_completion)events_long 
serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex &led_cdev->led_access uevent_sock_mutex irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex &led_cdev->led_access rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex &led_cdev->led_access rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex &led_cdev->led_access rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex &led_cdev->led_access rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex &led_cdev->led_access rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex &led_cdev->led_access rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex &led_cdev->led_access running_helpers_waitq.lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex &led_cdev->led_access &k->k_lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex &led_cdev->led_access subsys mutex#64 irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex &led_cdev->led_access subsys mutex#64 &k->k_lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex &led_cdev->led_access leds_list_lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex &led_cdev->led_access triggers_list_lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex &led_cdev->led_access triggers_list_lock &led_cdev->trigger_lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex &led_cdev->led_access triggers_list_lock &led_cdev->trigger_lock fs_reclaim irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex &led_cdev->led_access triggers_list_lock &led_cdev->trigger_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex &led_cdev->led_access triggers_list_lock &led_cdev->trigger_lock &c->lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex &led_cdev->led_access triggers_list_lock &led_cdev->trigger_lock &pcp->lock &zone->lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex &led_cdev->led_access triggers_list_lock &led_cdev->trigger_lock &zone->lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex &led_cdev->led_access triggers_list_lock &led_cdev->trigger_lock &____s->seqcount irq_context: 0 (wq_completion)events_long 
serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex &led_cdev->led_access triggers_list_lock &led_cdev->trigger_lock pool_lock#2 irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex &led_cdev->led_access triggers_list_lock &led_cdev->trigger_lock &trig->leddev_list_lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex &led_cdev->led_access triggers_list_lock &led_cdev->trigger_lock rcu_read_lock &dev->event_lock#2 irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex &led_cdev->led_access triggers_list_lock &led_cdev->trigger_lock uevent_sock_mutex irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex &led_cdev->led_access triggers_list_lock &led_cdev->trigger_lock &obj_hash[i].lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex &led_cdev->led_access triggers_list_lock &led_cdev->trigger_lock rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex &led_cdev->led_access triggers_list_lock &led_cdev->trigger_lock rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex &led_cdev->led_access triggers_list_lock &led_cdev->trigger_lock rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex &led_cdev->led_access triggers_list_lock &led_cdev->trigger_lock rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex &led_cdev->led_access triggers_list_lock &led_cdev->trigger_lock running_helpers_waitq.lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex &led_cdev->led_access &pcp->lock &zone->lock &____s->seqcount irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex input_ida.xa_lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex &x->wait#9 irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex chrdevs_lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex lock kernfs_idr_lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex &root->kernfs_rwsem irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex bus_type_sem irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex &pcp->lock &zone->lock irq_context: 0 
(wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex &zone->lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex &____s->seqcount irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex sysfs_symlink_target_lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex &root->kernfs_rwsem irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex &dev->power.lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex dpm_list_mtx irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex &c->lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex req_lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex &p->pi_lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex &x->wait#11 irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex &pool->lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex &pool->lock &p->pi_lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex &rq->__lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex &cfs_rq->removed.lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex uevent_sock_mutex irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex 
&serio->drv_mutex input_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex running_helpers_waitq.lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex subsys mutex#30 irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex subsys mutex#30 &k->k_lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex input_devices_poll_wait.lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex lock kernfs_idr_lock pool_lock#2 irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &root->kernfs_rwsem kernfs_idr_lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &root->kernfs_rwsem &obj_hash[i].lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &root->kernfs_rwsem pool_lock#2 irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex deferred_probe_mutex irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex uevent_sock_mutex irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &obj_hash[i].lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex running_helpers_waitq.lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex probe_waitqueue.lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex i8042_lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex fs_reclaim irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex pool_lock#2 irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex &x->wait#9 irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex &serio->lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex i8042_mutex irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex 
&serio->drv_mutex psmouse_mutex i8042_mutex &serio->lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex i8042_mutex i8042_lock irq_context: hardirq &serio->lock &dev->power.lock irq_context: hardirq &serio->lock &dev->event_lock#2 irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex i8042_mutex &ps2dev->wait irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex i8042_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex i8042_mutex &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex i8042_mutex &base->lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex i8042_mutex &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex i8042_mutex &pool->lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex i8042_mutex &rq->__lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex i8042_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 g_smscore_deviceslock irq_context: 0 g_smscore_deviceslock fs_reclaim irq_context: 0 g_smscore_deviceslock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 g_smscore_deviceslock pool_lock#2 irq_context: softirq mm/vmstat.c:2014 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex i8042_mutex (&timer.timer) irq_context: 0 &sig->cred_guard_mutex quarantine_lock irq_context: 0 &sig->cred_guard_mutex &mm->mmap_lock &anon_vma->rwsem &rq->__lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex &c->lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex &pcp->lock &zone->lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex &zone->lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex &____s->seqcount irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex &k->list_lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex gdp_mutex irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex gdp_mutex &k->list_lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex gdp_mutex fs_reclaim irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex gdp_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex gdp_mutex pool_lock#2 irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex 
psmouse_mutex gdp_mutex lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex gdp_mutex lock kernfs_idr_lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex gdp_mutex &root->kernfs_rwsem irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex gdp_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex lock kernfs_idr_lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex &root->kernfs_rwsem irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex bus_type_sem irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex sysfs_symlink_target_lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex &root->kernfs_rwsem irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex &dev->power.lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex dpm_list_mtx irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex &pcp->lock &zone->lock &____s->seqcount irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex &rq->__lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex uevent_sock_mutex irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex running_helpers_waitq.lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex &k->k_lock 
irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex subsys mutex#30 irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex subsys mutex#30 &k->k_lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex (console_sem).lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex console_owner_lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex console_owner irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex console_lock console_srcu console_owner_lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex console_lock console_srcu console_owner irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex console_lock console_srcu console_owner &port_lock_key irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex console_lock console_srcu console_owner console_owner_lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex &cfs_rq->removed.lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex input_mutex irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex input_mutex input_ida.xa_lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex input_mutex fs_reclaim irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex input_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex input_mutex &pcp->lock &zone->lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex input_mutex &zone->lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex input_mutex &____s->seqcount irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex input_mutex pool_lock#2 irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex input_mutex rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex input_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex input_mutex &x->wait#9 irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex input_mutex &dev->mutex#2 irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex input_mutex chrdevs_lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex input_mutex &k->list_lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex 
&dev->mutex &serio->drv_mutex psmouse_mutex input_mutex lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex input_mutex lock kernfs_idr_lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex input_mutex &root->kernfs_rwsem irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex input_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex input_mutex bus_type_sem irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex input_mutex sysfs_symlink_target_lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex input_mutex &root->kernfs_rwsem irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex input_mutex &dev->power.lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex input_mutex dpm_list_mtx irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex input_mutex req_lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex input_mutex &p->pi_lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex input_mutex &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex input_mutex &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex input_mutex &x->wait#11 irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex input_mutex &pool->lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex input_mutex &pool->lock &p->pi_lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex input_mutex &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex input_mutex &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex input_mutex &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex input_mutex &rq->__lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex input_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex input_mutex uevent_sock_mutex irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex input_mutex rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex 
&serio->drv_mutex psmouse_mutex input_mutex rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex input_mutex rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex input_mutex rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex input_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex input_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex input_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex input_mutex running_helpers_waitq.lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex input_mutex &k->k_lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex input_mutex subsys mutex#30 irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex input_mutex subsys mutex#30 &k->k_lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex input_mutex &mousedev->mutex/1 irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex input_mutex &c->lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex input_mutex &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex input_mutex input_devices_poll_wait.lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &rq->__lock irq_context: 0 cx231xx_devlist_mutex irq_context: 0 em28xx_devlist_mutex irq_context: 0 pvr2_context_sync_data.lock irq_context: 0 &dev->mutex core_lock irq_context: 0 &dev->mutex core_lock fs_reclaim irq_context: 0 &dev->mutex core_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex core_lock pool_lock#2 irq_context: 0 &dev->mutex &(&priv->bus_notifier)->rwsem fs_reclaim irq_context: 0 &dev->mutex &(&priv->bus_notifier)->rwsem fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex &(&priv->bus_notifier)->rwsem pool_lock#2 irq_context: 0 &dev->mutex &(&priv->bus_notifier)->rwsem i2c_dev_list_lock irq_context: 0 &dev->mutex &(&priv->bus_notifier)->rwsem &x->wait#9 irq_context: 0 &dev->mutex &(&priv->bus_notifier)->rwsem &obj_hash[i].lock irq_context: 0 &dev->mutex &(&priv->bus_notifier)->rwsem chrdevs_lock irq_context: 0 &dev->mutex &(&priv->bus_notifier)->rwsem &k->list_lock irq_context: 
0 &dev->mutex &(&priv->bus_notifier)->rwsem gdp_mutex irq_context: 0 &dev->mutex &(&priv->bus_notifier)->rwsem gdp_mutex &k->list_lock irq_context: 0 &dev->mutex &(&priv->bus_notifier)->rwsem gdp_mutex fs_reclaim irq_context: 0 &dev->mutex &(&priv->bus_notifier)->rwsem gdp_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex &(&priv->bus_notifier)->rwsem gdp_mutex pool_lock#2 irq_context: 0 &dev->mutex &(&priv->bus_notifier)->rwsem gdp_mutex lock irq_context: 0 &dev->mutex &(&priv->bus_notifier)->rwsem gdp_mutex lock kernfs_idr_lock irq_context: 0 &dev->mutex &(&priv->bus_notifier)->rwsem gdp_mutex &root->kernfs_rwsem irq_context: 0 &dev->mutex &(&priv->bus_notifier)->rwsem gdp_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 &dev->mutex &(&priv->bus_notifier)->rwsem lock irq_context: 0 &dev->mutex &(&priv->bus_notifier)->rwsem lock kernfs_idr_lock irq_context: 0 &dev->mutex &(&priv->bus_notifier)->rwsem &root->kernfs_rwsem irq_context: 0 &dev->mutex &(&priv->bus_notifier)->rwsem &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 &dev->mutex &(&priv->bus_notifier)->rwsem bus_type_sem irq_context: 0 &dev->mutex &(&priv->bus_notifier)->rwsem sysfs_symlink_target_lock irq_context: 0 &dev->mutex &(&priv->bus_notifier)->rwsem &root->kernfs_rwsem irq_context: 0 &dev->mutex &(&priv->bus_notifier)->rwsem &c->lock irq_context: 0 &dev->mutex &(&priv->bus_notifier)->rwsem &pcp->lock &zone->lock irq_context: 0 &dev->mutex &(&priv->bus_notifier)->rwsem &zone->lock irq_context: 0 &dev->mutex &(&priv->bus_notifier)->rwsem &____s->seqcount irq_context: 0 &dev->mutex &(&priv->bus_notifier)->rwsem &dev->power.lock irq_context: 0 &dev->mutex &(&priv->bus_notifier)->rwsem dpm_list_mtx irq_context: 0 &dev->mutex &(&priv->bus_notifier)->rwsem req_lock irq_context: 0 &dev->mutex &(&priv->bus_notifier)->rwsem &p->pi_lock irq_context: 0 &dev->mutex &(&priv->bus_notifier)->rwsem &p->pi_lock &rq->__lock irq_context: 0 &dev->mutex &(&priv->bus_notifier)->rwsem &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &dev->mutex &(&priv->bus_notifier)->rwsem &rq->__lock irq_context: 0 &dev->mutex &(&priv->bus_notifier)->rwsem &x->wait#11 irq_context: 0 &dev->mutex &(&priv->bus_notifier)->rwsem rcu_read_lock pool_lock#2 irq_context: 0 &dev->mutex &(&priv->bus_notifier)->rwsem uevent_sock_mutex irq_context: 0 &dev->mutex &(&priv->bus_notifier)->rwsem rcu_read_lock &pool->lock/1 irq_context: 0 &dev->mutex &(&priv->bus_notifier)->rwsem rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 &dev->mutex &(&priv->bus_notifier)->rwsem rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 &dev->mutex &(&priv->bus_notifier)->rwsem rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 &dev->mutex &(&priv->bus_notifier)->rwsem rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 &dev->mutex &(&priv->bus_notifier)->rwsem rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) batched_entropy_u8.lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) batched_entropy_u8.lock crngs.lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) kfence_freelist_lock irq_context: 0 &dev->mutex &(&priv->bus_notifier)->rwsem running_helpers_waitq.lock irq_context: 0 &dev->mutex &(&priv->bus_notifier)->rwsem &k->k_lock irq_context: 0 &dev->mutex &(&priv->bus_notifier)->rwsem subsys 
mutex#65 irq_context: 0 &dev->mutex &(&priv->bus_notifier)->rwsem subsys mutex#65 &k->k_lock irq_context: 0 &dev->mutex subsys mutex#66 irq_context: 0 &dev->mutex core_lock &k->list_lock irq_context: 0 &dev->mutex core_lock &k->k_lock irq_context: 0 &dev->mutex dvbdev_register_lock irq_context: 0 &dev->mutex dvbdev_register_lock (console_sem).lock irq_context: 0 &dev->mutex dvbdev_register_lock console_lock console_srcu console_owner_lock irq_context: 0 &dev->mutex dvbdev_register_lock console_lock console_srcu console_owner irq_context: 0 &dev->mutex dvbdev_register_lock console_lock console_srcu console_owner &port_lock_key irq_context: 0 &dev->mutex dvbdev_register_lock console_lock console_srcu console_owner console_owner_lock irq_context: 0 &dev->mutex (kmod_concurrent_max).lock irq_context: 0 &dev->mutex &x->wait#17 irq_context: 0 &dev->mutex &dev->mutex &dev->power.lock &dev->power.lock/1 irq_context: 0 &dev->mutex &dev->mutex &dev->power.lock &dev->power.wait_queue irq_context: 0 &dev->mutex &dev->mutex device_links_srcu irq_context: 0 &dev->mutex &dev->mutex fwnode_link_lock irq_context: 0 &dev->mutex &dev->mutex device_links_lock irq_context: 0 &dev->mutex &dev->mutex &(&priv->bus_notifier)->rwsem irq_context: 0 &dev->mutex &dev->mutex sysfs_symlink_target_lock irq_context: 0 &dev->mutex &dev->mutex fs_reclaim irq_context: 0 &dev->mutex &dev->mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex &dev->mutex pool_lock#2 irq_context: 0 &dev->mutex &dev->mutex lock irq_context: 0 &dev->mutex &dev->mutex lock kernfs_idr_lock irq_context: 0 &dev->mutex &dev->mutex &root->kernfs_rwsem irq_context: 0 &dev->mutex &dev->mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 &dev->mutex &dev->mutex &dev->devres_lock irq_context: 0 &dev->mutex &dev->mutex deferred_probe_mutex irq_context: 0 &dev->mutex &dev->mutex uevent_sock_mutex irq_context: 0 &dev->mutex &dev->mutex &obj_hash[i].lock irq_context: 0 &dev->mutex &dev->mutex rcu_read_lock &pool->lock/1 irq_context: 0 &dev->mutex &dev->mutex rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 &dev->mutex &dev->mutex rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 &dev->mutex &dev->mutex rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 &dev->mutex &dev->mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 &dev->mutex &dev->mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &dev->mutex &dev->mutex running_helpers_waitq.lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sig->cred_guard_mutex &mm->mmap_lock &pcp->lock &zone->lock irq_context: 0 &sig->cred_guard_mutex &mm->mmap_lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 tasklist_lock &sighand->siglock &sig->wait_chldexit &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &dev->mutex &dev->mutex &c->lock irq_context: 0 &dev->mutex &dev->mutex &pcp->lock &zone->lock irq_context: 0 &dev->mutex &dev->mutex &zone->lock irq_context: 0 &dev->mutex &dev->mutex &____s->seqcount irq_context: 0 &dev->mutex &dev->mutex &obj_hash[i].lock pool_lock irq_context: 0 &dev->mutex frontend_mutex irq_context: 0 &dev->mutex frontend_mutex fs_reclaim irq_context: 0 &dev->mutex frontend_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex frontend_mutex pool_lock#2 irq_context: 0 &dev->mutex frontend_mutex 
(console_sem).lock irq_context: 0 &dev->mutex frontend_mutex console_lock console_srcu console_owner_lock irq_context: 0 &dev->mutex frontend_mutex console_lock console_srcu console_owner irq_context: 0 &dev->mutex frontend_mutex console_lock console_srcu console_owner &port_lock_key irq_context: 0 &dev->mutex frontend_mutex console_lock console_srcu console_owner console_owner_lock irq_context: 0 &dev->mutex frontend_mutex &rq->__lock irq_context: 0 &dev->mutex frontend_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &dev->mutex frontend_mutex &cfs_rq->removed.lock irq_context: 0 &dev->mutex frontend_mutex &obj_hash[i].lock irq_context: 0 &dev->mutex frontend_mutex dvbdev_register_lock irq_context: 0 &dev->mutex frontend_mutex dvbdev_register_lock fs_reclaim irq_context: 0 &dev->mutex frontend_mutex dvbdev_register_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex frontend_mutex dvbdev_register_lock &c->lock irq_context: 0 &dev->mutex frontend_mutex dvbdev_register_lock &pcp->lock &zone->lock irq_context: 0 &dev->mutex frontend_mutex dvbdev_register_lock &zone->lock irq_context: 0 &dev->mutex frontend_mutex dvbdev_register_lock &____s->seqcount irq_context: 0 &dev->mutex frontend_mutex dvbdev_register_lock pool_lock#2 irq_context: 0 &dev->mutex frontend_mutex dvbdev_register_lock minor_rwsem irq_context: 0 &dev->mutex frontend_mutex dvbdev_register_lock &xa->xa_lock#9 irq_context: 0 &dev->mutex frontend_mutex dvbdev_register_lock &mdev->graph_mutex irq_context: 0 &dev->mutex frontend_mutex dvbdev_register_lock &mdev->graph_mutex fs_reclaim irq_context: 0 &dev->mutex frontend_mutex dvbdev_register_lock &mdev->graph_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex frontend_mutex dvbdev_register_lock &mdev->graph_mutex pool_lock#2 irq_context: 0 &dev->mutex frontend_mutex dvbdev_register_lock (console_sem).lock irq_context: 0 &dev->mutex frontend_mutex dvbdev_register_lock console_lock console_srcu console_owner_lock irq_context: 0 &dev->mutex frontend_mutex dvbdev_register_lock console_lock console_srcu console_owner irq_context: 0 &dev->mutex frontend_mutex dvbdev_register_lock console_lock console_srcu console_owner &port_lock_key irq_context: 0 &dev->mutex frontend_mutex dvbdev_register_lock console_lock console_srcu console_owner console_owner_lock irq_context: 0 &dev->mutex frontend_mutex dvbdev_register_lock &x->wait#9 irq_context: 0 &dev->mutex frontend_mutex dvbdev_register_lock &obj_hash[i].lock irq_context: 0 &dev->mutex frontend_mutex dvbdev_register_lock &k->list_lock irq_context: 0 &dev->mutex frontend_mutex dvbdev_register_lock gdp_mutex irq_context: 0 &dev->mutex frontend_mutex dvbdev_register_lock gdp_mutex &k->list_lock irq_context: 0 &dev->mutex frontend_mutex dvbdev_register_lock gdp_mutex fs_reclaim irq_context: 0 &dev->mutex frontend_mutex dvbdev_register_lock gdp_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex frontend_mutex dvbdev_register_lock gdp_mutex pool_lock#2 irq_context: 0 &dev->mutex frontend_mutex dvbdev_register_lock gdp_mutex lock irq_context: 0 &dev->mutex frontend_mutex dvbdev_register_lock gdp_mutex lock kernfs_idr_lock irq_context: 0 &dev->mutex frontend_mutex dvbdev_register_lock gdp_mutex &root->kernfs_rwsem irq_context: 0 &dev->mutex frontend_mutex dvbdev_register_lock gdp_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 &dev->mutex frontend_mutex dvbdev_register_lock lock irq_context: 0 &dev->mutex frontend_mutex 
dvbdev_register_lock lock kernfs_idr_lock irq_context: 0 &dev->mutex frontend_mutex dvbdev_register_lock &root->kernfs_rwsem irq_context: 0 &dev->mutex frontend_mutex dvbdev_register_lock &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 &dev->mutex frontend_mutex dvbdev_register_lock bus_type_sem irq_context: 0 &dev->mutex frontend_mutex dvbdev_register_lock sysfs_symlink_target_lock irq_context: 0 &dev->mutex frontend_mutex dvbdev_register_lock &root->kernfs_rwsem irq_context: 0 &dev->mutex frontend_mutex dvbdev_register_lock &dev->power.lock irq_context: 0 &dev->mutex frontend_mutex dvbdev_register_lock dpm_list_mtx irq_context: 0 &dev->mutex frontend_mutex dvbdev_register_lock req_lock irq_context: 0 &dev->mutex frontend_mutex dvbdev_register_lock &p->pi_lock irq_context: 0 &dev->mutex frontend_mutex dvbdev_register_lock &p->pi_lock &rq->__lock irq_context: 0 &dev->mutex frontend_mutex dvbdev_register_lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &dev->mutex frontend_mutex dvbdev_register_lock &rq->__lock irq_context: 0 &dev->mutex frontend_mutex dvbdev_register_lock &x->wait#11 irq_context: 0 &dev->mutex frontend_mutex dvbdev_register_lock uevent_sock_mutex irq_context: 0 &dev->mutex frontend_mutex dvbdev_register_lock rcu_read_lock &pool->lock/1 irq_context: 0 &dev->mutex frontend_mutex dvbdev_register_lock rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 &dev->mutex frontend_mutex dvbdev_register_lock rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 &dev->mutex frontend_mutex dvbdev_register_lock rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 &dev->mutex frontend_mutex dvbdev_register_lock running_helpers_waitq.lock irq_context: 0 &dev->mutex frontend_mutex dvbdev_register_lock &k->k_lock irq_context: 0 &dev->mutex frontend_mutex dvbdev_register_lock subsys mutex#67 irq_context: 0 &dev->mutex frontend_mutex dvbdev_register_lock subsys mutex#67 &k->k_lock irq_context: 0 &dev->mutex init_mm.page_table_lock irq_context: 0 &dev->mutex &dmxdev->lock irq_context: 0 &dev->mutex dvbdev_register_lock fs_reclaim irq_context: 0 &dev->mutex dvbdev_register_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex dvbdev_register_lock pool_lock#2 irq_context: 0 &dev->mutex dvbdev_register_lock minor_rwsem irq_context: 0 &dev->mutex dvbdev_register_lock &pcp->lock &zone->lock irq_context: 0 &dev->mutex dvbdev_register_lock &zone->lock irq_context: 0 &dev->mutex dvbdev_register_lock &____s->seqcount irq_context: 0 &dev->mutex dvbdev_register_lock &xa->xa_lock#9 irq_context: 0 &dev->mutex dvbdev_register_lock &mdev->graph_mutex irq_context: 0 &dev->mutex dvbdev_register_lock &c->lock irq_context: 0 &dev->mutex dvbdev_register_lock &xa->xa_lock#9 pool_lock#2 irq_context: 0 &dev->mutex dvbdev_register_lock &mdev->graph_mutex fs_reclaim irq_context: 0 &dev->mutex dvbdev_register_lock &mdev->graph_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex dvbdev_register_lock &mdev->graph_mutex pool_lock#2 irq_context: 0 &dev->mutex dvbdev_register_lock &mdev->graph_mutex &obj_hash[i].lock irq_context: 0 &dev->mutex dvbdev_register_lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &dev->mutex dvbdev_register_lock &x->wait#9 irq_context: 0 &dev->mutex dvbdev_register_lock &obj_hash[i].lock irq_context: 0 &dev->mutex dvbdev_register_lock &k->list_lock irq_context: 0 &dev->mutex dvbdev_register_lock gdp_mutex irq_context: 0 &dev->mutex dvbdev_register_lock gdp_mutex &k->list_lock 
irq_context: 0 &dev->mutex dvbdev_register_lock lock irq_context: 0 &dev->mutex dvbdev_register_lock lock kernfs_idr_lock irq_context: 0 &dev->mutex dvbdev_register_lock &root->kernfs_rwsem irq_context: 0 &dev->mutex dvbdev_register_lock &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 &dev->mutex dvbdev_register_lock bus_type_sem irq_context: 0 &dev->mutex dvbdev_register_lock sysfs_symlink_target_lock irq_context: 0 &dev->mutex dvbdev_register_lock &root->kernfs_rwsem irq_context: 0 &dev->mutex dvbdev_register_lock &dev->power.lock irq_context: 0 &dev->mutex dvbdev_register_lock dpm_list_mtx irq_context: 0 &dev->mutex dvbdev_register_lock req_lock irq_context: 0 &dev->mutex dvbdev_register_lock &p->pi_lock irq_context: 0 &dev->mutex dvbdev_register_lock &p->pi_lock &rq->__lock irq_context: 0 &dev->mutex dvbdev_register_lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &dev->mutex dvbdev_register_lock &rq->__lock irq_context: 0 &dev->mutex dvbdev_register_lock &x->wait#11 irq_context: 0 &dev->mutex dvbdev_register_lock uevent_sock_mutex irq_context: 0 &dev->mutex dvbdev_register_lock rcu_read_lock &pool->lock/1 irq_context: 0 &dev->mutex dvbdev_register_lock rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 &dev->mutex dvbdev_register_lock rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 &dev->mutex dvbdev_register_lock rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 &dev->mutex dvbdev_register_lock running_helpers_waitq.lock irq_context: 0 &dev->mutex dvbdev_register_lock &k->k_lock irq_context: 0 &dev->mutex dvbdev_register_lock subsys mutex#67 irq_context: 0 &dev->mutex dvbdev_register_lock subsys mutex#67 &k->k_lock irq_context: 0 &dev->mutex &dvbdemux->mutex irq_context: 0 &dev->mutex media_devnode_lock irq_context: 0 &dev->mutex subsys mutex#68 irq_context: 0 &dev->mutex videodev_lock irq_context: 0 &dev->mutex subsys mutex#69 irq_context: 0 &dev->mutex subsys mutex#69 &k->k_lock irq_context: 0 &dev->mutex &xa->xa_lock#9 irq_context: 0 &dev->mutex &mdev->graph_mutex irq_context: 0 &dev->mutex &mdev->graph_mutex fs_reclaim irq_context: 0 &dev->mutex &mdev->graph_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex &mdev->graph_mutex pool_lock#2 irq_context: 0 &dev->mutex gdp_mutex &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &dev->mutex vimc_sensor:393:(&vsensor->hdl)->_lock irq_context: 0 &dev->mutex &mdev->graph_mutex batched_entropy_u8.lock irq_context: 0 &dev->mutex &mdev->graph_mutex kfence_freelist_lock irq_context: 0 &dev->mutex &v4l2_dev->lock irq_context: 0 &dev->mutex vimc_debayer:578:(&vdebayer->hdl)->_lock irq_context: 0 &dev->mutex vimc_lens:61:(&vlens->hdl)->_lock irq_context: 0 &mm->mmap_lock &cfs_rq->removed.lock irq_context: 0 &mm->mmap_lock &obj_hash[i].lock irq_context: 0 &dev->mutex tk_core.seq.seqcount irq_context: 0 &dev->mutex vivid_ctrls:1606:(hdl_user_gen)->_lock irq_context: 0 &dev->mutex vivid_ctrls:1608:(hdl_user_vid)->_lock irq_context: 0 &dev->mutex vivid_ctrls:1610:(hdl_user_aud)->_lock irq_context: 0 &dev->mutex vivid_ctrls:1612:(hdl_streaming)->_lock irq_context: 0 &dev->mutex vivid_ctrls:1614:(hdl_sdtv_cap)->_lock irq_context: 0 &dev->mutex vivid_ctrls:1616:(hdl_loop_cap)->_lock irq_context: 0 &dev->mutex vivid_ctrls:1618:(hdl_fb)->_lock irq_context: 0 &dev->mutex vivid_ctrls:1620:(hdl_vid_cap)->_lock irq_context: 0 &dev->mutex vivid_ctrls:1622:(hdl_vid_out)->_lock irq_context: 0 &dev->mutex vivid_ctrls:1625:(hdl_vbi_cap)->_lock irq_context: 0 
&dev->mutex vivid_ctrls:1627:(hdl_vbi_out)->_lock irq_context: 0 &dev->mutex vivid_ctrls:1630:(hdl_radio_rx)->_lock irq_context: 0 &dev->mutex vivid_ctrls:1632:(hdl_radio_tx)->_lock irq_context: 0 &dev->mutex vivid_ctrls:1634:(hdl_sdr_cap)->_lock irq_context: 0 &dev->mutex vivid_ctrls:1636:(hdl_meta_cap)->_lock irq_context: 0 &dev->mutex vivid_ctrls:1638:(hdl_meta_out)->_lock irq_context: 0 &dev->mutex vivid_ctrls:1640:(hdl_tch_cap)->_lock irq_context: 0 &dev->mutex vivid_ctrls:1606:(hdl_user_gen)->_lock vivid_ctrls:1620:(hdl_vid_cap)->_lock irq_context: 0 &dev->mutex vivid_ctrls:1606:(hdl_user_gen)->_lock fs_reclaim irq_context: 0 &dev->mutex vivid_ctrls:1606:(hdl_user_gen)->_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex vivid_ctrls:1606:(hdl_user_gen)->_lock pool_lock#2 irq_context: 0 &dev->mutex vivid_ctrls:1606:(hdl_user_gen)->_lock &c->lock irq_context: 0 &dev->mutex vivid_ctrls:1606:(hdl_user_gen)->_lock &____s->seqcount irq_context: 0 &dev->mutex vivid_ctrls:1608:(hdl_user_vid)->_lock vivid_ctrls:1620:(hdl_vid_cap)->_lock irq_context: 0 &dev->mutex vivid_ctrls:1608:(hdl_user_vid)->_lock fs_reclaim irq_context: 0 &dev->mutex vivid_ctrls:1608:(hdl_user_vid)->_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex vivid_ctrls:1608:(hdl_user_vid)->_lock pool_lock#2 irq_context: 0 &dev->mutex vivid_ctrls:1610:(hdl_user_aud)->_lock vivid_ctrls:1620:(hdl_vid_cap)->_lock irq_context: 0 &dev->mutex vivid_ctrls:1610:(hdl_user_aud)->_lock fs_reclaim irq_context: 0 &dev->mutex vivid_ctrls:1610:(hdl_user_aud)->_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex vivid_ctrls:1610:(hdl_user_aud)->_lock pool_lock#2 irq_context: 0 &dev->mutex vivid_ctrls:1612:(hdl_streaming)->_lock vivid_ctrls:1620:(hdl_vid_cap)->_lock irq_context: 0 &dev->mutex vivid_ctrls:1612:(hdl_streaming)->_lock fs_reclaim irq_context: 0 &dev->mutex vivid_ctrls:1612:(hdl_streaming)->_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex vivid_ctrls:1612:(hdl_streaming)->_lock pool_lock#2 irq_context: 0 &dev->mutex vivid_ctrls:1612:(hdl_streaming)->_lock &c->lock irq_context: 0 &dev->mutex vivid_ctrls:1612:(hdl_streaming)->_lock &____s->seqcount irq_context: 0 &dev->mutex vivid_ctrls:1614:(hdl_sdtv_cap)->_lock vivid_ctrls:1620:(hdl_vid_cap)->_lock irq_context: 0 &dev->mutex vivid_ctrls:1614:(hdl_sdtv_cap)->_lock fs_reclaim irq_context: 0 &dev->mutex vivid_ctrls:1614:(hdl_sdtv_cap)->_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex vivid_ctrls:1614:(hdl_sdtv_cap)->_lock pool_lock#2 irq_context: 0 &dev->mutex vivid_ctrls:1616:(hdl_loop_cap)->_lock vivid_ctrls:1620:(hdl_vid_cap)->_lock irq_context: 0 &dev->mutex vivid_ctrls:1616:(hdl_loop_cap)->_lock fs_reclaim irq_context: 0 &dev->mutex vivid_ctrls:1616:(hdl_loop_cap)->_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex vivid_ctrls:1616:(hdl_loop_cap)->_lock pool_lock#2 irq_context: 0 &dev->mutex vivid_ctrls:1606:(hdl_user_gen)->_lock vivid_ctrls:1622:(hdl_vid_out)->_lock irq_context: 0 &dev->mutex vivid_ctrls:1610:(hdl_user_aud)->_lock vivid_ctrls:1622:(hdl_vid_out)->_lock irq_context: 0 &dev->mutex vivid_ctrls:1612:(hdl_streaming)->_lock vivid_ctrls:1622:(hdl_vid_out)->_lock irq_context: 0 &dev->mutex vivid_ctrls:1606:(hdl_user_gen)->_lock vivid_ctrls:1625:(hdl_vbi_cap)->_lock irq_context: 0 &dev->mutex vivid_ctrls:1612:(hdl_streaming)->_lock vivid_ctrls:1625:(hdl_vbi_cap)->_lock irq_context: 0 &dev->mutex 
vivid_ctrls:1614:(hdl_sdtv_cap)->_lock vivid_ctrls:1625:(hdl_vbi_cap)->_lock irq_context: 0 &dev->mutex vivid_ctrls:1616:(hdl_loop_cap)->_lock vivid_ctrls:1625:(hdl_vbi_cap)->_lock irq_context: 0 &dev->mutex vivid_ctrls:1606:(hdl_user_gen)->_lock vivid_ctrls:1627:(hdl_vbi_out)->_lock irq_context: 0 &dev->mutex vivid_ctrls:1612:(hdl_streaming)->_lock vivid_ctrls:1627:(hdl_vbi_out)->_lock irq_context: 0 &dev->mutex vivid_ctrls:1606:(hdl_user_gen)->_lock vivid_ctrls:1630:(hdl_radio_rx)->_lock irq_context: 0 &dev->mutex vivid_ctrls:1610:(hdl_user_aud)->_lock vivid_ctrls:1630:(hdl_radio_rx)->_lock irq_context: 0 &dev->mutex vivid_ctrls:1606:(hdl_user_gen)->_lock vivid_ctrls:1632:(hdl_radio_tx)->_lock irq_context: 0 &dev->mutex vivid_ctrls:1610:(hdl_user_aud)->_lock vivid_ctrls:1632:(hdl_radio_tx)->_lock irq_context: 0 &dev->mutex vivid_ctrls:1606:(hdl_user_gen)->_lock vivid_ctrls:1634:(hdl_sdr_cap)->_lock irq_context: 0 &dev->mutex vivid_ctrls:1612:(hdl_streaming)->_lock vivid_ctrls:1634:(hdl_sdr_cap)->_lock irq_context: 0 &dev->mutex vivid_ctrls:1606:(hdl_user_gen)->_lock vivid_ctrls:1636:(hdl_meta_cap)->_lock irq_context: 0 &dev->mutex vivid_ctrls:1612:(hdl_streaming)->_lock vivid_ctrls:1636:(hdl_meta_cap)->_lock irq_context: 0 &dev->mutex vivid_ctrls:1606:(hdl_user_gen)->_lock vivid_ctrls:1638:(hdl_meta_out)->_lock irq_context: 0 &dev->mutex vivid_ctrls:1612:(hdl_streaming)->_lock vivid_ctrls:1638:(hdl_meta_out)->_lock irq_context: 0 &dev->mutex vivid_ctrls:1606:(hdl_user_gen)->_lock vivid_ctrls:1640:(hdl_tch_cap)->_lock irq_context: 0 &dev->mutex vivid_ctrls:1612:(hdl_streaming)->_lock vivid_ctrls:1640:(hdl_tch_cap)->_lock irq_context: 0 &dev->mutex vivid_ctrls:1606:(hdl_user_gen)->_lock &zone->lock irq_context: 0 &dev->mutex vivid_ctrls:1606:(hdl_user_gen)->_lock &zone->lock &____s->seqcount irq_context: 0 &dev->mutex vivid_ctrls:1606:(hdl_user_gen)->_lock &obj_hash[i].lock irq_context: 0 &adap->kthread_waitq irq_context: 0 &dev->mutex cec_devnode_lock irq_context: 0 &dev->mutex subsys mutex#70 irq_context: 0 &dev->mutex pin_fs_lock irq_context: 0 &dev->mutex &sb->s_type->i_mutex_key#3 irq_context: 0 &dev->mutex &sb->s_type->i_mutex_key#3 &sb->s_type->i_lock_key#7 irq_context: 0 &dev->mutex &sb->s_type->i_mutex_key#3 rename_lock.seqcount irq_context: 0 &dev->mutex &sb->s_type->i_mutex_key#3 fs_reclaim irq_context: 0 &dev->mutex &sb->s_type->i_mutex_key#3 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex &sb->s_type->i_mutex_key#3 &c->lock irq_context: 0 &dev->mutex &sb->s_type->i_mutex_key#3 &____s->seqcount irq_context: 0 &dev->mutex &sb->s_type->i_mutex_key#3 pool_lock#2 irq_context: 0 &dev->mutex &sb->s_type->i_mutex_key#3 &dentry->d_lock irq_context: 0 &dev->mutex &sb->s_type->i_mutex_key#3 rcu_read_lock rename_lock.seqcount irq_context: 0 &dev->mutex &sb->s_type->i_mutex_key#3 &dentry->d_lock &wq irq_context: 0 &dev->mutex &sb->s_type->i_mutex_key#3 mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex &sb->s_type->i_mutex_key#3 &s->s_inode_list_lock irq_context: 0 &dev->mutex &sb->s_type->i_mutex_key#3 tk_core.seq.seqcount irq_context: 0 &dev->mutex &sb->s_type->i_mutex_key#3 &sb->s_type->i_lock_key#7 &dentry->d_lock irq_context: 0 &dev->mutex &adap->lock irq_context: 0 &dev->mutex &adap->lock tk_core.seq.seqcount irq_context: 0 &dev->mutex &adap->lock &adap->devnode.lock_fhs irq_context: 0 &dev->cec_xfers_slock irq_context: 0 &dev->kthread_waitq_cec irq_context: 0 &dev->mutex vivid_ctrls:1606:(hdl_user_gen)->_lock &pcp->lock &zone->lock irq_context: 
0 &dev->mutex &sb->s_type->i_mutex_key#3 &pcp->lock &zone->lock irq_context: 0 &dev->mutex &sb->s_type->i_mutex_key#3 &zone->lock irq_context: 0 sb_writers &type->i_mutex_dir_key/1 &meta->lock irq_context: 0 sb_writers &type->i_mutex_dir_key/1 kfence_freelist_lock irq_context: 0 &dev->mutex vivid_ctrls:1608:(hdl_user_vid)->_lock &c->lock irq_context: 0 &dev->mutex vivid_ctrls:1608:(hdl_user_vid)->_lock &____s->seqcount irq_context: 0 &dev->mutex vivid_ctrls:1610:(hdl_user_aud)->_lock &c->lock irq_context: 0 &dev->mutex vivid_ctrls:1610:(hdl_user_aud)->_lock &____s->seqcount irq_context: 0 &dev->mutex vivid_ctrls:1606:(hdl_user_gen)->_lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &dev->mutex vivid_ctrls:1614:(hdl_sdtv_cap)->_lock &c->lock irq_context: 0 &dev->mutex vivid_ctrls:1614:(hdl_sdtv_cap)->_lock &pcp->lock &zone->lock irq_context: 0 &dev->mutex vivid_ctrls:1614:(hdl_sdtv_cap)->_lock &zone->lock irq_context: 0 &dev->mutex vivid_ctrls:1614:(hdl_sdtv_cap)->_lock &____s->seqcount irq_context: 0 &dev->mutex vivid_ctrls:1612:(hdl_streaming)->_lock &pcp->lock &zone->lock irq_context: 0 &dev->mutex vivid_ctrls:1612:(hdl_streaming)->_lock &zone->lock irq_context: 0 &dev->mutex rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override &pcp->lock &zone->lock irq_context: 0 &dev->mutex rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override &zone->lock irq_context: softirq rcu_callback rcu_read_lock &pool->lock fill_pool_map-wait-type-override pool_lock#2 irq_context: softirq rcu_callback rcu_read_lock &pool->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 sb_writers &type->i_mutex_dir_key/1 batched_entropy_u8.lock irq_context: 0 &dev->mutex vivid_ctrls:1608:(hdl_user_vid)->_lock &pcp->lock &zone->lock irq_context: 0 &dev->mutex vivid_ctrls:1608:(hdl_user_vid)->_lock &zone->lock irq_context: softirq mm/memcontrol.c:589 rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&(&kfence_timer)->work) &rq->__lock &cfs_rq->removed.lock irq_context: 0 &dev->mutex vivid_ctrls:1606:(hdl_user_gen)->_lock &rq->__lock irq_context: 0 &dev->mutex vivid_ctrls:1616:(hdl_loop_cap)->_lock &c->lock irq_context: 0 &dev->mutex vivid_ctrls:1616:(hdl_loop_cap)->_lock &pcp->lock &zone->lock irq_context: 0 &dev->mutex vivid_ctrls:1616:(hdl_loop_cap)->_lock &zone->lock irq_context: 0 &dev->mutex vivid_ctrls:1616:(hdl_loop_cap)->_lock &____s->seqcount irq_context: softirq &(&tbl->gc_work)->timer irq_context: softirq &(&tbl->gc_work)->timer rcu_read_lock &pool->lock irq_context: softirq &(&tbl->gc_work)->timer rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: softirq &(&tbl->gc_work)->timer rcu_read_lock &pool->lock &p->pi_lock irq_context: softirq &(&tbl->gc_work)->timer rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: softirq &(&tbl->gc_work)->timer rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&tbl->gc_work)->work) irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&tbl->gc_work)->work) &tbl->lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&tbl->gc_work)->work) &tbl->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&tbl->gc_work)->work) &tbl->lock &base->lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&tbl->gc_work)->work) &tbl->lock 
&base->lock &obj_hash[i].lock irq_context: softirq &(&group->avgs_work)->timer rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &dev->mutex &sb->s_type->i_mutex_key#3 &pcp->lock &zone->lock &____s->seqcount irq_context: 0 ptp_clocks_map.xa_lock irq_context: 0 subsys mutex#71 irq_context: 0 subsys mutex#71 &k->k_lock irq_context: 0 pers_lock irq_context: 0 _lock irq_context: 0 dm_bufio_clients_lock irq_context: 0 _ps_lock irq_context: 0 _lock#2 irq_context: 0 _lock#3 irq_context: 0 register_lock#2 irq_context: 0 subsys mutex#72 irq_context: 0 subsys mutex#72 &k->k_lock irq_context: 0 bp_lock irq_context: 0 bp_lock irq_context: 0 subsys mutex#73 irq_context: 0 subsys mutex#73 &k->k_lock irq_context: softirq (&dsp_spl_tl) irq_context: softirq (&dsp_spl_tl) dsp_lock irq_context: softirq (&dsp_spl_tl) dsp_lock iclock_lock irq_context: softirq (&dsp_spl_tl) dsp_lock iclock_lock tk_core.seq.seqcount irq_context: softirq (&dsp_spl_tl) dsp_lock &obj_hash[i].lock irq_context: softirq (&dsp_spl_tl) dsp_lock &base->lock irq_context: softirq (&dsp_spl_tl) dsp_lock &base->lock &obj_hash[i].lock irq_context: 0 leds_list_lock &led_cdev->trigger_lock irq_context: 0 rtnl_mutex lock#7 irq_context: 0 intf_mutex irq_context: 0 iscsi_transport_lock irq_context: 0 subsys mutex#74 irq_context: 0 subsys mutex#74 &k->k_lock irq_context: 0 link_ops_rwsem irq_context: 0 disable_lock irq_context: 0 disable_lock fs_reclaim irq_context: 0 disable_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 disable_lock pool_lock#2 irq_context: 0 disable_lock &x->wait#9 irq_context: 0 disable_lock &obj_hash[i].lock irq_context: 0 disable_lock &k->list_lock irq_context: 0 disable_lock lock irq_context: 0 disable_lock lock kernfs_idr_lock irq_context: 0 disable_lock &root->kernfs_rwsem irq_context: 0 disable_lock &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 disable_lock bus_type_sem irq_context: 0 disable_lock sysfs_symlink_target_lock irq_context: 0 disable_lock &c->lock irq_context: 0 disable_lock &pcp->lock &zone->lock irq_context: 0 disable_lock &zone->lock irq_context: 0 disable_lock &____s->seqcount irq_context: 0 disable_lock &k->k_lock irq_context: 0 disable_lock &root->kernfs_rwsem irq_context: 0 disable_lock &dev->power.lock irq_context: 0 disable_lock dpm_list_mtx irq_context: 0 disable_lock &(&priv->bus_notifier)->rwsem irq_context: 0 disable_lock uevent_sock_mutex irq_context: 0 disable_lock rcu_read_lock &pool->lock/1 irq_context: 0 disable_lock rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 disable_lock rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 disable_lock rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 disable_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 disable_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 disable_lock running_helpers_waitq.lock irq_context: 0 disable_lock &rq->__lock irq_context: 0 &tx_task->waiting irq_context: 0 disable_lock &dev->mutex &dev->power.lock irq_context: 0 disable_lock &dev->mutex &k->list_lock irq_context: 0 disable_lock &dev->mutex &k->k_lock irq_context: 0 disable_lock subsys mutex#3 irq_context: 0 subsys mutex#75 irq_context: 0 subsys mutex#75 &k->k_lock irq_context: 0 service_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) &x->wait#17 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 &root->kernfs_rwsem &cfs_rq->removed.lock irq_context: 0 tasklist_lock 
&n->list_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) &n->list_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) &n->list_lock &c->lock irq_context: 0 misc_mtx fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 misc_mtx fill_pool_map-wait-type-override &c->lock irq_context: 0 misc_mtx fill_pool_map-wait-type-override &pcp->lock &zone->lock irq_context: 0 misc_mtx fill_pool_map-wait-type-override &zone->lock irq_context: 0 misc_mtx fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 misc_mtx fill_pool_map-wait-type-override pool_lock irq_context: 0 misc_mtx lock kernfs_idr_lock &c->lock irq_context: 0 misc_mtx lock kernfs_idr_lock &pcp->lock &zone->lock irq_context: 0 misc_mtx lock kernfs_idr_lock &zone->lock irq_context: 0 misc_mtx lock kernfs_idr_lock &____s->seqcount irq_context: 0 misc_mtx rcu_read_lock &rq->__lock irq_context: 0 misc_mtx rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 vsock_register_mutex irq_context: 0 comedi_drivers_list_lock irq_context: 0 &sig->cred_guard_mutex &mm->mmap_lock lock#4 rcu_read_lock pool_lock#2 irq_context: 0 &sig->cred_guard_mutex &mm->mmap_lock lock#4 &obj_hash[i].lock irq_context: 0 subsys mutex#76 irq_context: 0 subsys mutex#76 &k->k_lock irq_context: 0 snd_ctl_layer_rwsem irq_context: 0 snd_card_mutex irq_context: 0 snd_ioctl_rwsem irq_context: 0 strings irq_context: 0 strings fs_reclaim irq_context: 0 strings fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 strings pool_lock#2 irq_context: 0 register_mutex irq_context: 0 sound_mutex irq_context: 0 sound_mutex fs_reclaim irq_context: 0 sound_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sound_mutex pool_lock#2 irq_context: 0 sound_mutex &k->list_lock irq_context: 0 sound_mutex gdp_mutex irq_context: 0 sound_mutex gdp_mutex &k->list_lock irq_context: 0 sound_mutex lock irq_context: 0 sound_mutex lock kernfs_idr_lock irq_context: 0 sound_mutex &root->kernfs_rwsem irq_context: 0 sound_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 sound_mutex bus_type_sem irq_context: 0 sound_mutex sysfs_symlink_target_lock irq_context: 0 sound_mutex &root->kernfs_rwsem irq_context: 0 sound_mutex &dev->power.lock irq_context: 0 sound_mutex dpm_list_mtx irq_context: 0 sound_mutex &c->lock irq_context: 0 sound_mutex &pcp->lock &zone->lock irq_context: 0 sound_mutex &zone->lock irq_context: 0 sound_mutex &____s->seqcount irq_context: 0 sound_mutex req_lock irq_context: 0 sound_mutex &p->pi_lock irq_context: 0 sound_mutex &p->pi_lock &cfs_rq->removed.lock irq_context: 0 sound_mutex &p->pi_lock &rq->__lock irq_context: 0 sound_mutex &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sound_mutex &rq->__lock irq_context: 0 sound_mutex &cfs_rq->removed.lock irq_context: 0 sound_mutex &obj_hash[i].lock irq_context: 0 sound_mutex &x->wait#11 irq_context: 0 sound_mutex uevent_sock_mutex irq_context: 0 sound_mutex rcu_read_lock &pool->lock/1 irq_context: 0 sound_mutex rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 sound_mutex rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 sound_mutex rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 sound_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 sound_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sound_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock 
&cfs_rq->removed.lock irq_context: 0 sound_mutex running_helpers_waitq.lock irq_context: 0 sound_mutex subsys mutex#76 irq_context: 0 sound_mutex subsys mutex#76 &k->k_lock irq_context: 0 register_mutex#2 irq_context: 0 register_mutex#3 irq_context: 0 register_mutex#3 fs_reclaim irq_context: 0 register_mutex#3 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 register_mutex#3 pool_lock#2 irq_context: 0 register_mutex#3 sound_mutex irq_context: 0 register_mutex#3 sound_mutex fs_reclaim irq_context: 0 register_mutex#3 sound_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 register_mutex#3 sound_mutex &c->lock irq_context: 0 register_mutex#3 sound_mutex &pcp->lock &zone->lock irq_context: 0 register_mutex#3 sound_mutex &zone->lock irq_context: 0 register_mutex#3 sound_mutex &____s->seqcount irq_context: 0 register_mutex#3 sound_mutex pool_lock#2 irq_context: 0 register_mutex#3 sound_mutex &k->list_lock irq_context: 0 register_mutex#3 sound_mutex gdp_mutex irq_context: 0 register_mutex#3 sound_mutex gdp_mutex &k->list_lock irq_context: 0 register_mutex#3 sound_mutex lock irq_context: 0 register_mutex#3 sound_mutex lock kernfs_idr_lock irq_context: 0 register_mutex#3 sound_mutex &root->kernfs_rwsem irq_context: 0 register_mutex#3 sound_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 register_mutex#3 sound_mutex bus_type_sem irq_context: 0 register_mutex#3 sound_mutex sysfs_symlink_target_lock irq_context: 0 register_mutex#3 sound_mutex &root->kernfs_rwsem irq_context: 0 register_mutex#3 sound_mutex &dev->power.lock irq_context: 0 register_mutex#3 sound_mutex dpm_list_mtx irq_context: 0 register_mutex#3 sound_mutex req_lock irq_context: 0 register_mutex#3 sound_mutex &p->pi_lock irq_context: 0 register_mutex#3 sound_mutex &p->pi_lock &rq->__lock irq_context: 0 register_mutex#3 sound_mutex &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 register_mutex#3 sound_mutex &rq->__lock irq_context: 0 register_mutex#3 sound_mutex &x->wait#11 irq_context: 0 register_mutex#3 sound_mutex &obj_hash[i].lock irq_context: 0 register_mutex#3 sound_mutex uevent_sock_mutex irq_context: 0 register_mutex#3 sound_mutex rcu_read_lock &pool->lock/1 irq_context: 0 register_mutex#3 sound_mutex rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 register_mutex#3 sound_mutex rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 register_mutex#3 sound_mutex rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 register_mutex#3 sound_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 register_mutex#3 sound_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 register_mutex#3 sound_mutex running_helpers_waitq.lock irq_context: 0 register_mutex#3 sound_mutex subsys mutex#76 irq_context: 0 register_mutex#3 sound_mutex subsys mutex#76 &k->k_lock irq_context: 0 register_mutex#3 clients_lock irq_context: 0 &client->ports_mutex irq_context: 0 &client->ports_mutex &client->ports_lock irq_context: 0 register_mutex#4 irq_context: 0 register_mutex#4 fs_reclaim irq_context: 0 register_mutex#4 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 register_mutex#4 pool_lock#2 irq_context: 0 register_mutex#4 sound_oss_mutex irq_context: 0 register_mutex#4 sound_oss_mutex fs_reclaim irq_context: 0 register_mutex#4 sound_oss_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 register_mutex#4 sound_oss_mutex pool_lock#2 irq_context: 0 register_mutex#4 
sound_oss_mutex sound_loader_lock irq_context: 0 register_mutex#4 sound_oss_mutex &x->wait#9 irq_context: 0 register_mutex#4 sound_oss_mutex &obj_hash[i].lock irq_context: 0 register_mutex#4 sound_oss_mutex &obj_hash[i].lock pool_lock irq_context: 0 register_mutex#4 sound_oss_mutex &k->list_lock irq_context: 0 register_mutex#4 sound_oss_mutex gdp_mutex irq_context: 0 register_mutex#4 sound_oss_mutex gdp_mutex &k->list_lock irq_context: 0 register_mutex#4 sound_oss_mutex lock irq_context: 0 register_mutex#4 sound_oss_mutex lock kernfs_idr_lock irq_context: 0 register_mutex#4 sound_oss_mutex &root->kernfs_rwsem irq_context: 0 register_mutex#4 sound_oss_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 register_mutex#4 sound_oss_mutex bus_type_sem irq_context: 0 register_mutex#4 sound_oss_mutex sysfs_symlink_target_lock irq_context: 0 register_mutex#4 sound_oss_mutex &c->lock irq_context: 0 register_mutex#4 sound_oss_mutex &____s->seqcount irq_context: 0 register_mutex#4 sound_oss_mutex &root->kernfs_rwsem irq_context: 0 register_mutex#4 sound_oss_mutex &dev->power.lock irq_context: 0 register_mutex#4 sound_oss_mutex dpm_list_mtx irq_context: 0 register_mutex#4 sound_oss_mutex req_lock irq_context: 0 register_mutex#4 sound_oss_mutex &p->pi_lock irq_context: 0 register_mutex#4 sound_oss_mutex &p->pi_lock &rq->__lock irq_context: 0 register_mutex#4 sound_oss_mutex &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 register_mutex#4 sound_oss_mutex &rq->__lock irq_context: 0 register_mutex#4 sound_oss_mutex &x->wait#11 irq_context: 0 register_mutex#4 sound_oss_mutex uevent_sock_mutex irq_context: 0 register_mutex#4 sound_oss_mutex rcu_read_lock &pool->lock/1 irq_context: 0 register_mutex#4 sound_oss_mutex rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 register_mutex#4 sound_oss_mutex rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 register_mutex#4 sound_oss_mutex rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 register_mutex#4 sound_oss_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 register_mutex#4 sound_oss_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 register_mutex#4 sound_oss_mutex running_helpers_waitq.lock irq_context: 0 register_mutex#4 sound_oss_mutex subsys mutex#76 irq_context: 0 register_mutex#4 sound_oss_mutex subsys mutex#76 &k->k_lock irq_context: 0 clients_lock irq_context: 0 &client->ports_lock irq_context: 0 &grp->list_mutex/1 irq_context: 0 &grp->list_mutex#2 irq_context: 0 &grp->list_mutex#2 &grp->list_lock irq_context: 0 &grp->list_mutex/1 clients_lock irq_context: 0 &grp->list_mutex/1 &client->ports_lock irq_context: 0 (wq_completion)events async_lookup_work irq_context: 0 (wq_completion)events async_lookup_work fs_reclaim irq_context: 0 (wq_completion)events async_lookup_work fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events async_lookup_work pool_lock#2 irq_context: 0 (wq_completion)events async_lookup_work clients_lock irq_context: 0 (wq_completion)events async_lookup_work &client->ports_lock irq_context: 0 (wq_completion)events async_lookup_work snd_card_mutex irq_context: 0 (wq_completion)events async_lookup_work (kmod_concurrent_max).lock irq_context: 0 (wq_completion)events async_lookup_work &obj_hash[i].lock irq_context: 0 (wq_completion)events async_lookup_work rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)events async_lookup_work rcu_read_lock &pool->lock/1 
&obj_hash[i].lock irq_context: 0 (wq_completion)events async_lookup_work rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 (wq_completion)events async_lookup_work rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)events async_lookup_work rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events async_lookup_work rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events async_lookup_work &x->wait#17 irq_context: 0 (wq_completion)events async_lookup_work &pool->lock irq_context: 0 (wq_completion)events async_lookup_work &rq->__lock irq_context: 0 (wq_completion)events async_lookup_work &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 register_mutex#3 &pcp->lock &zone->lock irq_context: 0 register_mutex#3 &zone->lock irq_context: 0 register_mutex#3 &____s->seqcount irq_context: 0 register_mutex#3 &rq->__lock irq_context: 0 (wq_completion)events async_lookup_work &pcp->lock &zone->lock irq_context: 0 (wq_completion)events async_lookup_work &zone->lock irq_context: 0 (wq_completion)events async_lookup_work &____s->seqcount irq_context: 0 (wq_completion)events async_lookup_work rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)events async_lookup_work running_helpers_waitq.lock irq_context: 0 (wq_completion)events async_lookup_work rcu_read_lock &pool->lock irq_context: 0 (wq_completion)events async_lookup_work rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events async_lookup_work autoload_work irq_context: 0 (wq_completion)events async_lookup_work &x->wait#10 irq_context: 0 (wq_completion)events async_lookup_work &pool->lock &p->pi_lock irq_context: 0 (wq_completion)events async_lookup_work &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events async_lookup_work &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events autoload_work irq_context: 0 (wq_completion)events autoload_work &k->list_lock irq_context: 0 (wq_completion)events autoload_work &k->k_lock irq_context: 0 (wq_completion)events (work_completion)(&barr->work) irq_context: 0 (wq_completion)events (work_completion)(&barr->work) &x->wait#10 irq_context: 0 (wq_completion)events (work_completion)(&barr->work) &x->wait#10 &p->pi_lock irq_context: 0 (wq_completion)events (work_completion)(&barr->work) &x->wait#10 &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&barr->work) &x->wait#10 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 register_mutex#3 &obj_hash[i].lock irq_context: 0 register_mutex#3 rcu_read_lock pool_lock#2 irq_context: 0 &grp->list_mutex/1 register_lock#3 irq_context: 0 &grp->list_mutex/1 fs_reclaim irq_context: 0 &grp->list_mutex/1 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &grp->list_mutex/1 pool_lock#2 irq_context: 0 &dev->mutex snd_card_mutex irq_context: 0 &dev->mutex &entry->access irq_context: 0 &dev->mutex info_mutex irq_context: 0 &dev->mutex info_mutex proc_subdir_lock irq_context: 0 &dev->mutex info_mutex fs_reclaim irq_context: 0 &dev->mutex info_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex info_mutex &c->lock irq_context: 0 &dev->mutex info_mutex &____s->seqcount irq_context: 0 &dev->mutex info_mutex pool_lock#2 irq_context: 0 &dev->mutex info_mutex proc_inum_ida.xa_lock irq_context: 0 &dev->mutex info_mutex proc_subdir_lock irq_context: 0 &dev->mutex 
&card->controls_rwsem irq_context: 0 &dev->mutex &card->controls_rwsem &xa->xa_lock#10 irq_context: 0 &dev->mutex &card->controls_rwsem &xa->xa_lock#10 pool_lock#2 irq_context: 0 &dev->mutex &card->controls_rwsem fs_reclaim irq_context: 0 &dev->mutex &card->controls_rwsem fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex &card->controls_rwsem &card->ctl_files_rwlock irq_context: 0 &dev->mutex &card->controls_rwsem snd_ctl_layer_rwsem irq_context: 0 &dev->mutex &card->controls_rwsem &xa->xa_lock#10 &c->lock irq_context: 0 &dev->mutex &card->controls_rwsem &xa->xa_lock#10 &pcp->lock &zone->lock irq_context: 0 &dev->mutex &card->controls_rwsem &xa->xa_lock#10 &zone->lock irq_context: 0 &dev->mutex &card->controls_rwsem &xa->xa_lock#10 &____s->seqcount irq_context: 0 &dev->mutex &card->controls_rwsem &xa->xa_lock#10 &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &dev->mutex subsys mutex#76 irq_context: 0 &dev->mutex subsys mutex#76 &k->k_lock irq_context: 0 &dev->mutex register_mutex#2 irq_context: 0 &dev->mutex register_mutex#2 fs_reclaim irq_context: 0 &dev->mutex register_mutex#2 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex register_mutex#2 pool_lock#2 irq_context: 0 &dev->mutex register_mutex#2 sound_mutex irq_context: 0 &dev->mutex register_mutex#2 sound_mutex fs_reclaim irq_context: 0 &dev->mutex register_mutex#2 sound_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex register_mutex#2 sound_mutex pool_lock#2 irq_context: 0 &dev->mutex register_mutex#2 sound_mutex &k->list_lock irq_context: 0 &dev->mutex register_mutex#2 sound_mutex lock irq_context: 0 &dev->mutex register_mutex#2 sound_mutex lock kernfs_idr_lock irq_context: 0 &dev->mutex register_mutex#2 sound_mutex &root->kernfs_rwsem irq_context: 0 &dev->mutex register_mutex#2 sound_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 &dev->mutex register_mutex#2 sound_mutex bus_type_sem irq_context: 0 &dev->mutex register_mutex#2 sound_mutex sysfs_symlink_target_lock irq_context: 0 &dev->mutex register_mutex#2 sound_mutex &pcp->lock &zone->lock irq_context: 0 &dev->mutex register_mutex#2 sound_mutex &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &dev->mutex register_mutex#2 sound_mutex &____s->seqcount irq_context: 0 &dev->mutex register_mutex#2 sound_mutex &zone->lock irq_context: 0 &dev->mutex register_mutex#2 sound_mutex rcu_read_lock pool_lock#2 irq_context: 0 &dev->mutex register_mutex#2 sound_mutex &obj_hash[i].lock irq_context: 0 &dev->mutex register_mutex#2 sound_mutex &root->kernfs_rwsem irq_context: 0 &dev->mutex register_mutex#2 sound_mutex &c->lock irq_context: 0 &dev->mutex register_mutex#2 sound_mutex &dev->power.lock irq_context: 0 &dev->mutex register_mutex#2 sound_mutex dpm_list_mtx irq_context: 0 &dev->mutex register_mutex#2 sound_mutex req_lock irq_context: 0 &dev->mutex register_mutex#2 sound_mutex &p->pi_lock irq_context: 0 &dev->mutex register_mutex#2 sound_mutex &p->pi_lock &rq->__lock irq_context: 0 &dev->mutex register_mutex#2 sound_mutex &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &dev->mutex register_mutex#2 sound_mutex &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &dev->mutex register_mutex#2 sound_mutex &rq->__lock irq_context: 0 &dev->mutex register_mutex#2 sound_mutex &x->wait#11 irq_context: 0 &dev->mutex register_mutex#2 sound_mutex uevent_sock_mutex irq_context: 0 &dev->mutex register_mutex#2 sound_mutex &obj_hash[i].lock pool_lock irq_context: 
0 &dev->mutex register_mutex#2 sound_mutex rcu_read_lock &pool->lock/1 irq_context: 0 &dev->mutex register_mutex#2 sound_mutex rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 &dev->mutex register_mutex#2 sound_mutex rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 &dev->mutex register_mutex#2 sound_mutex rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 &dev->mutex register_mutex#2 sound_mutex running_helpers_waitq.lock irq_context: 0 &dev->mutex register_mutex#2 sound_mutex &k->k_lock irq_context: 0 &dev->mutex register_mutex#2 sound_mutex subsys mutex#76 irq_context: 0 &dev->mutex register_mutex#2 sound_mutex subsys mutex#76 &k->k_lock irq_context: 0 &dev->mutex register_mutex#2 &obj_hash[i].lock irq_context: 0 &dev->mutex register_mutex#2 register_mutex irq_context: 0 &dev->mutex register_mutex#2 sound_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 &dev->mutex register_mutex#2 sound_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &dev->mutex register_mutex#2 &c->lock irq_context: 0 &dev->mutex register_mutex#2 &pcp->lock &zone->lock irq_context: 0 &dev->mutex register_mutex#2 &zone->lock irq_context: 0 &dev->mutex register_mutex#2 &____s->seqcount irq_context: 0 &dev->mutex register_mutex#2 &obj_hash[i].lock pool_lock irq_context: 0 &dev->mutex register_mutex#2 sound_oss_mutex irq_context: 0 &dev->mutex register_mutex#2 sound_oss_mutex fs_reclaim irq_context: 0 &dev->mutex register_mutex#2 sound_oss_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex register_mutex#2 sound_oss_mutex batched_entropy_u8.lock irq_context: 0 &dev->mutex register_mutex#2 sound_oss_mutex kfence_freelist_lock irq_context: 0 &dev->mutex register_mutex#2 sound_oss_mutex sound_loader_lock irq_context: 0 &dev->mutex register_mutex#2 sound_oss_mutex pool_lock#2 irq_context: 0 &dev->mutex register_mutex#2 sound_oss_mutex &x->wait#9 irq_context: 0 &dev->mutex register_mutex#2 sound_oss_mutex &obj_hash[i].lock irq_context: 0 &dev->mutex register_mutex#2 sound_oss_mutex &k->list_lock irq_context: 0 &dev->mutex register_mutex#2 sound_oss_mutex lock irq_context: 0 &dev->mutex register_mutex#2 sound_oss_mutex lock kernfs_idr_lock irq_context: 0 &dev->mutex register_mutex#2 sound_oss_mutex &root->kernfs_rwsem irq_context: 0 &dev->mutex register_mutex#2 sound_oss_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 &dev->mutex register_mutex#2 sound_oss_mutex bus_type_sem irq_context: 0 &dev->mutex register_mutex#2 sound_oss_mutex sysfs_symlink_target_lock irq_context: 0 &dev->mutex register_mutex#2 sound_oss_mutex &root->kernfs_rwsem irq_context: 0 &dev->mutex register_mutex#2 sound_oss_mutex lock kernfs_idr_lock pool_lock#2 irq_context: 0 &dev->mutex register_mutex#2 sound_oss_mutex &dev->power.lock irq_context: 0 &dev->mutex register_mutex#2 sound_oss_mutex dpm_list_mtx irq_context: 0 &dev->mutex register_mutex#2 sound_oss_mutex req_lock irq_context: 0 &dev->mutex register_mutex#2 sound_oss_mutex &p->pi_lock irq_context: 0 &dev->mutex register_mutex#2 sound_oss_mutex &p->pi_lock &rq->__lock irq_context: 0 &dev->mutex register_mutex#2 sound_oss_mutex &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &dev->mutex register_mutex#2 sound_oss_mutex &rq->__lock irq_context: 0 &dev->mutex register_mutex#2 sound_oss_mutex &x->wait#11 irq_context: 0 &dev->mutex register_mutex#2 sound_oss_mutex uevent_sock_mutex irq_context: 0 &dev->mutex register_mutex#2 
sound_oss_mutex rcu_read_lock &pool->lock/1 irq_context: 0 &dev->mutex register_mutex#2 sound_oss_mutex rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 &dev->mutex register_mutex#2 sound_oss_mutex rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 &dev->mutex register_mutex#2 sound_oss_mutex rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 &dev->mutex register_mutex#2 sound_oss_mutex running_helpers_waitq.lock irq_context: 0 &dev->mutex register_mutex#2 sound_oss_mutex &k->k_lock irq_context: 0 &dev->mutex register_mutex#2 sound_oss_mutex subsys mutex#76 irq_context: 0 &dev->mutex register_mutex#2 sound_oss_mutex subsys mutex#76 &k->k_lock irq_context: 0 &dev->mutex register_mutex#2 sound_oss_mutex &c->lock irq_context: 0 &dev->mutex register_mutex#2 sound_oss_mutex &pcp->lock &zone->lock irq_context: 0 &dev->mutex register_mutex#2 sound_oss_mutex &zone->lock irq_context: 0 &dev->mutex register_mutex#2 sound_oss_mutex &____s->seqcount irq_context: 0 &dev->mutex register_mutex#2 sound_oss_mutex &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &dev->mutex register_mutex#2 sound_oss_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 &dev->mutex register_mutex#2 sound_oss_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &dev->mutex register_mutex#2 strings irq_context: 0 &dev->mutex register_mutex#2 strings fs_reclaim irq_context: 0 &dev->mutex register_mutex#2 strings fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex register_mutex#2 strings pool_lock#2 irq_context: 0 &dev->mutex register_mutex#2 &entry->access irq_context: 0 &dev->mutex register_mutex#2 info_mutex irq_context: 0 &dev->mutex sound_mutex irq_context: 0 &dev->mutex sound_mutex fs_reclaim irq_context: 0 &dev->mutex sound_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex sound_mutex pool_lock#2 irq_context: 0 &dev->mutex sound_mutex &k->list_lock irq_context: 0 &dev->mutex sound_mutex lock irq_context: 0 &dev->mutex sound_mutex lock kernfs_idr_lock irq_context: 0 &dev->mutex sound_mutex &root->kernfs_rwsem irq_context: 0 &dev->mutex sound_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 &dev->mutex sound_mutex bus_type_sem irq_context: 0 &dev->mutex sound_mutex sysfs_symlink_target_lock irq_context: 0 &dev->mutex sound_mutex &c->lock irq_context: 0 &dev->mutex sound_mutex &____s->seqcount irq_context: 0 &dev->mutex sound_mutex &root->kernfs_rwsem irq_context: 0 &dev->mutex sound_mutex &dev->power.lock irq_context: 0 &dev->mutex sound_mutex dpm_list_mtx irq_context: 0 &dev->mutex sound_mutex req_lock irq_context: 0 &dev->mutex sound_mutex &p->pi_lock irq_context: 0 &dev->mutex sound_mutex &p->pi_lock &rq->__lock irq_context: 0 &dev->mutex sound_mutex &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &dev->mutex sound_mutex &rq->__lock irq_context: 0 &dev->mutex sound_mutex &x->wait#11 irq_context: 0 &dev->mutex sound_mutex &obj_hash[i].lock irq_context: 0 &dev->mutex sound_mutex uevent_sock_mutex irq_context: 0 &dev->mutex sound_mutex rcu_read_lock &pool->lock/1 irq_context: 0 &dev->mutex sound_mutex rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 &dev->mutex sound_mutex rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 &dev->mutex sound_mutex rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 &dev->mutex sound_mutex running_helpers_waitq.lock irq_context: 0 &dev->mutex sound_mutex &k->k_lock irq_context: 
0 &dev->mutex sound_mutex subsys mutex#76 irq_context: 0 &dev->mutex sound_mutex subsys mutex#76 &k->k_lock irq_context: 0 &dev->mutex &card->controls_rwsem irq_context: 0 &dev->mutex &card->controls_rwsem snd_ctl_layer_rwsem irq_context: 0 &dev->mutex &card->controls_rwsem snd_ctl_layer_rwsem snd_ctl_led_mutex irq_context: 0 &dev->mutex &card->controls_rwsem snd_ctl_layer_rwsem fs_reclaim irq_context: 0 &dev->mutex &card->controls_rwsem snd_ctl_layer_rwsem fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex &card->controls_rwsem snd_ctl_layer_rwsem pool_lock#2 irq_context: 0 &dev->mutex &card->controls_rwsem snd_ctl_layer_rwsem &x->wait#9 irq_context: 0 &dev->mutex &card->controls_rwsem snd_ctl_layer_rwsem &obj_hash[i].lock irq_context: 0 &dev->mutex &card->controls_rwsem snd_ctl_layer_rwsem &k->list_lock irq_context: 0 &dev->mutex &card->controls_rwsem snd_ctl_layer_rwsem lock irq_context: 0 &dev->mutex &card->controls_rwsem snd_ctl_layer_rwsem lock kernfs_idr_lock irq_context: 0 &dev->mutex &card->controls_rwsem snd_ctl_layer_rwsem &root->kernfs_rwsem irq_context: 0 &dev->mutex &card->controls_rwsem snd_ctl_layer_rwsem &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 &dev->mutex &card->controls_rwsem snd_ctl_layer_rwsem bus_type_sem irq_context: 0 &dev->mutex &card->controls_rwsem snd_ctl_layer_rwsem &root->kernfs_rwsem irq_context: 0 &dev->mutex &card->controls_rwsem snd_ctl_layer_rwsem &c->lock irq_context: 0 &dev->mutex &card->controls_rwsem snd_ctl_layer_rwsem &____s->seqcount irq_context: 0 &dev->mutex &card->controls_rwsem snd_ctl_layer_rwsem &dev->power.lock irq_context: 0 &dev->mutex &card->controls_rwsem snd_ctl_layer_rwsem dpm_list_mtx irq_context: 0 &dev->mutex &card->controls_rwsem snd_ctl_layer_rwsem &k->k_lock irq_context: 0 &dev->mutex &card->controls_rwsem snd_ctl_layer_rwsem sysfs_symlink_target_lock irq_context: 0 &dev->mutex sound_oss_mutex irq_context: 0 &dev->mutex sound_oss_mutex fs_reclaim irq_context: 0 &dev->mutex sound_oss_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex sound_oss_mutex pool_lock#2 irq_context: 0 &dev->mutex sound_oss_mutex sound_loader_lock irq_context: 0 &dev->mutex sound_oss_mutex &x->wait#9 irq_context: 0 &dev->mutex sound_oss_mutex &obj_hash[i].lock irq_context: 0 &dev->mutex sound_oss_mutex &k->list_lock irq_context: 0 &dev->mutex sound_oss_mutex lock irq_context: 0 &dev->mutex sound_oss_mutex lock kernfs_idr_lock irq_context: 0 &dev->mutex sound_oss_mutex &root->kernfs_rwsem irq_context: 0 &dev->mutex sound_oss_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 &dev->mutex sound_oss_mutex bus_type_sem irq_context: 0 &dev->mutex sound_oss_mutex &pcp->lock &zone->lock irq_context: 0 &dev->mutex sound_oss_mutex &zone->lock irq_context: 0 &dev->mutex sound_oss_mutex &____s->seqcount irq_context: 0 &dev->mutex sound_oss_mutex sysfs_symlink_target_lock irq_context: 0 &dev->mutex sound_oss_mutex lock kernfs_idr_lock pool_lock#2 irq_context: 0 &dev->mutex sound_oss_mutex &root->kernfs_rwsem irq_context: 0 &dev->mutex sound_oss_mutex &dev->power.lock irq_context: 0 &dev->mutex sound_oss_mutex dpm_list_mtx irq_context: 0 &dev->mutex sound_oss_mutex req_lock irq_context: 0 &dev->mutex sound_oss_mutex &p->pi_lock irq_context: 0 &dev->mutex sound_oss_mutex &p->pi_lock &rq->__lock irq_context: 0 &dev->mutex sound_oss_mutex &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &dev->mutex sound_oss_mutex &rq->__lock irq_context: 0 &dev->mutex 
sound_oss_mutex &x->wait#11 irq_context: 0 &dev->mutex sound_oss_mutex uevent_sock_mutex irq_context: 0 &dev->mutex sound_oss_mutex rcu_read_lock &pool->lock/1 irq_context: 0 &dev->mutex sound_oss_mutex rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 &dev->mutex sound_oss_mutex rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 &dev->mutex sound_oss_mutex rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 &dev->mutex sound_oss_mutex running_helpers_waitq.lock irq_context: 0 &dev->mutex sound_oss_mutex &k->k_lock irq_context: 0 &dev->mutex sound_oss_mutex subsys mutex#76 irq_context: 0 &dev->mutex sound_oss_mutex subsys mutex#76 &k->k_lock irq_context: 0 &dev->mutex strings irq_context: 0 &dev->mutex strings fs_reclaim irq_context: 0 &dev->mutex strings fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex strings pool_lock#2 irq_context: 0 &dev->mutex &card->controls_rwsem fs_reclaim irq_context: 0 &dev->mutex &card->controls_rwsem fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex &card->controls_rwsem pool_lock#2 irq_context: 0 &dev->mutex &card->controls_rwsem &c->lock irq_context: 0 &dev->mutex &card->controls_rwsem &pcp->lock &zone->lock irq_context: 0 &dev->mutex &card->controls_rwsem &zone->lock irq_context: 0 &dev->mutex &card->controls_rwsem &____s->seqcount irq_context: 0 &dev->mutex info_mutex &pcp->lock &zone->lock irq_context: 0 &dev->mutex info_mutex &zone->lock irq_context: 0 &dev->mutex register_mutex#2 sound_mutex lock kernfs_idr_lock pool_lock#2 irq_context: 0 &dev->mutex register_mutex#2 sound_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &dev->mutex register_mutex#2 sound_oss_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &dev->mutex sound_mutex &pcp->lock &zone->lock irq_context: 0 &dev->mutex sound_mutex &zone->lock irq_context: 0 &dev->mutex sound_mutex rcu_read_lock pool_lock#2 irq_context: 0 &dev->mutex sound_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 &dev->mutex sound_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &dev->mutex &card->controls_rwsem snd_ctl_layer_rwsem &pcp->lock &zone->lock irq_context: 0 &dev->mutex &card->controls_rwsem snd_ctl_layer_rwsem &zone->lock irq_context: 0 &dev->mutex sound_oss_mutex &obj_hash[i].lock pool_lock irq_context: 0 &dev->mutex sound_oss_mutex &c->lock irq_context: 0 &dev->mutex sound_oss_mutex &cfs_rq->removed.lock irq_context: 0 &dev->mutex sound_oss_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 &dev->mutex sound_oss_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &dev->mutex sound_oss_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &dev->mutex register_mutex#5 irq_context: 0 &dev->mutex register_mutex#3 irq_context: 0 &dev->mutex register_mutex#3 fs_reclaim irq_context: 0 &dev->mutex register_mutex#3 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex register_mutex#3 pool_lock#2 irq_context: 0 &dev->mutex register_mutex#3 clients_lock irq_context: 0 &dev->mutex clients_lock irq_context: 0 &dev->mutex &client->ports_lock irq_context: 0 &dev->mutex &grp->list_mutex/1 irq_context: 0 &dev->mutex &grp->list_mutex/1 clients_lock irq_context: 0 &dev->mutex &grp->list_mutex/1 &client->ports_lock irq_context: 0 &dev->mutex 
&client->ports_mutex irq_context: 0 &dev->mutex &client->ports_mutex &client->ports_lock irq_context: 0 &dev->mutex &grp->list_mutex/1 register_lock#3 irq_context: 0 &dev->mutex &grp->list_mutex/1 fs_reclaim irq_context: 0 &dev->mutex &grp->list_mutex/1 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex &grp->list_mutex/1 pool_lock#2 irq_context: 0 &dev->mutex sound_oss_mutex &p->pi_lock &cfs_rq->removed.lock irq_context: 0 &dev->mutex sound_oss_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &dev->mutex sound_oss_mutex &rq->__lock &cfs_rq->removed.lock irq_context: 0 &dev->mutex sound_oss_mutex lock kernfs_idr_lock &____s->seqcount irq_context: 0 &dev->mutex sound_oss_mutex &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &dev->mutex &grp->list_mutex/1 &c->lock irq_context: 0 &dev->mutex &grp->list_mutex/1 &pcp->lock &zone->lock irq_context: 0 &dev->mutex &grp->list_mutex/1 &zone->lock irq_context: 0 &dev->mutex &grp->list_mutex/1 &____s->seqcount irq_context: 0 &dev->mutex sound_mutex lock kernfs_idr_lock pool_lock#2 irq_context: 0 pernet_ops_rwsem rtnl_mutex failover_lock irq_context: 0 llc_sap_list_lock irq_context: 0 llc_sap_list_lock pool_lock#2 irq_context: 0 act_id_mutex irq_context: 0 act_id_mutex fs_reclaim irq_context: 0 act_id_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 act_id_mutex pool_lock#2 irq_context: 0 act_mod_lock irq_context: 0 pernet_ops_rwsem &pcp->lock &zone->lock &____s->seqcount irq_context: 0 ife_mod_lock irq_context: 0 pernet_ops_rwsem nf_connlabels_lock irq_context: 0 cls_mod_lock irq_context: 0 ematch_mod_lock irq_context: 0 sock_diag_table_mutex irq_context: 0 pernet_ops_rwsem batched_entropy_u8.lock irq_context: 0 pernet_ops_rwsem kfence_freelist_lock irq_context: 0 nfnl_subsys_acct irq_context: 0 nfnl_subsys_queue irq_context: 0 nfnl_subsys_ulog irq_context: 0 nf_log_mutex irq_context: 0 nfnl_subsys_osf irq_context: 0 nf_sockopt_mutex irq_context: 0 nfnl_subsys_ctnetlink irq_context: 0 nfnl_subsys_ctnetlink_exp irq_context: 0 pernet_ops_rwsem nf_ct_ecache_mutex irq_context: 0 nfnl_subsys_cttimeout irq_context: 0 nfnl_subsys_cthelper irq_context: 0 nf_ct_helper_mutex irq_context: 0 rcu_state.exp_mutex &rnp->exp_wq[1] irq_context: 0 nf_conntrack_expect_lock irq_context: 0 net_rwsem irq_context: 0 nf_conntrack_mutex irq_context: 0 pernet_ops_rwsem nf_log_mutex irq_context: 0 nf_ct_nat_helpers_mutex irq_context: 0 nfnl_subsys_nftables irq_context: 0 nfnl_subsys_nftcompat irq_context: 0 masq_mutex irq_context: 0 masq_mutex pernet_ops_rwsem irq_context: 0 masq_mutex pernet_ops_rwsem rtnl_mutex irq_context: 0 masq_mutex (inetaddr_chain).rwsem irq_context: 0 masq_mutex inet6addr_chain.lock irq_context: 0 cpu_hotplug_lock wq_pool_mutex rcu_read_lock pool_lock#2 irq_context: 0 &xt[i].mutex irq_context: 0 pernet_ops_rwsem rtnl_mutex &tn->lock irq_context: 0 subsys mutex#77 irq_context: 0 subsys mutex#77 &k->k_lock irq_context: 0 nfnl_subsys_ipset irq_context: 0 ip_set_type_mutex irq_context: 0 pernet_ops_rwsem ipvs->est_mutex irq_context: 0 pernet_ops_rwsem ipvs->est_mutex fs_reclaim irq_context: 0 pernet_ops_rwsem ipvs->est_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 pernet_ops_rwsem ipvs->est_mutex pool_lock#2 irq_context: 0 pernet_ops_rwsem ipvs->est_mutex pcpu_alloc_mutex irq_context: 0 pernet_ops_rwsem ipvs->est_mutex pcpu_alloc_mutex pcpu_lock irq_context: 0 pernet_ops_rwsem &base->lock irq_context: 0 pernet_ops_rwsem &base->lock &obj_hash[i].lock irq_context: 0 
ip_vs_sched_mutex irq_context: 0 pernet_ops_rwsem __ip_vs_app_mutex irq_context: 0 pernet_ops_rwsem __ip_vs_app_mutex fs_reclaim irq_context: 0 pernet_ops_rwsem __ip_vs_app_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 pernet_ops_rwsem __ip_vs_app_mutex pool_lock#2 irq_context: 0 ip_vs_pe_mutex irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) cgroup_threadgroup_rwsem &rq->__lock irq_context: 0 tunnel4_mutex irq_context: 0 pernet_ops_rwsem rtnl_mutex &pcp->lock &zone->lock &____s->seqcount irq_context: 0 pernet_ops_rwsem rtnl_mutex &obj_hash[i].lock pool_lock irq_context: 0 pernet_ops_rwsem net_generic_ids.xa_lock pool_lock#2 irq_context: 0 pernet_ops_rwsem rtnl_mutex uevent_sock_mutex rcu_read_lock pool_lock#2 irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 xfrm4_protocol_mutex irq_context: 0 &xt[i].mutex fs_reclaim irq_context: 0 &xt[i].mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &xt[i].mutex pool_lock#2 irq_context: 0 inet_diag_table_mutex irq_context: 0 xfrm_km_lock irq_context: 0 xfrm_translator_lock irq_context: 0 xfrm6_protocol_mutex irq_context: 0 tunnel6_mutex irq_context: 0 xfrm_if_cb_lock irq_context: 0 inetsw6_lock irq_context: 0 pernet_ops_rwsem pcpu_alloc_mutex &rq->__lock irq_context: 0 &hashinfo->lock#2 irq_context: 0 pernet_ops_rwsem &hashinfo->lock#2 irq_context: 0 pernet_ops_rwsem &net->ipv6.ip6addrlbl_table.lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &idev->mc_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 pernet_ops_rwsem rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 pernet_ops_rwsem rtnl_mutex &idev->mc_lock pool_lock#2 irq_context: 0 pernet_ops_rwsem rtnl_mutex &idev->mc_lock &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &idev->mc_lock &pcp->lock &zone->lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &idev->mc_lock &zone->lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &idev->mc_lock &____s->seqcount irq_context: 0 pernet_ops_rwsem rtnl_mutex &idev->mc_lock &dev_addr_list_lock_key irq_context: 0 pernet_ops_rwsem rtnl_mutex &idev->mc_lock &dev_addr_list_lock_key pool_lock#2 irq_context: 0 pernet_ops_rwsem rtnl_mutex &idev->mc_lock _xmit_ETHER irq_context: 0 pernet_ops_rwsem rtnl_mutex &idev->mc_lock _xmit_ETHER &c->lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &idev->mc_lock _xmit_ETHER &pcp->lock &zone->lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &idev->mc_lock _xmit_ETHER &zone->lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &idev->mc_lock _xmit_ETHER &____s->seqcount irq_context: 0 pernet_ops_rwsem rtnl_mutex &idev->mc_lock _xmit_ETHER pool_lock#2 irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_read_lock &pool->lock irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem rtnl_mutex &idev->mc_lock &obj_hash[i].lock pool_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 pernet_ops_rwsem rtnl_mutex fill_pool_map-wait-type-override pool_lock irq_context: 0 
pernet_ops_rwsem rtnl_mutex fill_pool_map-wait-type-override &c->lock irq_context: 0 pernet_ops_rwsem rtnl_mutex fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 pernet_ops_rwsem rtnl_mutex &idev->mc_lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 pernet_ops_rwsem rtnl_mutex &idev->mc_lock fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)ipv6_addrconf irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&net->ipv6.addr_chk_work)->work) irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&net->ipv6.addr_chk_work)->work) rtnl_mutex irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&net->ipv6.addr_chk_work)->work) rtnl_mutex rtnl_mutex.wait_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&net->ipv6.addr_chk_work)->work) rtnl_mutex &pool->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&net->ipv6.addr_chk_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&net->ipv6.addr_chk_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&net->ipv6.addr_chk_work)->work) rtnl_mutex &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&net->ipv6.addr_chk_work)->work) rtnl_mutex rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&net->ipv6.addr_chk_work)->work) rtnl_mutex rcu_read_lock_bh &base->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&net->ipv6.addr_chk_work)->work) rtnl_mutex rcu_read_lock_bh &base->lock &obj_hash[i].lock irq_context: 0 (crypto_chain).rwsem irq_context: 0 (crypto_chain).rwsem fs_reclaim irq_context: 0 (crypto_chain).rwsem fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (crypto_chain).rwsem pool_lock#2 irq_context: 0 (crypto_chain).rwsem kthread_create_lock irq_context: 0 (crypto_chain).rwsem &p->pi_lock irq_context: 0 (crypto_chain).rwsem &p->pi_lock &cfs_rq->removed.lock irq_context: 0 (crypto_chain).rwsem &p->pi_lock &rq->__lock irq_context: 0 (crypto_chain).rwsem &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (crypto_chain).rwsem &rq->__lock irq_context: 0 (crypto_chain).rwsem &x->wait irq_context: 0 (crypto_chain).rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (crypto_chain).rwsem &obj_hash[i].lock irq_context: 0 &x->wait#21 irq_context: 0 &x->wait#21 &p->pi_lock irq_context: 0 &x->wait#21 &p->pi_lock &rq->__lock irq_context: 0 &x->wait#21 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &p->alloc_lock &x->wait irq_context: 0 (crypto_chain).rwsem &c->lock irq_context: 0 (crypto_chain).rwsem &____s->seqcount irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_read_lock rcu_node_0 irq_context: 0 stp_proto_mutex irq_context: 0 stp_proto_mutex llc_sap_list_lock irq_context: 0 stp_proto_mutex llc_sap_list_lock &c->lock irq_context: 0 stp_proto_mutex llc_sap_list_lock &pcp->lock &zone->lock irq_context: 0 stp_proto_mutex llc_sap_list_lock &zone->lock irq_context: 0 stp_proto_mutex llc_sap_list_lock &____s->seqcount irq_context: 0 stp_proto_mutex llc_sap_list_lock pool_lock#2 irq_context: 0 switchdev_notif_chain.lock irq_context: 0 (switchdev_blocking_notif_chain).rwsem irq_context: 0 br_ioctl_mutex irq_context: 0 nf_ct_proto_mutex irq_context: 0 ebt_mutex 
irq_context: 0 ebt_mutex fs_reclaim irq_context: 0 ebt_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 ebt_mutex pool_lock#2 irq_context: 0 dsa_tag_drivers_lock irq_context: 0 rtnl_mutex &tn->lock irq_context: 0 rtnl_mutex fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 rtnl_mutex fill_pool_map-wait-type-override pool_lock irq_context: 0 rtnl_mutex lock kernfs_idr_lock &c->lock irq_context: 0 rtnl_mutex lock kernfs_idr_lock &____s->seqcount irq_context: 0 rtnl_mutex uevent_sock_mutex &pcp->lock &zone->lock irq_context: 0 rtnl_mutex uevent_sock_mutex &zone->lock irq_context: 0 rtnl_mutex fs_reclaim &rq->__lock irq_context: 0 rtnl_mutex fs_reclaim &cfs_rq->removed.lock irq_context: 0 rtnl_mutex fs_reclaim &obj_hash[i].lock irq_context: 0 rtnl_mutex lock kernfs_idr_lock &pcp->lock &zone->lock irq_context: 0 rtnl_mutex lock kernfs_idr_lock &zone->lock irq_context: 0 protocol_list_lock irq_context: 0 linkfail_lock irq_context: 0 rtnl_mutex fill_pool_map-wait-type-override &c->lock irq_context: 0 rtnl_mutex fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 rtnl_mutex uevent_sock_mutex quarantine_lock irq_context: 0 rtnl_mutex uevent_sock_mutex &pcp->lock &zone->lock &____s->seqcount irq_context: 0 rtnl_mutex batched_entropy_u32.lock crngs.lock irq_context: 0 rose_neigh_list_lock irq_context: 0 proto_tab_lock#2 irq_context: 0 bt_proto_lock irq_context: 0 bt_proto_lock pool_lock#2 irq_context: 0 bt_proto_lock &dir->lock irq_context: 0 bt_proto_lock &obj_hash[i].lock irq_context: 0 bt_proto_lock chan_list_lock irq_context: 0 bt_proto_lock l2cap_sk_list.lock irq_context: 0 sk_lock-AF_BLUETOOTH-BTPROTO_L2CAP irq_context: 0 sk_lock-AF_BLUETOOTH-BTPROTO_L2CAP slock-AF_BLUETOOTH-BTPROTO_L2CAP irq_context: 0 sk_lock-AF_BLUETOOTH-BTPROTO_L2CAP chan_list_lock irq_context: 0 slock-AF_BLUETOOTH-BTPROTO_L2CAP irq_context: 0 rfcomm_wq.lock irq_context: 0 rfcomm_mutex irq_context: 0 auth_domain_lock irq_context: 0 registered_mechs_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) tasklist_lock &sighand->siglock &(&sig->stats_lock)->lock &____s->seqcount#5 &obj_hash[i].lock pool_lock irq_context: 0 atm_dev_notify_chain.lock irq_context: 0 genl_mutex irq_context: 0 proto_tab_lock#3 irq_context: 0 vlan_ioctl_mutex irq_context: 0 pernet_ops_rwsem (console_sem).lock irq_context: 0 pernet_ops_rwsem console_lock console_srcu console_owner_lock irq_context: 0 pernet_ops_rwsem console_lock console_srcu console_owner irq_context: 0 pernet_ops_rwsem console_lock console_srcu console_owner &port_lock_key irq_context: 0 pernet_ops_rwsem console_lock console_srcu console_owner console_owner_lock irq_context: 0 pernet_ops_rwsem rcu_read_lock rcu_read_lock &ndev->lock irq_context: 0 rds_info_lock irq_context: 0 rds_trans_sem irq_context: 0 rds_trans_sem (console_sem).lock irq_context: 0 rds_trans_sem console_lock console_srcu console_owner_lock irq_context: 0 rds_trans_sem console_lock console_srcu console_owner irq_context: 0 rds_trans_sem console_lock console_srcu console_owner &port_lock_key irq_context: 0 rds_trans_sem console_lock console_srcu console_owner console_owner_lock irq_context: 0 &id_priv->lock irq_context: 0 lock#7 irq_context: 0 lock#7 fs_reclaim irq_context: 0 lock#7 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 lock#7 pool_lock#2 irq_context: 0 lock#7 &xa->xa_lock#11 irq_context: 0 lock#7 &xa->xa_lock#11 pool_lock#2 irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_INET6 irq_context: 0 pernet_ops_rwsem 
k-sk_lock-AF_INET6 k-slock-AF_INET6 irq_context: 0 pernet_ops_rwsem k-slock-AF_INET6 irq_context: 0 pernet_ops_rwsem k-clock-AF_INET6 irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_INET6 &tcp_hashinfo.bhash[i].lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_INET6 &tcp_hashinfo.bhash[i].lock &pcp->lock &zone->lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_INET6 &tcp_hashinfo.bhash[i].lock &zone->lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_INET6 &tcp_hashinfo.bhash[i].lock &____s->seqcount irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_INET6 &tcp_hashinfo.bhash[i].lock pool_lock#2 irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_INET6 &tcp_hashinfo.bhash[i].lock &c->lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_INET6 &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_INET6 &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock &pcp->lock &zone->lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_INET6 &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock &zone->lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_INET6 &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock &____s->seqcount irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_INET6 &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock pool_lock#2 irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_INET6 &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock &c->lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_INET6 &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock k-clock-AF_INET6 irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_INET6 &tcp_hashinfo.bhash[i].lock k-clock-AF_INET6 irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_INET6 &h->lhash2[i].lock irq_context: 0 pernet_ops_rwsem cpu_hotplug_lock wq_pool_mutex irq_context: 0 pernet_ops_rwsem cpu_hotplug_lock wq_pool_mutex fs_reclaim irq_context: 0 pernet_ops_rwsem cpu_hotplug_lock wq_pool_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 pernet_ops_rwsem cpu_hotplug_lock wq_pool_mutex pool_lock#2 irq_context: 0 pernet_ops_rwsem cpu_hotplug_lock wq_pool_mutex &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem cpu_hotplug_lock wq_pool_mutex &wq->mutex irq_context: 0 pernet_ops_rwsem cpu_hotplug_lock wq_pool_mutex &wq->mutex &pool->lock/1 irq_context: 0 pernet_ops_rwsem wq_pool_mutex irq_context: 0 pernet_ops_rwsem wq_pool_mutex &wq->mutex irq_context: 0 pernet_ops_rwsem pcpu_lock irq_context: 0 pernet_ops_rwsem &list->lock#4 irq_context: 0 pernet_ops_rwsem &dir->lock#2 irq_context: 0 pernet_ops_rwsem ptype_lock irq_context: 0 pernet_ops_rwsem rcu_read_lock rhashtable_bucket irq_context: 0 pernet_ops_rwsem k-clock-AF_TIPC irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_TIPC irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_TIPC k-slock-AF_TIPC irq_context: 0 pernet_ops_rwsem k-slock-AF_TIPC irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_TIPC &tn->nametbl_lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_TIPC &tn->nametbl_lock &c->lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_TIPC &tn->nametbl_lock &pcp->lock &zone->lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_TIPC &tn->nametbl_lock &zone->lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_TIPC &tn->nametbl_lock &____s->seqcount irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_TIPC &tn->nametbl_lock pool_lock#2 irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_TIPC &tn->nametbl_lock &service->lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_TIPC &tn->nametbl_lock &service->lock pool_lock#2 irq_context: 0 pernet_ops_rwsem rtnl_mutex &pnettable->lock irq_context: 0 
pernet_ops_rwsem rtnl_mutex smc_ib_devices.mutex irq_context: 0 smc_wr_rx_hash_lock irq_context: 0 v9fs_trans_lock irq_context: 0 pernet_ops_rwsem &this->receive_lock irq_context: 0 &x->wait#17 &p->pi_lock irq_context: 0 &x->wait#17 &p->pi_lock &rq->__lock irq_context: 0 &x->wait#17 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 lowpan_nhc_lock irq_context: 0 ovs_mutex irq_context: 0 pernet_ops_rwsem once_lock irq_context: 0 pernet_ops_rwsem once_lock crngs.lock irq_context: 0 pernet_ops_rwsem rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex defrag4_mutex irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex defrag4_mutex nf_hook_mutex irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex defrag4_mutex nf_hook_mutex fs_reclaim irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex defrag4_mutex nf_hook_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex defrag4_mutex nf_hook_mutex pool_lock#2 irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex defrag4_mutex cpu_hotplug_lock irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex defrag4_mutex cpu_hotplug_lock jump_label_mutex irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex defrag4_mutex cpu_hotplug_lock jump_label_mutex text_mutex irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex defrag4_mutex cpu_hotplug_lock jump_label_mutex text_mutex ptlock_ptr(page)#2 irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex nf_hook_mutex irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex nf_hook_mutex fs_reclaim irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex nf_hook_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex nf_hook_mutex pool_lock#2 irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex cpu_hotplug_lock irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex pool_lock#2 irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex cpu_hotplug_lock jump_label_mutex irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex defrag6_mutex irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex defrag6_mutex nf_hook_mutex irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex defrag6_mutex nf_hook_mutex fs_reclaim irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex defrag6_mutex nf_hook_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex defrag6_mutex nf_hook_mutex pool_lock#2 irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex defrag6_mutex cpu_hotplug_lock irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex defrag6_mutex cpu_hotplug_lock jump_label_mutex irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex defrag6_mutex cpu_hotplug_lock jump_label_mutex text_mutex irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex defrag6_mutex cpu_hotplug_lock jump_label_mutex text_mutex ptlock_ptr(page)#2 irq_context: 0 cpu_hotplug_lock cpuhp_state-up &dev->mutex &k->list_lock irq_context: 0 cpu_hotplug_lock cpuhp_state-up &dev->mutex &k->k_lock irq_context: 0 cpu_hotplug_lock cpuhp_state-up &dev->mutex &dev->power.lock irq_context: 0 cpu_hotplug_lock cpuhp_state-up subsys mutex#78 irq_context: 0 cpu_hotplug_lock cpuhp_state-up &base->lock irq_context: 0 cpu_hotplug_lock cpuhp_state-up &base->lock &obj_hash[i].lock irq_context: 0 cpu_hotplug_lock static_call_mutex text_mutex ptlock_ptr(page)#2 irq_context: 0 &mm->mmap_lock fs_reclaim mmu_notifier_invalidate_range_start &cfs_rq->removed.lock 
irq_context: 0 &mm->mmap_lock fs_reclaim mmu_notifier_invalidate_range_start &obj_hash[i].lock irq_context: 0 rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override &pcp->lock &zone->lock irq_context: 0 rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override &zone->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) fs_reclaim &rq->__lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) fs_reclaim &rq->__lock &cfs_rq->removed.lock irq_context: softirq &(&gc_work->dwork)->timer irq_context: softirq &(&gc_work->dwork)->timer rcu_read_lock &pool->lock irq_context: softirq &(&gc_work->dwork)->timer rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: softirq &(&gc_work->dwork)->timer rcu_read_lock &pool->lock &p->pi_lock irq_context: softirq &(&gc_work->dwork)->timer rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: softirq &(&gc_work->dwork)->timer rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&gc_work->dwork)->work) irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&gc_work->dwork)->work) rcu_read_lock &____s->seqcount#7 irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&gc_work->dwork)->work) rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&gc_work->dwork)->work) rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&gc_work->dwork)->work) rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&gc_work->dwork)->work) rcu_read_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&gc_work->dwork)->work) rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&gc_work->dwork)->work) rcu_read_lock &rcu_state.gp_wq irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&gc_work->dwork)->work) rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&gc_work->dwork)->work) rcu_read_lock &rcu_state.gp_wq &p->pi_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&gc_work->dwork)->work) rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&gc_work->dwork)->work) rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&gc_work->dwork)->work) rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&gc_work->dwork)->work) rcu_read_lock &pool->lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&gc_work->dwork)->work) rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 kernfs_idr_lock &obj_hash[i].lock irq_context: 0 kernfs_idr_lock pool_lock#2 irq_context: 0 kernfs_idr_lock &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)events (work_completion)(&p->wq) &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&p->wq) rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)events (work_completion)(&p->wq) rcu_read_lock &rq->__lock irq_context: 0 
(wq_completion)events_power_efficient (work_completion)(&(&gc_work->dwork)->work) rcu_node_0 irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&gc_work->dwork)->work) &rq->__lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&gc_work->dwork)->work) &cfs_rq->removed.lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&gc_work->dwork)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&gc_work->dwork)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &root->kernfs_rwsem kernfs_idr_lock &obj_hash[i].lock pool_lock irq_context: softirq &(&ipvs->defense_work)->timer irq_context: softirq &(&ipvs->defense_work)->timer rcu_read_lock &pool->lock irq_context: softirq &(&ipvs->defense_work)->timer rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events_long (work_completion)(&(&ipvs->defense_work)->work) irq_context: 0 (wq_completion)events_long (work_completion)(&(&ipvs->defense_work)->work) &s->s_inode_list_lock irq_context: 0 (wq_completion)events_long (work_completion)(&(&ipvs->defense_work)->work) &ipvs->dropentry_lock irq_context: 0 (wq_completion)events_long (work_completion)(&(&ipvs->defense_work)->work) &ipvs->droppacket_lock irq_context: 0 (wq_completion)events_long (work_completion)(&(&ipvs->defense_work)->work) &ipvs->securetcp_lock irq_context: 0 (wq_completion)events_long (work_completion)(&(&ipvs->defense_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)events_long (work_completion)(&(&ipvs->defense_work)->work) &base->lock irq_context: 0 (wq_completion)events_long (work_completion)(&(&ipvs->defense_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&gc_work->dwork)->work) rcu_read_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&gc_work->dwork)->work) rcu_read_lock pool_lock#2 irq_context: hardirq &x->wait#5 irq_context: 0 &root->kernfs_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &pool->lock/1 &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &sig->cred_guard_mutex &dentry->d_lock &dentry->d_lock/1 irq_context: 0 &sig->cred_guard_mutex &dentry->d_lock/1 irq_context: 0 &sig->cred_guard_mutex &fs->lock &dentry->d_lock irq_context: 0 (wq_completion)events (work_completion)(&p->wq) &cfs_rq->removed.lock irq_context: 0 (wq_completion)events (work_completion)(&p->wq) rcu_read_lock &pool->lock irq_context: 0 (wq_completion)events (work_completion)(&p->wq) rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events drain_vmap_work irq_context: 0 (wq_completion)events drain_vmap_work vmap_purge_lock irq_context: 0 (wq_completion)events drain_vmap_work vmap_purge_lock purge_vmap_area_lock irq_context: 0 (wq_completion)events drain_vmap_work vmap_purge_lock free_vmap_area_lock irq_context: 0 (wq_completion)events drain_vmap_work vmap_purge_lock free_vmap_area_lock &obj_hash[i].lock irq_context: 0 (wq_completion)events drain_vmap_work vmap_purge_lock free_vmap_area_lock pool_lock#2 irq_context: 0 (wq_completion)events drain_vmap_work vmap_purge_lock free_vmap_area_lock init_mm.page_table_lock irq_context: 0 (wq_completion)events drain_vmap_work vmap_purge_lock free_vmap_area_lock init_mm.page_table_lock rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)events drain_vmap_work vmap_purge_lock free_vmap_area_lock 
init_mm.page_table_lock &obj_hash[i].lock irq_context: 0 (wq_completion)events drain_vmap_work vmap_purge_lock &rq->__lock irq_context: 0 (wq_completion)events drain_vmap_work vmap_purge_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) rcu_read_lock &rcu_state.gp_wq irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&gc_work->dwork)->work) pool_lock#2 irq_context: 0 &root->kernfs_rwsem &root->kernfs_iattr_rwsem &rq->__lock irq_context: 0 &root->kernfs_rwsem &root->kernfs_iattr_rwsem &cfs_rq->removed.lock irq_context: 0 &root->kernfs_rwsem &root->kernfs_iattr_rwsem &obj_hash[i].lock irq_context: 0 (wq_completion)events drain_vmap_work vmap_purge_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)events drain_vmap_work vmap_purge_lock &obj_hash[i].lock irq_context: 0 (wq_completion)events drain_vmap_work vmap_purge_lock pool_lock#2 irq_context: 0 rcu_read_lock &cfs_rq->removed.lock irq_context: softirq (&net->can.stattimer) irq_context: softirq (&net->can.stattimer) &obj_hash[i].lock irq_context: softirq (&net->can.stattimer) &base->lock irq_context: softirq (&net->can.stattimer) &base->lock &obj_hash[i].lock irq_context: 0 &sig->cred_guard_mutex &sb->s_type->i_mutex_key &cfs_rq->removed.lock irq_context: 0 (wq_completion)events (work_completion)(&p->wq) rcu_read_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)events (work_completion)(&p->wq) rcu_read_lock &obj_hash[i].lock irq_context: 0 &root->kernfs_rwsem quarantine_lock irq_context: hardirq allocation_wait.lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) rcu_read_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 pcpu_alloc_mutex &cfs_rq->removed.lock irq_context: 0 pcpu_alloc_mutex &obj_hash[i].lock irq_context: 0 pcpu_alloc_mutex pool_lock#2 irq_context: 0 (wq_completion)events_unbound &rq->__lock irq_context: 0 &sig->cred_guard_mutex &sb->s_type->i_mutex_key rcu_read_lock rcu_node_0 irq_context: 0 &sig->cred_guard_mutex &sb->s_type->i_mutex_key rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&p->wq) &pcp->lock &zone->lock irq_context: 0 (wq_completion)events (work_completion)(&p->wq) &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &sig->cred_guard_mutex &sb->s_type->i_mutex_key rcu_read_lock &cfs_rq->removed.lock irq_context: 0 &sig->cred_guard_mutex &sb->s_type->i_mutex_key rcu_read_lock &obj_hash[i].lock irq_context: 0 &sig->cred_guard_mutex &sb->s_type->i_mutex_key rcu_read_lock &rcu_state.gp_wq irq_context: 0 &sig->cred_guard_mutex &sb->s_type->i_mutex_key rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 &sig->cred_guard_mutex &sb->s_type->i_mutex_key rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock 
irq_context: 0 &sig->cred_guard_mutex &sb->s_type->i_mutex_key rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sig->cred_guard_mutex &sb->s_type->i_mutex_key rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) cgroup_threadgroup_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) cgroup_threadgroup_rwsem &cfs_rq->removed.lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) cgroup_threadgroup_rwsem &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) cgroup_threadgroup_rwsem pool_lock#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&gc_work->dwork)->work) &base->lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&gc_work->dwork)->work) &base->lock &obj_hash[i].lock irq_context: 0 fs_reclaim &cfs_rq->removed.lock irq_context: 0 fs_reclaim &obj_hash[i].lock irq_context: 0 lock map_idr_lock irq_context: 0 lock map_idr_lock &pcp->lock &zone->lock irq_context: 0 lock map_idr_lock &zone->lock irq_context: 0 lock map_idr_lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 lock map_idr_lock &____s->seqcount irq_context: 0 lock map_idr_lock pool_lock#2 irq_context: 0 lock map_idr_lock rcu_read_lock pool_lock#2 irq_context: 0 lock map_idr_lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_lock_key#15 &dentry->d_lock irq_context: 0 purge_vmap_area_lock irq_context: 0 lock prog_idr_lock irq_context: 0 lock prog_idr_lock pool_lock#2 irq_context: 0 bpf_lock irq_context: 0 rcu_read_lock_trace fs_reclaim irq_context: 0 rcu_read_lock_trace fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 rcu_read_lock_trace pool_lock#2 irq_context: 0 rcu_read_lock_trace &obj_hash[i].lock irq_context: 0 rcu_read_lock_trace &____s->seqcount irq_context: 0 rcu_read_lock_trace &pcp->lock &zone->lock irq_context: 0 rcu_read_lock_trace &zone->lock irq_context: 0 rcu_read_lock_trace rcu_read_lock pool_lock#2 irq_context: 0 rcu_read_lock_trace lock irq_context: 0 rcu_read_lock_trace lock btf_idr_lock irq_context: 0 rcu_read_lock_trace lock btf_idr_lock pool_lock#2 irq_context: 0 rcu_read_lock_trace rcu_tasks_trace__percpu.cbs_pcpu_lock irq_context: 0 rcu_read_lock_trace &rq->__lock irq_context: 0 rcu_read_lock_trace &newf->file_lock irq_context: 0 rcu_read_lock_trace &sb->s_type->i_lock_key#15 irq_context: 0 rcu_read_lock_trace &sb->s_type->i_lock_key#15 &dentry->d_lock irq_context: 0 rcu_read_lock_trace free_vmap_area_lock irq_context: 0 rcu_read_lock_trace vmap_area_lock irq_context: 0 rcu_read_lock_trace lock map_idr_lock irq_context: 0 rcu_read_lock_trace &map->freeze_mutex irq_context: 0 rcu_read_lock_trace &p->pi_lock irq_context: 0 rcu_read_lock_trace &p->pi_lock &rq->__lock irq_context: 0 rcu_read_lock_trace &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rcu_tasks_trace__percpu.cbs_pcpu_lock irq_context: 0 key_types_sem irq_context: 0 key_types_sem asymmetric_key_parsers_sem irq_context: 0 key_types_sem asymmetric_key_parsers_sem fs_reclaim irq_context: 0 key_types_sem asymmetric_key_parsers_sem fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 
key_types_sem asymmetric_key_parsers_sem pool_lock#2 irq_context: 0 key_types_sem asymmetric_key_parsers_sem crypto_alg_sem irq_context: 0 key_types_sem asymmetric_key_parsers_sem &obj_hash[i].lock irq_context: 0 key_types_sem asymmetric_key_parsers_sem crypto_alg_sem irq_context: 0 key_types_sem asymmetric_key_parsers_sem (crypto_chain).rwsem irq_context: 0 key_types_sem asymmetric_key_parsers_sem (crypto_chain).rwsem fs_reclaim irq_context: 0 key_types_sem asymmetric_key_parsers_sem (crypto_chain).rwsem fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 key_types_sem asymmetric_key_parsers_sem (crypto_chain).rwsem &c->lock irq_context: 0 key_types_sem asymmetric_key_parsers_sem (crypto_chain).rwsem &pcp->lock &zone->lock irq_context: 0 key_types_sem asymmetric_key_parsers_sem (crypto_chain).rwsem &zone->lock irq_context: 0 key_types_sem asymmetric_key_parsers_sem (crypto_chain).rwsem &____s->seqcount irq_context: 0 key_types_sem asymmetric_key_parsers_sem (crypto_chain).rwsem pool_lock#2 irq_context: 0 key_types_sem asymmetric_key_parsers_sem (crypto_chain).rwsem kthread_create_lock irq_context: 0 key_types_sem asymmetric_key_parsers_sem (crypto_chain).rwsem &p->pi_lock irq_context: 0 key_types_sem asymmetric_key_parsers_sem (crypto_chain).rwsem &p->pi_lock &rq->__lock irq_context: 0 key_types_sem asymmetric_key_parsers_sem (crypto_chain).rwsem &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 key_types_sem asymmetric_key_parsers_sem (crypto_chain).rwsem &rq->__lock irq_context: 0 key_types_sem asymmetric_key_parsers_sem (crypto_chain).rwsem &x->wait irq_context: 0 key_types_sem asymmetric_key_parsers_sem (crypto_chain).rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 key_types_sem asymmetric_key_parsers_sem (crypto_chain).rwsem &obj_hash[i].lock irq_context: 0 key_types_sem asymmetric_key_parsers_sem &x->wait#21 irq_context: 0 key_types_sem asymmetric_key_parsers_sem &base->lock irq_context: 0 key_types_sem asymmetric_key_parsers_sem &base->lock &obj_hash[i].lock irq_context: 0 key_types_sem asymmetric_key_parsers_sem &rq->__lock irq_context: 0 key_types_sem asymmetric_key_parsers_sem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 key_types_sem asymmetric_key_parsers_sem (&timer.timer) irq_context: 0 key_types_sem asymmetric_key_parsers_sem &____s->seqcount irq_context: 0 key_types_sem asymmetric_key_parsers_sem &pcp->lock &zone->lock irq_context: 0 key_types_sem asymmetric_key_parsers_sem &zone->lock irq_context: 0 key_types_sem asymmetric_key_parsers_sem rcu_read_lock pool_lock#2 irq_context: 0 key_types_sem &type->lock_class irq_context: 0 key_types_sem &type->lock_class fs_reclaim irq_context: 0 key_types_sem &type->lock_class fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 key_types_sem &type->lock_class pool_lock#2 irq_context: 0 key_types_sem &type->lock_class &c->lock irq_context: 0 key_types_sem &type->lock_class &pcp->lock &zone->lock irq_context: 0 key_types_sem &type->lock_class &zone->lock irq_context: 0 key_types_sem &type->lock_class &____s->seqcount irq_context: 0 key_types_sem &type->lock_class key_user_lock irq_context: 0 key_types_sem &type->lock_class crngs.lock irq_context: 0 key_types_sem &type->lock_class key_serial_lock irq_context: 0 key_types_sem &type->lock_class key_construction_mutex irq_context: 0 key_types_sem &type->lock_class key_construction_mutex &obj_hash[i].lock irq_context: 0 key_types_sem &type->lock_class key_construction_mutex pool_lock#2 irq_context: 0 
key_types_sem &type->lock_class &pcp->lock &zone->lock &____s->seqcount irq_context: 0 key_types_sem &type->lock_class ima_keys_lock irq_context: 0 key_types_sem &obj_hash[i].lock irq_context: 0 key_types_sem pool_lock#2 irq_context: 0 cpu_hotplug_lock cpuhp_state_mutex &pcp->lock &zone->lock irq_context: 0 cpu_hotplug_lock cpuhp_state_mutex &zone->lock irq_context: 0 cpu_hotplug_lock cpuhp_state_mutex crypto_alg_sem irq_context: 0 cpu_hotplug_lock cpuhp_state_mutex scomp_lock irq_context: 0 cpu_hotplug_lock cpuhp_state_mutex scomp_lock fs_reclaim irq_context: 0 cpu_hotplug_lock cpuhp_state_mutex scomp_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 cpu_hotplug_lock cpuhp_state_mutex scomp_lock pool_lock#2 irq_context: 0 cpu_hotplug_lock cpuhp_state_mutex scomp_lock free_vmap_area_lock irq_context: 0 cpu_hotplug_lock cpuhp_state_mutex scomp_lock vmap_area_lock irq_context: 0 cpu_hotplug_lock cpuhp_state_mutex scomp_lock &____s->seqcount irq_context: 0 cpu_hotplug_lock cpuhp_state_mutex scomp_lock init_mm.page_table_lock irq_context: 0 cpu_hotplug_lock cpuhp_state_mutex scomp_lock &pcp->lock &zone->lock irq_context: 0 cpu_hotplug_lock cpuhp_state_mutex scomp_lock &zone->lock irq_context: 0 cpu_hotplug_lock cpuhp_state_mutex scomp_lock &c->lock irq_context: 0 cpu_hotplug_lock cpuhp_state_mutex scomp_lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 slab_mutex lock irq_context: 0 slab_mutex lock kernfs_idr_lock irq_context: 0 slab_mutex &root->kernfs_rwsem irq_context: 0 slab_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 slab_mutex &k->list_lock irq_context: 0 slab_mutex lock kernfs_idr_lock pool_lock#2 irq_context: 0 slab_mutex lock kernfs_idr_lock &c->lock irq_context: 0 slab_mutex lock kernfs_idr_lock &____s->seqcount irq_context: 0 slab_mutex lock kernfs_idr_lock &pcp->lock &zone->lock irq_context: 0 slab_mutex lock kernfs_idr_lock &zone->lock irq_context: 0 slab_mutex &root->kernfs_rwsem kernfs_idr_lock irq_context: 0 slab_mutex &root->kernfs_rwsem &obj_hash[i].lock irq_context: 0 slab_mutex &root->kernfs_rwsem pool_lock#2 irq_context: 0 slab_mutex lock kernfs_idr_lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 pcpu_drain_mutex &pcp->lock irq_context: 0 pcpu_drain_mutex &pcp->lock &zone->lock irq_context: 0 pcpu_drain_mutex &pcp->lock &zone->lock &____s->seqcount irq_context: 0 rcu_state.exp_mutex &rnp->exp_wq[0] irq_context: softirq &(&ipvs->defense_work)->timer rcu_read_lock &pool->lock &p->pi_lock irq_context: softirq &(&ipvs->defense_work)->timer rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: softirq &(&ipvs->defense_work)->timer rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&(&krcp->monitor_work)->work) rcu_callback &obj_hash[i].lock pool_lock irq_context: softirq &(&ovs_net->masks_rebalance)->timer irq_context: softirq &(&ovs_net->masks_rebalance)->timer rcu_read_lock &pool->lock irq_context: softirq &(&ovs_net->masks_rebalance)->timer rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: softirq &(&ovs_net->masks_rebalance)->timer rcu_read_lock &pool->lock &p->pi_lock irq_context: softirq &(&ovs_net->masks_rebalance)->timer rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: softirq &(&ovs_net->masks_rebalance)->timer rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&(&ovs_net->masks_rebalance)->work) 
irq_context: 0 (wq_completion)events (work_completion)(&(&ovs_net->masks_rebalance)->work) ovs_mutex irq_context: 0 (wq_completion)events (work_completion)(&(&ovs_net->masks_rebalance)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&(&ovs_net->masks_rebalance)->work) &base->lock irq_context: 0 (wq_completion)events (work_completion)(&(&ovs_net->masks_rebalance)->work) &base->lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#3 batched_entropy_u8.lock irq_context: 0 &sb->s_type->i_mutex_key#3 kfence_freelist_lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC k-slock-AF_RXRPC irq_context: 0 pernet_ops_rwsem k-slock-AF_RXRPC irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex fs_reclaim irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex pool_lock#2 irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex crngs.lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex mmu_notifier_invalidate_range_start irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex &sb->s_type->i_lock_key#8 irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex &____s->seqcount irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex &c->lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex &dir->lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex k-sk_lock-AF_INET6 irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex k-sk_lock-AF_INET6 k-slock-AF_INET6 irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex k-sk_lock-AF_INET6 &table->hash[i].lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex k-sk_lock-AF_INET6 &table->hash[i].lock k-clock-AF_INET6 irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex k-sk_lock-AF_INET6 &table->hash[i].lock &table->hash2[i].lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex k-slock-AF_INET6 irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex cpu_hotplug_lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex cpu_hotplug_lock jump_label_mutex irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex cpu_hotplug_lock jump_label_mutex text_mutex irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex cpu_hotplug_lock jump_label_mutex text_mutex ptlock_ptr(page)#2 irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex k-sk_lock-AF_INET6 rcu_read_lock &pool->lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex k-sk_lock-AF_INET6 rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex k-sk_lock-AF_INET6 rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex k-sk_lock-AF_INET6 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex k-sk_lock-AF_INET6 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem 
k-sk_lock-AF_RXRPC &rxnet->local_mutex &rq->__lock irq_context: 0 (wq_completion)events netstamp_work irq_context: 0 (wq_completion)events netstamp_work cpu_hotplug_lock irq_context: 0 (wq_completion)events netstamp_work cpu_hotplug_lock jump_label_mutex irq_context: 0 (wq_completion)events netstamp_work cpu_hotplug_lock jump_label_mutex text_mutex irq_context: 0 (wq_completion)events netstamp_work cpu_hotplug_lock jump_label_mutex text_mutex ptlock_ptr(page)#2 irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex kthread_create_lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex &p->pi_lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex &p->pi_lock &rq->__lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex &x->wait irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex &x->wait#22 irq_context: 0 &x->wait#22 irq_context: 0 &x->wait#22 &p->pi_lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &local->services_lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC fs_reclaim irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC pool_lock#2 irq_context: 0 pernet_ops_rwsem &rxnet->conn_lock irq_context: 0 pernet_ops_rwsem &call->waitq irq_context: 0 pernet_ops_rwsem &rx->call_lock irq_context: 0 pernet_ops_rwsem &rxnet->call_lock irq_context: 0 bio_slab_lock slab_mutex &root->kernfs_rwsem irq_context: 0 bio_slab_lock slab_mutex &k->list_lock irq_context: 0 bio_slab_lock slab_mutex lock irq_context: 0 bio_slab_lock slab_mutex lock kernfs_idr_lock irq_context: 0 bio_slab_lock slab_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 bio_slab_lock slab_mutex &____s->seqcount irq_context: 0 bio_slab_lock slab_mutex lock kernfs_idr_lock pool_lock#2 irq_context: 0 bio_slab_lock slab_mutex lock kernfs_idr_lock &c->lock irq_context: 0 bio_slab_lock slab_mutex lock kernfs_idr_lock &____s->seqcount irq_context: softirq (&rxnet->peer_keepalive_timer) irq_context: softirq (&rxnet->peer_keepalive_timer) rcu_read_lock &pool->lock/1 irq_context: softirq (&rxnet->peer_keepalive_timer) rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: softirq (&rxnet->peer_keepalive_timer) rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: softirq (&rxnet->peer_keepalive_timer) rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: softirq (&rxnet->peer_keepalive_timer) rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 slab_mutex &root->kernfs_rwsem &rq->__lock irq_context: 0 (wq_completion)krxrpcd irq_context: 0 (wq_completion)krxrpcd (work_completion)(&rxnet->peer_keepalive_work) irq_context: 0 (wq_completion)krxrpcd (work_completion)(&rxnet->peer_keepalive_work) &rxnet->peer_hash_lock irq_context: 0 (wq_completion)krxrpcd (work_completion)(&rxnet->peer_keepalive_work) &obj_hash[i].lock irq_context: 0 (wq_completion)krxrpcd (work_completion)(&rxnet->peer_keepalive_work) &base->lock irq_context: 0 (wq_completion)krxrpcd (work_completion)(&rxnet->peer_keepalive_work) &base->lock &obj_hash[i].lock irq_context: 0 init_user_ns.keyring_sem irq_context: 0 init_user_ns.keyring_sem key_user_lock 
irq_context: 0 init_user_ns.keyring_sem root_key_user.lock irq_context: 0 init_user_ns.keyring_sem fs_reclaim irq_context: 0 init_user_ns.keyring_sem fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 init_user_ns.keyring_sem pool_lock#2 irq_context: 0 init_user_ns.keyring_sem crngs.lock irq_context: 0 init_user_ns.keyring_sem key_serial_lock irq_context: 0 init_user_ns.keyring_sem key_construction_mutex irq_context: 0 init_user_ns.keyring_sem &type->lock_class irq_context: 0 init_user_ns.keyring_sem &type->lock_class keyring_serialise_link_lock irq_context: 0 init_user_ns.keyring_sem &type->lock_class keyring_serialise_link_lock fs_reclaim irq_context: 0 init_user_ns.keyring_sem &type->lock_class keyring_serialise_link_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 init_user_ns.keyring_sem &type->lock_class keyring_serialise_link_lock pool_lock#2 irq_context: 0 init_user_ns.keyring_sem &type->lock_class keyring_serialise_link_lock root_key_user.lock irq_context: 0 init_user_ns.keyring_sem &type->lock_class keyring_serialise_link_lock key_construction_mutex irq_context: 0 init_user_ns.keyring_sem &type->lock_class keyring_serialise_link_lock key_construction_mutex keyring_name_lock irq_context: 0 init_user_ns.keyring_sem &type->lock_class keyring_serialise_link_lock key_construction_mutex &obj_hash[i].lock irq_context: 0 init_user_ns.keyring_sem &type->lock_class keyring_serialise_link_lock key_construction_mutex pool_lock#2 irq_context: 0 init_user_ns.keyring_sem keyring_serialise_link_lock irq_context: 0 init_user_ns.keyring_sem key_construction_mutex keyring_name_lock irq_context: 0 init_user_ns.keyring_sem &type->lock_class keyring_serialise_link_lock &obj_hash[i].lock irq_context: 0 &x->wait#21 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 template_list irq_context: 0 idr_lock irq_context: 0 key_types_sem asymmetric_key_parsers_sem &c->lock irq_context: 0 ima_extend_list_mutex irq_context: 0 ima_extend_list_mutex fs_reclaim irq_context: 0 ima_extend_list_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 ima_extend_list_mutex pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#2 &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#2 batched_entropy_u8.lock irq_context: 0 &sb->s_type->i_mutex_key#2 kfence_freelist_lock irq_context: 0 &sb->s_type->i_mutex_key#2 &pcp->lock &zone->lock &____s->seqcount irq_context: 0 pci_bus_sem irq_context: 0 clk_debug_lock irq_context: 0 (wq_completion)events_unbound deferred_probe_work irq_context: 0 (wq_completion)events_unbound deferred_probe_work deferred_probe_mutex irq_context: 0 deferred_probe_work irq_context: 0 dpm_list_mtx (console_sem).lock irq_context: 0 dpm_list_mtx console_lock console_srcu console_owner_lock irq_context: 0 dpm_list_mtx console_lock console_srcu console_owner irq_context: 0 dpm_list_mtx console_lock console_srcu console_owner &port_lock_key irq_context: 0 dpm_list_mtx console_lock console_srcu console_owner console_owner_lock irq_context: 0 console_mutex &root->kernfs_rwsem irq_context: 0 console_mutex kernfs_notify_lock irq_context: 0 console_mutex kernfs_notify_lock rcu_read_lock &pool->lock irq_context: 0 console_mutex kernfs_notify_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 console_mutex kernfs_notify_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 console_mutex kernfs_notify_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 console_mutex kernfs_notify_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock 
&per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 console_mutex kernfs_notify_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 console_mutex &rq->__lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &(&net->nexthop.notifier_chain)->rwsem irq_context: 0 k-sk_lock-AF_INET irq_context: 0 k-sk_lock-AF_INET k-slock-AF_INET irq_context: 0 k-sk_lock-AF_INET &table->hash[i].lock irq_context: 0 k-sk_lock-AF_INET &table->hash[i].lock k-clock-AF_INET irq_context: 0 k-sk_lock-AF_INET &table->hash[i].lock &table->hash2[i].lock irq_context: 0 k-slock-AF_INET irq_context: 0 k-sk_lock-AF_INET6 irq_context: 0 k-sk_lock-AF_INET6 k-slock-AF_INET6 irq_context: 0 k-slock-AF_INET6 irq_context: 0 k-sk_lock-AF_INET6 &table->hash[i].lock irq_context: 0 k-sk_lock-AF_INET6 &table->hash[i].lock k-clock-AF_INET6 irq_context: 0 k-sk_lock-AF_INET6 &table->hash[i].lock &table->hash2[i].lock irq_context: 0 reg_requests_lock irq_context: 0 (wq_completion)events reg_work irq_context: 0 (wq_completion)events reg_work rtnl_mutex irq_context: 0 (wq_completion)events reg_work rtnl_mutex reg_requests_lock irq_context: 0 (wq_completion)events reg_work rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)events reg_work rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events reg_work rtnl_mutex pool_lock#2 irq_context: 0 (wq_completion)events reg_work rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)events reg_work rtnl_mutex &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)events reg_work rtnl_mutex rcu_read_lock &pool->lock irq_context: 0 (wq_completion)events reg_work rtnl_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events reg_work rtnl_mutex rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 (wq_completion)events reg_work rtnl_mutex reg_pending_beacons_lock irq_context: 0 (wq_completion)events reg_work rtnl_mutex &rdev->wiphy.mtx irq_context: 0 (wq_completion)events reg_work rtnl_mutex &rdev->wiphy.mtx reg_requests_lock irq_context: 0 (wq_completion)events reg_work rtnl_mutex &base->lock irq_context: 0 (wq_completion)events reg_work rtnl_mutex &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) fs_reclaim irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) &fw_cache.lock irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) &fw_cache.lock pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) &c->lock irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) &pcp->lock &zone->lock irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) &zone->lock irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) &____s->seqcount irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) tk_core.seq.seqcount irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) async_lock irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) init_task.alloc_lock irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) init_task.alloc_lock init_fs.lock irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) init_task.alloc_lock 
init_fs.lock &dentry->d_lock irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) rcu_read_lock &____s->seqcount#4 irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) rcu_read_lock &dentry->d_lock irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) &sb->s_type->i_mutex_key irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) &sb->s_type->i_mutex_key fs_reclaim irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) &sb->s_type->i_mutex_key fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) &sb->s_type->i_mutex_key pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) &sb->s_type->i_mutex_key &dentry->d_lock irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) &sb->s_type->i_mutex_key rcu_read_lock rename_lock.seqcount irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) &sb->s_type->i_mutex_key &dentry->d_lock &wq irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) &dentry->d_lock irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) &base->lock irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) &sb->s_type->i_mutex_key &c->lock irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) &sb->s_type->i_mutex_key &pcp->lock &zone->lock irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) &sb->s_type->i_mutex_key &zone->lock irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) &sb->s_type->i_mutex_key &pcp->lock &zone->lock &____s->seqcount irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) &sb->s_type->i_mutex_key &____s->seqcount irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) (console_sem).lock irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) console_lock console_srcu console_owner_lock irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) console_lock console_srcu console_owner irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) console_lock console_srcu console_owner &port_lock_key irq_context: 0 system_transition_mutex/1 irq_context: 0 &wq->mutex &pool->lock/1 irq_context: 0 &wq->mutex &x->wait#10 irq_context: 0 &pool->lock/1 rcu_read_lock &pool->lock irq_context: 0 &pool->lock/1 rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 &pool->lock/1 rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 &pool->lock/1 rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 &pool->lock/1 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 &pool->lock/1 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&pwq->unbound_release_work) rcu_state.exp_mutex &rnp->exp_wq[1] irq_context: 0 acpi_gpio_deferred_req_irqs_lock irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) console_lock console_srcu console_owner console_owner_lock irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) rcu_node_0 irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) &rcu_state.expedited_wq irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) 
&rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) console_owner_lock irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) console_owner irq_context: softirq fs/file_table.c:368 irq_context: softirq fs/file_table.c:368 rcu_read_lock &pool->lock irq_context: softirq fs/file_table.c:368 rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem usermodehelper_disabled_waitq.lock irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem fs_reclaim irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem &x->wait#9 irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem &k->list_lock irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem gdp_mutex irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem gdp_mutex &k->list_lock irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem gdp_mutex fs_reclaim irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem gdp_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem gdp_mutex pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem gdp_mutex lock irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem gdp_mutex lock kernfs_idr_lock irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem gdp_mutex &root->kernfs_rwsem irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem gdp_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem lock irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem lock kernfs_idr_lock irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem &root->kernfs_rwsem irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem bus_type_sem irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem sysfs_symlink_target_lock irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem &c->lock irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem &pcp->lock &zone->lock irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem &zone->lock irq_context: 0 
(wq_completion)events (work_completion)(&fw_work->work) umhelper_sem &____s->seqcount irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem &root->kernfs_rwsem irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem &dev->power.lock irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem dpm_list_mtx irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem &k->k_lock irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem subsys mutex#79 irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem subsys mutex#79 &k->k_lock irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem fw_lock irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem uevent_sock_mutex irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem rcu_read_lock &pool->lock/1 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem running_helpers_waitq.lock irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem &x->wait#23 irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem &base->lock irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem &pool->lock irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem &pool->lock &p->pi_lock irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (delayed_fput_work).work irq_context: 0 (wq_completion)events (delayed_fput_work).work &obj_hash[i].lock irq_context: 0 (wq_completion)events (delayed_fput_work).work pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_lock_key#2 irq_context: 0 tomoyo_ss rcu_read_lock mount_lock.seqcount irq_context: 0 tomoyo_ss rcu_read_lock rcu_read_lock rename_lock.seqcount irq_context: 0 tomoyo_ss &obj_hash[i].lock irq_context: 0 tomoyo_ss rcu_read_lock &dentry->d_lock irq_context: 0 tomoyo_ss tomoyo_log_lock irq_context: 0 tomoyo_ss tomoyo_log_wait.lock irq_context: 0 
tomoyo_ss &c->lock irq_context: 0 tomoyo_ss &pcp->lock &zone->lock irq_context: 0 tomoyo_ss &zone->lock irq_context: 0 tomoyo_ss &____s->seqcount irq_context: 0 tomoyo_ss rcu_read_lock pool_lock#2 irq_context: 0 cdev_lock irq_context: 0 tty_mutex (console_sem).lock irq_context: 0 tty_mutex console_lock irq_context: 0 tty_mutex fs_reclaim irq_context: 0 tty_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 tty_mutex pool_lock#2 irq_context: 0 tty_mutex tty_ldiscs_lock irq_context: 0 tty_mutex &obj_hash[i].lock irq_context: 0 tty_mutex &k->list_lock irq_context: 0 tty_mutex &k->k_lock irq_context: 0 tty_mutex &tty->legacy_mutex irq_context: 0 tty_mutex &tty->legacy_mutex &tty->read_wait irq_context: 0 tty_mutex &tty->legacy_mutex &tty->write_wait irq_context: 0 tty_mutex &tty->legacy_mutex &tty->ldisc_sem irq_context: 0 tty_mutex &tty->legacy_mutex &tty->ldisc_sem fs_reclaim irq_context: 0 tty_mutex &tty->legacy_mutex &tty->ldisc_sem fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 tty_mutex &tty->legacy_mutex &tty->ldisc_sem pool_lock#2 irq_context: 0 tty_mutex &tty->legacy_mutex &tty->ldisc_sem free_vmap_area_lock irq_context: 0 tty_mutex &tty->legacy_mutex &tty->ldisc_sem vmap_area_lock irq_context: 0 tty_mutex &tty->legacy_mutex &tty->ldisc_sem &____s->seqcount irq_context: 0 tty_mutex &tty->legacy_mutex &tty->ldisc_sem init_mm.page_table_lock irq_context: 0 tty_mutex &tty->legacy_mutex &tty->ldisc_sem &pcp->lock &zone->lock irq_context: 0 tty_mutex &tty->legacy_mutex &tty->ldisc_sem &zone->lock irq_context: 0 tty_mutex &tty->legacy_mutex &tty->ldisc_sem &tty->write_wait irq_context: 0 tty_mutex &tty->legacy_mutex &tty->ldisc_sem &tty->read_wait irq_context: 0 tty_mutex &tty->legacy_mutex &tty->ldisc_sem &tty->termios_rwsem irq_context: 0 &tty->legacy_mutex irq_context: 0 &tty->legacy_mutex &tty->files_lock irq_context: 0 &tty->legacy_mutex &port->lock irq_context: 0 &tty->legacy_mutex &port->mutex irq_context: 0 &tty->legacy_mutex &port->mutex fs_reclaim irq_context: 0 &tty->legacy_mutex &port->mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &tty->legacy_mutex &port->mutex &____s->seqcount irq_context: 0 &tty->legacy_mutex &port->mutex pool_lock#2 irq_context: 0 &tty->legacy_mutex &port->mutex &port_lock_key irq_context: 0 &tty->legacy_mutex &port->mutex hash_mutex irq_context: 0 &tty->legacy_mutex &port->mutex hash_mutex fs_reclaim irq_context: 0 &tty->legacy_mutex &port->mutex hash_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &tty->legacy_mutex &port->mutex hash_mutex pool_lock#2 irq_context: 0 &tty->legacy_mutex &port->mutex &i->lock irq_context: 0 &tty->legacy_mutex &port->mutex &desc->request_mutex irq_context: 0 &tty->legacy_mutex &port->mutex &desc->request_mutex &irq_desc_lock_class irq_context: 0 &tty->legacy_mutex &port->mutex &desc->request_mutex &irq_desc_lock_class vector_lock irq_context: 0 &tty->legacy_mutex &port->mutex &desc->request_mutex &irq_desc_lock_class ioapic_lock irq_context: 0 &tty->legacy_mutex &port->mutex &desc->request_mutex &irq_desc_lock_class mask_lock irq_context: 0 &tty->legacy_mutex &port->mutex &desc->request_mutex &irq_desc_lock_class mask_lock tmp_mask_lock irq_context: 0 &tty->legacy_mutex &port->mutex &desc->request_mutex &irq_desc_lock_class mask_lock tmp_mask_lock vector_lock irq_context: 0 &tty->legacy_mutex &port->mutex &desc->request_mutex &irq_desc_lock_class mask_lock tmp_mask_lock ioapic_lock irq_context: 0 &tty->legacy_mutex &port->mutex &desc->request_mutex 
&irq_desc_lock_class ioapic_lock i8259A_lock irq_context: 0 &tty->legacy_mutex &port->mutex register_lock irq_context: 0 &tty->legacy_mutex &port->mutex &irq_desc_lock_class irq_context: 0 &tty->legacy_mutex &port->mutex proc_subdir_lock irq_context: 0 &tty->legacy_mutex &port->mutex proc_inum_ida.xa_lock irq_context: 0 &tty->legacy_mutex &port->mutex proc_subdir_lock irq_context: hardirq &i->lock irq_context: 0 &tty->legacy_mutex &port_lock_key irq_context: 0 &type->s_umount_key#25/1 lock#5 irq_context: 0 &type->s_umount_key#25/1 &lruvec->lru_lock irq_context: 0 &type->s_umount_key#25/1 rcu_read_lock pool_lock#2 irq_context: 0 &type->s_umount_key#25/1 &pcp->lock &zone->lock irq_context: 0 &type->s_umount_key#25/1 &zone->lock irq_context: 0 &type->s_umount_key#25/1 crypto_alg_sem irq_context: 0 &type->s_umount_key#25/1 shrinker_rwsem irq_context: 0 &type->s_umount_key#25/1 sb_lock irq_context: 0 &bdev->bd_fsfreeze_mutex &c->lock irq_context: 0 &bdev->bd_fsfreeze_mutex &pcp->lock &zone->lock irq_context: 0 &bdev->bd_fsfreeze_mutex &zone->lock irq_context: 0 &bdev->bd_fsfreeze_mutex &____s->seqcount irq_context: 0 &bdev->bd_fsfreeze_mutex &type->s_umount_key#26/1 irq_context: 0 &bdev->bd_fsfreeze_mutex &type->s_umount_key#26/1 fs_reclaim irq_context: 0 &bdev->bd_fsfreeze_mutex &type->s_umount_key#26/1 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &bdev->bd_fsfreeze_mutex &type->s_umount_key#26/1 pcpu_alloc_mutex irq_context: 0 &bdev->bd_fsfreeze_mutex &type->s_umount_key#26/1 pcpu_alloc_mutex pcpu_lock irq_context: 0 &bdev->bd_fsfreeze_mutex &type->s_umount_key#26/1 shrinker_rwsem irq_context: 0 &bdev->bd_fsfreeze_mutex &type->s_umount_key#26/1 list_lrus_mutex irq_context: 0 &bdev->bd_fsfreeze_mutex &type->s_umount_key#26/1 sb_lock irq_context: 0 &type->s_umount_key#26/1 irq_context: 0 &type->s_umount_key#26/1 fs_reclaim irq_context: 0 &type->s_umount_key#26/1 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#26/1 &c->lock irq_context: 0 &type->s_umount_key#26/1 &pcp->lock &zone->lock irq_context: 0 &type->s_umount_key#26/1 &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &type->s_umount_key#26/1 &____s->seqcount irq_context: 0 &type->s_umount_key#26/1 pool_lock#2 irq_context: 0 &type->s_umount_key#26/1 mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#26/1 &zone->lock irq_context: 0 &type->s_umount_key#26/1 &xa->xa_lock#6 irq_context: 0 &type->s_umount_key#26/1 lock#4 irq_context: 0 &type->s_umount_key#26/1 &mapping->private_lock irq_context: 0 &type->s_umount_key#26/1 tk_core.seq.seqcount irq_context: 0 &type->s_umount_key#26/1 &dd->lock irq_context: 0 &type->s_umount_key#26/1 &obj_hash[i].lock irq_context: 0 &type->s_umount_key#26/1 rcu_read_lock &pool->lock irq_context: 0 &type->s_umount_key#26/1 rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 &type->s_umount_key#26/1 rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 &type->s_umount_key#26/1 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 &type->s_umount_key#26/1 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &type->s_umount_key#26/1 bit_wait_table + i irq_context: 0 &type->s_umount_key#26/1 &rq->__lock irq_context: 0 &type->s_umount_key#26/1 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &type->s_umount_key#26/1 &sb->s_type->i_lock_key#3 irq_context: 0 &type->s_umount_key#26/1 &sb->s_type->i_lock_key#3 &xa->xa_lock#6 irq_context: 0 &type->s_umount_key#26/1 lock#4 
&lruvec->lru_lock irq_context: 0 &type->s_umount_key#26/1 lock#5 irq_context: 0 &type->s_umount_key#26/1 &lruvec->lru_lock irq_context: 0 &type->s_umount_key#26/1 crypto_alg_sem irq_context: 0 &type->s_umount_key#26/1 shrinker_rwsem irq_context: 0 &type->s_umount_key#26/1 sb_lock irq_context: 0 &bdev->bd_fsfreeze_mutex &type->s_umount_key#27/1 irq_context: 0 &bdev->bd_fsfreeze_mutex &type->s_umount_key#27/1 fs_reclaim irq_context: 0 &bdev->bd_fsfreeze_mutex &type->s_umount_key#27/1 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &bdev->bd_fsfreeze_mutex &type->s_umount_key#27/1 pcpu_alloc_mutex irq_context: 0 &bdev->bd_fsfreeze_mutex &type->s_umount_key#27/1 pcpu_alloc_mutex pcpu_lock irq_context: 0 &bdev->bd_fsfreeze_mutex &type->s_umount_key#27/1 shrinker_rwsem irq_context: 0 &bdev->bd_fsfreeze_mutex &type->s_umount_key#27/1 list_lrus_mutex irq_context: 0 &bdev->bd_fsfreeze_mutex &type->s_umount_key#27/1 &c->lock irq_context: 0 &bdev->bd_fsfreeze_mutex &type->s_umount_key#27/1 &____s->seqcount irq_context: 0 &bdev->bd_fsfreeze_mutex &type->s_umount_key#27/1 pool_lock#2 irq_context: 0 &bdev->bd_fsfreeze_mutex &type->s_umount_key#27/1 sb_lock irq_context: 0 &type->s_umount_key#27/1 irq_context: 0 &type->s_umount_key#27/1 fs_reclaim irq_context: 0 &type->s_umount_key#27/1 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#27/1 mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#27/1 &pcp->lock &zone->lock irq_context: 0 &type->s_umount_key#27/1 &zone->lock irq_context: 0 &type->s_umount_key#27/1 &____s->seqcount irq_context: 0 &type->s_umount_key#27/1 &xa->xa_lock#6 irq_context: 0 &type->s_umount_key#27/1 lock#4 irq_context: 0 &type->s_umount_key#27/1 &c->lock irq_context: 0 &type->s_umount_key#27/1 &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &type->s_umount_key#27/1 pool_lock#2 irq_context: 0 &type->s_umount_key#27/1 &mapping->private_lock irq_context: 0 &type->s_umount_key#27/1 tk_core.seq.seqcount irq_context: 0 &type->s_umount_key#27/1 &dd->lock irq_context: 0 &type->s_umount_key#27/1 &obj_hash[i].lock irq_context: 0 &type->s_umount_key#27/1 rcu_read_lock &pool->lock irq_context: 0 &type->s_umount_key#27/1 rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 &type->s_umount_key#27/1 rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 &type->s_umount_key#27/1 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 &type->s_umount_key#27/1 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &type->s_umount_key#27/1 bit_wait_table + i irq_context: 0 &type->s_umount_key#27/1 &rq->__lock irq_context: 0 &type->s_umount_key#27/1 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &type->s_umount_key#27/1 &sb->s_type->i_lock_key#3 irq_context: 0 &type->s_umount_key#27/1 &sb->s_type->i_lock_key#3 &xa->xa_lock#6 irq_context: 0 &type->s_umount_key#27/1 lock#4 &lruvec->lru_lock irq_context: 0 &type->s_umount_key#27/1 lock#5 irq_context: 0 &type->s_umount_key#27/1 &lruvec->lru_lock irq_context: 0 &type->s_umount_key#27/1 crypto_alg_sem irq_context: 0 &type->s_umount_key#27/1 &xa->xa_lock#6 pool_lock#2 irq_context: 0 &type->s_umount_key#27/1 rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 &type->s_umount_key#27/1 pcpu_alloc_mutex irq_context: 0 &type->s_umount_key#27/1 pcpu_alloc_mutex pcpu_lock irq_context: 0 &type->s_umount_key#27/1 percpu_counters_lock irq_context: 0 &type->s_umount_key#27/1 batched_entropy_u8.lock irq_context: 0 
&type->s_umount_key#27/1 kfence_freelist_lock irq_context: 0 &type->s_umount_key#27/1 shrinker_rwsem irq_context: 0 &type->s_umount_key#27/1 inode_hash_lock irq_context: 0 &type->s_umount_key#27/1 inode_hash_lock &sb->s_type->i_lock_key#22 irq_context: 0 &type->s_umount_key#27/1 inode_hash_lock &s->s_inode_list_lock irq_context: 0 &type->s_umount_key#27/1 rcu_read_lock &dd->lock irq_context: 0 &type->s_umount_key#27/1 rcu_read_lock &obj_hash[i].lock irq_context: 0 &type->s_umount_key#27/1 rcu_read_lock pool_lock#2 irq_context: 0 &type->s_umount_key#27/1 rcu_read_lock tk_core.seq.seqcount irq_context: 0 &type->s_umount_key#27/1 rcu_read_lock &virtscsi_vq->vq_lock irq_context: 0 &type->s_umount_key#27/1 &sb->s_type->i_lock_key#22 irq_context: 0 &type->s_umount_key#27/1 &sb->s_type->i_mutex_key#8 irq_context: 0 &type->s_umount_key#27/1 &sb->s_type->i_mutex_key#8 &ei->i_es_lock irq_context: 0 &type->s_umount_key#27/1 &sb->s_type->i_mutex_key#8 &ei->i_data_sem irq_context: 0 &type->s_umount_key#27/1 &sb->s_type->i_mutex_key#8 &ei->i_data_sem mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#27/1 &sb->s_type->i_mutex_key#8 &ei->i_data_sem pool_lock#2 irq_context: 0 &type->s_umount_key#27/1 &sb->s_type->i_mutex_key#8 &ei->i_data_sem &ei->i_es_lock irq_context: 0 &type->s_umount_key#27/1 &sb->s_type->i_mutex_key#8 &ei->i_data_sem &ei->i_es_lock &____s->seqcount irq_context: 0 &type->s_umount_key#27/1 &sb->s_type->i_mutex_key#8 &ei->i_data_sem &ei->i_es_lock pool_lock#2 irq_context: 0 &type->s_umount_key#27/1 &sb->s_type->i_mutex_key#8 &ei->i_data_sem &ei->i_es_lock &c->lock irq_context: 0 &type->s_umount_key#27/1 &sb->s_type->i_mutex_key#8 &ei->i_data_sem &ei->i_es_lock &sbi->s_es_lock irq_context: 0 &type->s_umount_key#27/1 &sb->s_type->i_mutex_key#8 &ei->i_data_sem &obj_hash[i].lock irq_context: 0 &type->s_umount_key#27/1 &xa->xa_lock#6 &c->lock irq_context: 0 &type->s_umount_key#27/1 &xa->xa_lock#6 &pcp->lock &zone->lock irq_context: 0 &type->s_umount_key#27/1 &xa->xa_lock#6 &zone->lock irq_context: 0 &type->s_umount_key#27/1 &xa->xa_lock#6 &____s->seqcount irq_context: 0 &type->s_umount_key#27/1 proc_subdir_lock irq_context: 0 &type->s_umount_key#27/1 proc_inum_ida.xa_lock irq_context: 0 &type->s_umount_key#27/1 proc_subdir_lock irq_context: 0 &type->s_umount_key#27/1 &journal->j_state_lock irq_context: 0 &type->s_umount_key#27/1 kthread_create_lock irq_context: 0 &type->s_umount_key#27/1 &p->pi_lock irq_context: 0 &type->s_umount_key#27/1 &x->wait irq_context: 0 &type->s_umount_key#27/1 &p->pi_lock &rq->__lock irq_context: 0 &type->s_umount_key#27/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &type->s_umount_key#27/1 &journal->j_wait_done_commit irq_context: 0 &journal->j_wait_done_commit irq_context: 0 &journal->j_wait_done_commit &p->pi_lock irq_context: 0 &journal->j_wait_done_commit &p->pi_lock &rq->__lock irq_context: 0 &journal->j_wait_done_commit &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &type->s_umount_key#27/1 &sbi->s_error_lock irq_context: 0 &type->s_umount_key#27/1 &base->lock irq_context: 0 &type->s_umount_key#27/1 &base->lock &obj_hash[i].lock irq_context: 0 &type->s_umount_key#27/1 &fq->mq_flush_lock irq_context: 0 &type->s_umount_key#27/1 &fq->mq_flush_lock &q->requeue_lock irq_context: 0 &type->s_umount_key#27/1 &fq->mq_flush_lock &obj_hash[i].lock irq_context: 0 &type->s_umount_key#27/1 &fq->mq_flush_lock rcu_read_lock &pool->lock irq_context: 0 &type->s_umount_key#27/1 &fq->mq_flush_lock 
rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 &type->s_umount_key#27/1 &fq->mq_flush_lock rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 &type->s_umount_key#27/1 &fq->mq_flush_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 &type->s_umount_key#27/1 &fq->mq_flush_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 &type->s_umount_key#27/1 &fq->mq_flush_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)kblockd (work_completion)(&(&q->requeue_work)->work) irq_context: 0 (wq_completion)kblockd (work_completion)(&(&q->requeue_work)->work) &q->requeue_lock irq_context: 0 (wq_completion)kblockd (work_completion)(&(&q->requeue_work)->work) &hctx->lock irq_context: 0 (wq_completion)kblockd (work_completion)(&(&q->requeue_work)->work) rcu_read_lock &hctx->lock irq_context: 0 (wq_completion)kblockd (work_completion)(&(&q->requeue_work)->work) rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)kblockd (work_completion)(&(&q->requeue_work)->work) rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)kblockd (work_completion)(&(&q->requeue_work)->work) rcu_read_lock &virtscsi_vq->vq_lock irq_context: 0 &journal->j_state_lock irq_context: 0 &journal->j_state_lock &journal->j_wait_done_commit irq_context: 0 &journal->j_state_lock &journal->j_wait_commit irq_context: softirq &fq->mq_flush_lock irq_context: softirq &fq->mq_flush_lock tk_core.seq.seqcount irq_context: softirq &fq->mq_flush_lock &q->requeue_lock irq_context: softirq &fq->mq_flush_lock &obj_hash[i].lock irq_context: softirq &fq->mq_flush_lock rcu_read_lock &pool->lock irq_context: softirq &fq->mq_flush_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: softirq &fq->mq_flush_lock rcu_read_lock &pool->lock pool_lock#2 irq_context: softirq &fq->mq_flush_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: softirq &fq->mq_flush_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: softirq &fq->mq_flush_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq &fq->mq_flush_lock bit_wait_table + i irq_context: softirq &fq->mq_flush_lock bit_wait_table + i &p->pi_lock irq_context: softirq &fq->mq_flush_lock bit_wait_table + i &p->pi_lock &rq->__lock irq_context: softirq &fq->mq_flush_lock bit_wait_table + i &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &type->s_umount_key#27/1 &journal->j_state_lock irq_context: 0 &type->s_umount_key#27/1 &p->alloc_lock irq_context: 0 &type->s_umount_key#27/1 cpu_hotplug_lock irq_context: 0 &type->s_umount_key#27/1 cpu_hotplug_lock wq_pool_mutex irq_context: 0 &type->s_umount_key#27/1 cpu_hotplug_lock wq_pool_mutex fs_reclaim irq_context: 0 &type->s_umount_key#27/1 cpu_hotplug_lock wq_pool_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#27/1 cpu_hotplug_lock wq_pool_mutex pool_lock#2 irq_context: 0 &type->s_umount_key#27/1 cpu_hotplug_lock wq_pool_mutex &obj_hash[i].lock irq_context: 0 &type->s_umount_key#27/1 cpu_hotplug_lock wq_pool_mutex &wq->mutex irq_context: 0 &type->s_umount_key#27/1 cpu_hotplug_lock wq_pool_mutex &wq->mutex &pool->lock/1 irq_context: 0 &type->s_umount_key#27/1 wq_pool_mutex irq_context: 0 &type->s_umount_key#27/1 wq_pool_mutex &wq->mutex irq_context: 0 &type->s_umount_key#27/1 &sb->s_type->i_lock_key#22 &dentry->d_lock irq_context: 0 &type->s_umount_key#27/1 &ei->i_es_lock irq_context: 0 
&type->s_umount_key#27/1 ext4_grpinfo_slab_create_mutex irq_context: 0 &type->s_umount_key#27/1 ext4_grpinfo_slab_create_mutex slab_mutex irq_context: 0 &type->s_umount_key#27/1 ext4_grpinfo_slab_create_mutex slab_mutex fs_reclaim irq_context: 0 &type->s_umount_key#27/1 ext4_grpinfo_slab_create_mutex slab_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#27/1 ext4_grpinfo_slab_create_mutex slab_mutex &c->lock irq_context: 0 &type->s_umount_key#27/1 ext4_grpinfo_slab_create_mutex slab_mutex &____s->seqcount irq_context: 0 &type->s_umount_key#27/1 ext4_grpinfo_slab_create_mutex slab_mutex pool_lock#2 irq_context: 0 &type->s_umount_key#27/1 ext4_grpinfo_slab_create_mutex slab_mutex &n->list_lock irq_context: 0 &type->s_umount_key#27/1 ext4_grpinfo_slab_create_mutex slab_mutex pcpu_alloc_mutex irq_context: 0 &type->s_umount_key#27/1 ext4_grpinfo_slab_create_mutex slab_mutex pcpu_alloc_mutex pcpu_lock irq_context: 0 &type->s_umount_key#27/1 ext4_grpinfo_slab_create_mutex slab_mutex &root->kernfs_rwsem irq_context: 0 &type->s_umount_key#27/1 ext4_grpinfo_slab_create_mutex slab_mutex &k->list_lock irq_context: 0 &type->s_umount_key#27/1 ext4_grpinfo_slab_create_mutex slab_mutex lock irq_context: 0 &type->s_umount_key#27/1 ext4_grpinfo_slab_create_mutex slab_mutex lock kernfs_idr_lock irq_context: 0 &type->s_umount_key#27/1 ext4_grpinfo_slab_create_mutex slab_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 &type->s_umount_key#27/1 ext4_grpinfo_slab_create_mutex slab_mutex &pcp->lock &zone->lock irq_context: 0 &type->s_umount_key#27/1 ext4_grpinfo_slab_create_mutex slab_mutex &zone->lock irq_context: 0 &type->s_umount_key#27/1 &s->s_inode_list_lock irq_context: 0 &type->s_umount_key#27/1 &obj_hash[i].lock pool_lock irq_context: 0 &type->s_umount_key#27/1 lock irq_context: 0 &type->s_umount_key#27/1 lock kernfs_idr_lock irq_context: 0 &type->s_umount_key#27/1 &root->kernfs_rwsem irq_context: 0 &type->s_umount_key#27/1 &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 &type->s_umount_key#27/1 lock kernfs_idr_lock pool_lock#2 irq_context: 0 &type->s_umount_key#27/1 (console_sem).lock irq_context: 0 &type->s_umount_key#27/1 console_lock console_srcu console_owner_lock irq_context: 0 &type->s_umount_key#27/1 console_lock console_srcu console_owner irq_context: 0 &type->s_umount_key#27/1 console_lock console_srcu console_owner &port_lock_key irq_context: 0 &type->s_umount_key#27/1 console_lock console_srcu console_owner console_owner_lock irq_context: 0 (wq_completion)events (work_completion)(&s->destroy_work) irq_context: 0 (wq_completion)events (work_completion)(&s->destroy_work) &rsp->gp_wait irq_context: 0 (wq_completion)events (work_completion)(&s->destroy_work) pcpu_lock irq_context: 0 (wq_completion)events (work_completion)(&s->destroy_work) &obj_hash[i].lock irq_context: 0 &type->s_umount_key#27/1 &dentry->d_lock irq_context: 0 &sb->s_type->i_lock_key#22 irq_context: 0 &type->i_mutex_dir_key#3 irq_context: 0 &type->i_mutex_dir_key#3 fs_reclaim irq_context: 0 &type->i_mutex_dir_key#3 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &type->i_mutex_dir_key#3 pool_lock#2 irq_context: 0 &type->i_mutex_dir_key#3 &dentry->d_lock irq_context: 0 &type->i_mutex_dir_key#3 rcu_read_lock rename_lock.seqcount irq_context: 0 &type->i_mutex_dir_key#3 &ei->i_es_lock irq_context: 0 &type->i_mutex_dir_key#3 &ei->i_data_sem irq_context: 0 &type->i_mutex_dir_key#3 &ei->i_data_sem mmu_notifier_invalidate_range_start irq_context: 0 
&type->i_mutex_dir_key#3 &ei->i_data_sem pool_lock#2 irq_context: 0 &type->i_mutex_dir_key#3 &ei->i_data_sem &ei->i_es_lock irq_context: 0 &type->i_mutex_dir_key#3 &ei->i_data_sem &ei->i_es_lock pool_lock#2 irq_context: 0 &type->i_mutex_dir_key#3 &ei->i_data_sem &ei->i_es_lock &sbi->s_es_lock irq_context: 0 &type->i_mutex_dir_key#3 &ei->i_data_sem &obj_hash[i].lock irq_context: 0 &type->i_mutex_dir_key#3 &ei->i_data_sem &ei->i_es_lock &obj_hash[i].lock irq_context: 0 &type->i_mutex_dir_key#3 mmu_notifier_invalidate_range_start irq_context: 0 &type->i_mutex_dir_key#3 &____s->seqcount irq_context: 0 &type->i_mutex_dir_key#3 &xa->xa_lock#6 irq_context: 0 &type->i_mutex_dir_key#3 &xa->xa_lock#6 pool_lock#2 irq_context: 0 &type->i_mutex_dir_key#3 lock#4 irq_context: 0 &type->i_mutex_dir_key#3 &mapping->private_lock irq_context: 0 &type->i_mutex_dir_key#3 tk_core.seq.seqcount irq_context: 0 &type->i_mutex_dir_key#3 &dd->lock irq_context: 0 &type->i_mutex_dir_key#3 &obj_hash[i].lock irq_context: 0 &type->i_mutex_dir_key#3 rcu_read_lock &pool->lock irq_context: 0 &type->i_mutex_dir_key#3 rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 &type->i_mutex_dir_key#3 rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 &type->i_mutex_dir_key#3 rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 &type->i_mutex_dir_key#3 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 &type->i_mutex_dir_key#3 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &type->i_mutex_dir_key#3 bit_wait_table + i irq_context: 0 &type->i_mutex_dir_key#3 &rq->__lock irq_context: 0 &type->i_mutex_dir_key#3 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &type->i_mutex_dir_key#3 inode_hash_lock irq_context: 0 &type->i_mutex_dir_key#3 inode_hash_lock &sb->s_type->i_lock_key#22 irq_context: 0 &type->i_mutex_dir_key#3 inode_hash_lock &s->s_inode_list_lock irq_context: 0 &type->i_mutex_dir_key#3 &journal->j_state_lock irq_context: 0 &type->i_mutex_dir_key#3 &sb->s_type->i_lock_key#22 irq_context: 0 &type->i_mutex_dir_key#3 &sb->s_type->i_lock_key#22 &dentry->d_lock irq_context: 0 &type->i_mutex_dir_key#3 &sb->s_type->i_lock_key#22 &dentry->d_lock &wq irq_context: 0 &type->i_mutex_dir_key#3 irq_context: 0 &type->i_mutex_dir_key#3 namespace_sem irq_context: 0 &type->i_mutex_dir_key#3 namespace_sem rcu_read_lock mount_lock.seqcount irq_context: 0 &type->i_mutex_dir_key#3 namespace_sem fs_reclaim irq_context: 0 &type->i_mutex_dir_key#3 namespace_sem fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &type->i_mutex_dir_key#3 namespace_sem pool_lock#2 irq_context: 0 &type->i_mutex_dir_key#3 namespace_sem rename_lock irq_context: 0 &type->i_mutex_dir_key#3 namespace_sem rename_lock rename_lock.seqcount irq_context: 0 &type->i_mutex_dir_key#3 namespace_sem rename_lock rename_lock.seqcount &dentry->d_lock irq_context: 0 &type->i_mutex_dir_key#3 namespace_sem mount_lock irq_context: 0 &type->i_mutex_dir_key#3 namespace_sem mount_lock &dentry->d_lock irq_context: 0 &type->i_mutex_dir_key#3 namespace_sem mount_lock mount_lock.seqcount irq_context: 0 &type->i_mutex_dir_key#3 namespace_sem mount_lock mount_lock.seqcount &new_ns->poll irq_context: 0 &type->i_mutex_dir_key#3 namespace_sem mount_lock mount_lock.seqcount &dentry->d_lock irq_context: 0 &type->i_mutex_dir_key#3 namespace_sem mount_lock mount_lock.seqcount rcu_read_lock &dentry->d_lock irq_context: 0 &type->i_mutex_dir_key#3 namespace_sem mount_lock mount_lock.seqcount &obj_hash[i].lock 
irq_context: 0 &type->i_mutex_dir_key#3 namespace_sem mount_lock mount_lock.seqcount pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key namespace_sem &dentry->d_lock irq_context: 0 &sb->s_type->i_mutex_key namespace_sem rcu_read_lock &dentry->d_lock irq_context: 0 &sb->s_type->i_mutex_key namespace_sem &obj_hash[i].lock irq_context: 0 vmap_purge_lock free_vmap_area_lock irq_context: 0 vmap_purge_lock free_vmap_area_lock &obj_hash[i].lock irq_context: 0 vmap_purge_lock free_vmap_area_lock pool_lock#2 irq_context: 0 vmap_purge_lock free_vmap_area_lock init_mm.page_table_lock irq_context: 0 vmap_purge_lock free_vmap_area_lock init_mm.page_table_lock rcu_read_lock pool_lock#2 irq_context: 0 vmap_purge_lock free_vmap_area_lock init_mm.page_table_lock &obj_hash[i].lock irq_context: 0 rcu_state.barrier_mutex irq_context: 0 rcu_state.barrier_mutex rcu_state.barrier_lock irq_context: hardirq rcu_state.barrier_lock &obj_hash[i].lock irq_context: 0 rcu_state.barrier_mutex &x->wait#24 irq_context: 0 rcu_state.barrier_mutex &rq->__lock irq_context: 0 rcu_state.barrier_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq rcu_callback &x->wait#24 irq_context: softirq rcu_callback &x->wait#24 &p->pi_lock irq_context: softirq (&cb->timer) irq_context: softirq (&cb->timer) &obj_hash[i].lock irq_context: softirq (&cb->timer) &base->lock irq_context: softirq (&cb->timer) &base->lock &obj_hash[i].lock irq_context: 0 (init_mm).mmap_lock irq_context: 0 &type->s_umount_key#28/1 irq_context: 0 &type->s_umount_key#28/1 fs_reclaim irq_context: 0 &type->s_umount_key#28/1 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#28/1 pool_lock#2 irq_context: 0 &type->s_umount_key#28/1 pcpu_alloc_mutex irq_context: 0 &type->s_umount_key#28/1 pcpu_alloc_mutex pcpu_lock irq_context: 0 &type->s_umount_key#28/1 shrinker_rwsem irq_context: 0 &type->s_umount_key#28/1 list_lrus_mutex irq_context: 0 &type->s_umount_key#28/1 sb_lock irq_context: 0 &type->s_umount_key#28/1 sb_lock unnamed_dev_ida.xa_lock irq_context: 0 &type->s_umount_key#28/1 &pcp->lock &zone->lock irq_context: 0 &type->s_umount_key#28/1 &zone->lock irq_context: 0 &type->s_umount_key#28/1 &____s->seqcount irq_context: 0 &type->s_umount_key#28/1 &c->lock irq_context: 0 &type->s_umount_key#28/1 mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#28/1 &sb->s_type->i_lock_key#23 irq_context: 0 &type->s_umount_key#28/1 &s->s_inode_list_lock irq_context: 0 &type->s_umount_key#28/1 tk_core.seq.seqcount irq_context: 0 &type->s_umount_key#28/1 &sb->s_type->i_lock_key#23 &dentry->d_lock irq_context: 0 &type->s_umount_key#28/1 &sb->s_type->i_mutex_key#9 irq_context: 0 &type->s_umount_key#28/1 &sb->s_type->i_mutex_key#9 fs_reclaim irq_context: 0 &type->s_umount_key#28/1 &sb->s_type->i_mutex_key#9 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#28/1 &sb->s_type->i_mutex_key#9 pool_lock#2 irq_context: 0 &type->s_umount_key#28/1 &sb->s_type->i_mutex_key#9 &dentry->d_lock irq_context: 0 &type->s_umount_key#28/1 &sb->s_type->i_mutex_key#9 mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#28/1 &sb->s_type->i_mutex_key#9 &sb->s_type->i_lock_key#23 irq_context: 0 &type->s_umount_key#28/1 &sb->s_type->i_mutex_key#9 &s->s_inode_list_lock irq_context: 0 &type->s_umount_key#28/1 &sb->s_type->i_mutex_key#9 tk_core.seq.seqcount irq_context: 0 &type->s_umount_key#28/1 &sb->s_type->i_mutex_key#9 &sb->s_type->i_lock_key#23 &dentry->d_lock irq_context: 0 
&type->s_umount_key#28/1 &dentry->d_lock irq_context: 0 rcu_read_lock &sb->s_type->i_lock_key#23 irq_context: 0 &sb->s_type->i_mutex_key#9 irq_context: 0 &sb->s_type->i_mutex_key#9 fs_reclaim irq_context: 0 &sb->s_type->i_mutex_key#9 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &sb->s_type->i_mutex_key#9 pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#9 &dentry->d_lock irq_context: 0 &sb->s_type->i_mutex_key#9 rcu_read_lock rename_lock.seqcount irq_context: 0 &sb->s_type->i_mutex_key#9 proc_subdir_lock irq_context: 0 &sb->s_type->i_mutex_key#9 mmu_notifier_invalidate_range_start irq_context: 0 &sb->s_type->i_mutex_key#9 &sb->s_type->i_lock_key#23 irq_context: 0 &sb->s_type->i_mutex_key#9 &s->s_inode_list_lock irq_context: 0 &sb->s_type->i_mutex_key#9 tk_core.seq.seqcount irq_context: 0 &sb->s_type->i_mutex_key#9 &sb->s_type->i_lock_key#23 &dentry->d_lock irq_context: 0 &sb->s_type->i_mutex_key#9 &sb->s_type->i_lock_key#23 &dentry->d_lock &wq irq_context: 0 &sb->s_type->i_mutex_key#9 sysctl_lock irq_context: 0 sb_writers#3 irq_context: 0 sb_writers#3 mount_lock irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 rename_lock.seqcount irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 fs_reclaim irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 pool_lock#2 irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 &dentry->d_lock irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 rcu_read_lock rename_lock.seqcount irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 sysctl_lock irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 &sb->s_type->i_lock_key#23 irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 &s->s_inode_list_lock irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 tk_core.seq.seqcount irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 &sb->s_type->i_lock_key#23 &dentry->d_lock irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 &sb->s_type->i_lock_key#23 &dentry->d_lock &wq#2 irq_context: 0 tomoyo_ss rcu_read_lock rename_lock.seqcount irq_context: 0 &sb->s_type->i_mutex_key#9 irq_context: 0 sb_writers#3 sysctl_lock irq_context: 0 sb_writers#3 fs_reclaim irq_context: 0 sb_writers#3 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#3 pool_lock#2 irq_context: 0 sb_writers#3 &obj_hash[i].lock irq_context: 0 sb_writers#3 &h->resize_lock irq_context: 0 sb_writers#3 &h->resize_lock free_hpage_work irq_context: 0 sb_writers#3 &h->resize_lock hugetlb_lock irq_context: 0 sb_writers#3 &h->resize_lock fs_reclaim irq_context: 0 sb_writers#3 &h->resize_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#3 &h->resize_lock &pcp->lock &zone->lock irq_context: 0 sb_writers#3 &h->resize_lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 sb_writers#3 &h->resize_lock &____s->seqcount irq_context: 0 sb_writers#3 &h->resize_lock pool_lock#2 irq_context: 0 sb_writers#3 hugetlb_lock irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 &dentry->d_lock &wq#2 irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 rcu_read_lock &dentry->d_lock irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 &obj_hash[i].lock irq_context: 0 &sig->cred_guard_mutex init_fs.lock irq_context: 0 &sig->cred_guard_mutex rcu_read_lock init_fs.seq.seqcount irq_context: 0 
&sig->cred_guard_mutex &type->i_mutex_dir_key#3 irq_context: 0 &sig->cred_guard_mutex &type->i_mutex_dir_key#3 fs_reclaim irq_context: 0 &sig->cred_guard_mutex &type->i_mutex_dir_key#3 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &sig->cred_guard_mutex &type->i_mutex_dir_key#3 pool_lock#2 irq_context: 0 &sig->cred_guard_mutex &type->i_mutex_dir_key#3 &dentry->d_lock irq_context: 0 &sig->cred_guard_mutex &type->i_mutex_dir_key#3 rcu_read_lock rename_lock.seqcount irq_context: 0 &sig->cred_guard_mutex &type->i_mutex_dir_key#3 &ei->i_es_lock irq_context: 0 &sig->cred_guard_mutex &type->i_mutex_dir_key#3 inode_hash_lock irq_context: 0 &sig->cred_guard_mutex &type->i_mutex_dir_key#3 mmu_notifier_invalidate_range_start irq_context: 0 &sig->cred_guard_mutex &type->i_mutex_dir_key#3 &obj_hash[i].lock irq_context: 0 &sig->cred_guard_mutex &type->i_mutex_dir_key#3 inode_hash_lock &sb->s_type->i_lock_key#22 irq_context: 0 &sig->cred_guard_mutex &type->i_mutex_dir_key#3 inode_hash_lock &s->s_inode_list_lock irq_context: 0 &sig->cred_guard_mutex &type->i_mutex_dir_key#3 &____s->seqcount irq_context: 0 &sig->cred_guard_mutex &type->i_mutex_dir_key#3 &xa->xa_lock#6 irq_context: 0 &sig->cred_guard_mutex &type->i_mutex_dir_key#3 &xa->xa_lock#6 pool_lock#2 irq_context: 0 &sig->cred_guard_mutex &type->i_mutex_dir_key#3 lock#4 irq_context: 0 &sig->cred_guard_mutex &type->i_mutex_dir_key#3 &mapping->private_lock irq_context: 0 &sig->cred_guard_mutex &type->i_mutex_dir_key#3 &pcp->lock &zone->lock irq_context: 0 &sig->cred_guard_mutex &type->i_mutex_dir_key#3 &zone->lock irq_context: 0 &sig->cred_guard_mutex &type->i_mutex_dir_key#3 tk_core.seq.seqcount irq_context: 0 &sig->cred_guard_mutex &type->i_mutex_dir_key#3 &dd->lock irq_context: 0 &sig->cred_guard_mutex &type->i_mutex_dir_key#3 lock#4 &lruvec->lru_lock irq_context: 0 &sig->cred_guard_mutex &type->i_mutex_dir_key#3 &c->lock irq_context: 0 &sig->cred_guard_mutex &type->i_mutex_dir_key#3 rcu_read_lock &dd->lock irq_context: 0 &sig->cred_guard_mutex &type->i_mutex_dir_key#3 rcu_read_lock &obj_hash[i].lock irq_context: 0 &sig->cred_guard_mutex &type->i_mutex_dir_key#3 rcu_read_lock pool_lock#2 irq_context: 0 &sig->cred_guard_mutex &type->i_mutex_dir_key#3 rcu_read_lock tk_core.seq.seqcount irq_context: 0 &sig->cred_guard_mutex &type->i_mutex_dir_key#3 rcu_read_lock &virtscsi_vq->vq_lock irq_context: 0 &sig->cred_guard_mutex &type->i_mutex_dir_key#3 bit_wait_table + i irq_context: 0 &sig->cred_guard_mutex &type->i_mutex_dir_key#3 &rq->__lock irq_context: 0 &sig->cred_guard_mutex &type->i_mutex_dir_key#3 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sig->cred_guard_mutex &type->i_mutex_dir_key#3 &journal->j_state_lock irq_context: 0 &sig->cred_guard_mutex &type->i_mutex_dir_key#3 &sb->s_type->i_lock_key#22 irq_context: 0 &sig->cred_guard_mutex &type->i_mutex_dir_key#3 &sb->s_type->i_lock_key#22 &dentry->d_lock irq_context: 0 &sig->cred_guard_mutex &type->i_mutex_dir_key#3 &sb->s_type->i_lock_key#22 &dentry->d_lock &wq irq_context: 0 &sig->cred_guard_mutex &sb->s_type->i_lock_key#22 irq_context: 0 &sig->cred_guard_mutex &type->i_mutex_dir_key#3 rename_lock.seqcount irq_context: 0 &sig->cred_guard_mutex &type->i_mutex_dir_key#3 &ei->i_data_sem irq_context: 0 &sig->cred_guard_mutex &type->i_mutex_dir_key#3 &ei->i_data_sem mmu_notifier_invalidate_range_start irq_context: 0 &sig->cred_guard_mutex &type->i_mutex_dir_key#3 &ei->i_data_sem pool_lock#2 irq_context: 0 &sig->cred_guard_mutex &type->i_mutex_dir_key#3 &ei->i_data_sem 
&ei->i_es_lock irq_context: 0 &sig->cred_guard_mutex &type->i_mutex_dir_key#3 &ei->i_data_sem &ei->i_es_lock pool_lock#2 irq_context: 0 &sig->cred_guard_mutex &type->i_mutex_dir_key#3 &ei->i_data_sem &ei->i_es_lock &sbi->s_es_lock irq_context: 0 &sig->cred_guard_mutex &type->i_mutex_dir_key#3 &ei->i_data_sem &obj_hash[i].lock irq_context: 0 &sig->cred_guard_mutex &type->i_mutex_dir_key#3 &ei->i_data_sem &ei->i_es_lock &obj_hash[i].lock irq_context: 0 &sig->cred_guard_mutex &type->i_mutex_dir_key#3 rcu_read_lock &pool->lock irq_context: 0 &sig->cred_guard_mutex &type->i_mutex_dir_key#3 rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 &sig->cred_guard_mutex &type->i_mutex_dir_key#3 rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 &sig->cred_guard_mutex &type->i_mutex_dir_key#3 rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 &sig->cred_guard_mutex &type->i_mutex_dir_key#3 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 &sig->cred_guard_mutex &type->i_mutex_dir_key#3 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq (&cb->timer) tk_core.seq.seqcount irq_context: 0 &sig->cred_guard_mutex &type->i_mutex_dir_key#3 &sb->s_type->i_lock_key#22 &dentry->d_lock &wq#2 irq_context: 0 &sig->cred_guard_mutex &sb->s_type->i_mutex_key#8 irq_context: 0 &sig->cred_guard_mutex &p->pi_lock irq_context: 0 &sig->cred_guard_mutex aa_buffers_lock irq_context: 0 &sig->cred_guard_mutex rcu_read_lock mount_lock.seqcount irq_context: 0 &sig->cred_guard_mutex rcu_read_lock rcu_read_lock rename_lock.seqcount irq_context: 0 &sig->cred_guard_mutex mapping.invalidate_lock irq_context: 0 &sig->cred_guard_mutex mapping.invalidate_lock mmu_notifier_invalidate_range_start irq_context: 0 &sig->cred_guard_mutex mapping.invalidate_lock &pcp->lock &zone->lock irq_context: 0 &sig->cred_guard_mutex mapping.invalidate_lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &sig->cred_guard_mutex mapping.invalidate_lock &____s->seqcount irq_context: 0 &sig->cred_guard_mutex mapping.invalidate_lock pool_lock#2 irq_context: 0 &sig->cred_guard_mutex mapping.invalidate_lock &xa->xa_lock#6 irq_context: 0 &sig->cred_guard_mutex mapping.invalidate_lock lock#4 irq_context: 0 &sig->cred_guard_mutex mapping.invalidate_lock &xa->xa_lock#6 pool_lock#2 irq_context: 0 &sig->cred_guard_mutex mapping.invalidate_lock lock#4 &lruvec->lru_lock irq_context: 0 &sig->cred_guard_mutex mapping.invalidate_lock &ei->i_es_lock irq_context: 0 &sig->cred_guard_mutex mapping.invalidate_lock &ei->i_data_sem irq_context: 0 &sig->cred_guard_mutex mapping.invalidate_lock &ei->i_data_sem mmu_notifier_invalidate_range_start irq_context: 0 &sig->cred_guard_mutex mapping.invalidate_lock &ei->i_data_sem pool_lock#2 irq_context: 0 &sig->cred_guard_mutex mapping.invalidate_lock &ei->i_data_sem &ei->i_es_lock irq_context: 0 &sig->cred_guard_mutex mapping.invalidate_lock &ei->i_data_sem &ei->i_es_lock pool_lock#2 irq_context: 0 &sig->cred_guard_mutex mapping.invalidate_lock &ei->i_data_sem &ei->i_es_lock &sbi->s_es_lock irq_context: 0 &sig->cred_guard_mutex mapping.invalidate_lock &ei->i_data_sem &obj_hash[i].lock irq_context: 0 &sig->cred_guard_mutex mapping.invalidate_lock &ei->i_data_sem &ei->i_es_lock &obj_hash[i].lock irq_context: 0 &sig->cred_guard_mutex mapping.invalidate_lock tk_core.seq.seqcount irq_context: 0 &sig->cred_guard_mutex mapping.invalidate_lock &dd->lock irq_context: 0 &sig->cred_guard_mutex mapping.invalidate_lock rcu_read_lock &dd->lock irq_context: 0 
&sig->cred_guard_mutex mapping.invalidate_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 &sig->cred_guard_mutex mapping.invalidate_lock rcu_read_lock &obj_hash[i].lock pool_lock irq_context: 0 &sig->cred_guard_mutex mapping.invalidate_lock rcu_read_lock tk_core.seq.seqcount irq_context: 0 &sig->cred_guard_mutex mapping.invalidate_lock rcu_read_lock &virtscsi_vq->vq_lock irq_context: 0 &sig->cred_guard_mutex &folio_wait_table[i] irq_context: 0 &sig->cred_guard_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sig->cred_guard_mutex tomoyo_ss irq_context: 0 &sig->cred_guard_mutex tomoyo_ss mmu_notifier_invalidate_range_start irq_context: 0 &sig->cred_guard_mutex tomoyo_ss pool_lock#2 irq_context: 0 &sig->cred_guard_mutex tomoyo_ss fs_reclaim irq_context: 0 &sig->cred_guard_mutex tomoyo_ss fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &sig->cred_guard_mutex tomoyo_ss rcu_read_lock init_fs.seq.seqcount irq_context: 0 &sig->cred_guard_mutex tomoyo_ss rcu_read_lock &dentry->d_lock irq_context: 0 &sig->cred_guard_mutex tomoyo_ss &obj_hash[i].lock irq_context: 0 &sig->cred_guard_mutex tomoyo_ss &c->lock irq_context: 0 &sig->cred_guard_mutex tomoyo_ss &pcp->lock &zone->lock irq_context: 0 &sig->cred_guard_mutex tomoyo_ss &zone->lock irq_context: 0 &sig->cred_guard_mutex tomoyo_ss &____s->seqcount irq_context: 0 &sig->cred_guard_mutex tomoyo_ss rcu_read_lock mount_lock.seqcount irq_context: 0 &sig->cred_guard_mutex tomoyo_ss rcu_read_lock rcu_read_lock rename_lock.seqcount irq_context: 0 &sig->cred_guard_mutex tomoyo_ss &mm->mmap_lock irq_context: 0 &sig->cred_guard_mutex tomoyo_ss &mm->mmap_lock ptlock_ptr(page)#2 irq_context: 0 &sig->cred_guard_mutex tomoyo_ss tomoyo_log_lock irq_context: 0 &sig->cred_guard_mutex tomoyo_ss tomoyo_log_wait.lock irq_context: 0 &sig->cred_guard_mutex tomoyo_ss tomoyo_policy_lock irq_context: 0 &sig->cred_guard_mutex tomoyo_ss tomoyo_policy_lock mmu_notifier_invalidate_range_start irq_context: 0 &sig->cred_guard_mutex tomoyo_ss tomoyo_policy_lock pool_lock#2 irq_context: 0 &sig->cred_guard_mutex tomoyo_ss rcu_read_lock pool_lock#2 irq_context: 0 &sig->cred_guard_mutex tomoyo_ss tomoyo_policy_lock &c->lock irq_context: 0 &sig->cred_guard_mutex tomoyo_ss tomoyo_policy_lock &pcp->lock &zone->lock irq_context: 0 &sig->cred_guard_mutex tomoyo_ss tomoyo_policy_lock &zone->lock irq_context: 0 &sig->cred_guard_mutex tomoyo_ss tomoyo_policy_lock &____s->seqcount irq_context: 0 &sig->cred_guard_mutex &sb->s_type->i_mutex_key#8 mmu_notifier_invalidate_range_start irq_context: 0 &sig->cred_guard_mutex &sb->s_type->i_mutex_key#8 &pcp->lock &zone->lock irq_context: 0 &sig->cred_guard_mutex &sb->s_type->i_mutex_key#8 &zone->lock irq_context: 0 &sig->cred_guard_mutex &sb->s_type->i_mutex_key#8 &____s->seqcount irq_context: 0 &sig->cred_guard_mutex &sb->s_type->i_mutex_key#8 pool_lock#2 irq_context: 0 &sig->cred_guard_mutex &sb->s_type->i_mutex_key#8 &c->lock irq_context: 0 &sig->cred_guard_mutex &sb->s_type->i_mutex_key#8 integrity_iint_lock irq_context: 0 &sig->cred_guard_mutex &iint->mutex irq_context: 0 &sig->cred_guard_mutex &iint->mutex &ei->xattr_sem irq_context: 0 &sig->cred_guard_mutex &iint->mutex fs_reclaim irq_context: 0 &sig->cred_guard_mutex &iint->mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &sig->cred_guard_mutex &iint->mutex pool_lock#2 irq_context: 0 &sig->cred_guard_mutex &iint->mutex &obj_hash[i].lock irq_context: 0 &sig->cred_guard_mutex &iint->mutex mmu_notifier_invalidate_range_start irq_context: 0 
&sig->cred_guard_mutex &iint->mutex rcu_read_lock mount_lock.seqcount irq_context: 0 &sig->cred_guard_mutex &iint->mutex rcu_read_lock rcu_read_lock rename_lock.seqcount irq_context: 0 &sig->cred_guard_mutex &iint->mutex ima_extend_list_mutex irq_context: 0 &sig->cred_guard_mutex &iint->mutex ima_extend_list_mutex fs_reclaim irq_context: 0 &sig->cred_guard_mutex &iint->mutex ima_extend_list_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &sig->cred_guard_mutex &iint->mutex ima_extend_list_mutex pool_lock#2 irq_context: 0 &sig->cred_guard_mutex binfmt_lock irq_context: 0 &sig->cred_guard_mutex entries_lock irq_context: 0 &sig->cred_guard_mutex &dentry->d_lock &lru->node[i].lock irq_context: 0 &sig->cred_guard_mutex &type->i_mutex_dir_key#3 &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &sig->cred_guard_mutex &ei->xattr_sem irq_context: 0 &sig->cred_guard_mutex &tsk->futex_exit_mutex irq_context: 0 &sig->cred_guard_mutex &tsk->futex_exit_mutex &p->pi_lock irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &p->alloc_lock irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &p->alloc_lock &memcg->mm_list.lock irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &sighand->siglock irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &newf->file_lock irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock batched_entropy_u64.lock irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock batched_entropy_u64.lock crngs.lock irq_context: 0 batched_entropy_u16.lock irq_context: 0 batched_entropy_u16.lock crngs.lock irq_context: 0 &mm->mmap_lock &anon_vma->rwsem irq_context: 0 &mm->mmap_lock &vma->vm_lock->lock irq_context: 0 &mm->mmap_lock mmu_notifier_invalidate_range_start irq_context: 0 &mm->mmap_lock ptlock_ptr(page)#2 irq_context: 0 &mm->mmap_lock &anon_vma->rwsem irq_context: 0 &mm->mmap_lock &anon_vma->rwsem &obj_hash[i].lock irq_context: 0 &mm->mmap_lock &anon_vma->rwsem pool_lock#2 irq_context: 0 &mm->mmap_lock &mm->page_table_lock irq_context: 0 &mm->mmap_lock ptlock_ptr(page) irq_context: 0 &mm->mmap_lock ptlock_ptr(page)#2 ptlock_ptr(page)#2/1 irq_context: 0 &mm->mmap_lock lock#4 irq_context: 0 &mm->mmap_lock lock#4 &lruvec->lru_lock irq_context: 0 &mm->mmap_lock lock#5 irq_context: 0 &mm->mmap_lock rcu_read_lock pool_lock#2 irq_context: 0 &mm->mmap_lock &anon_vma->rwsem &mm->page_table_lock irq_context: 0 &mm->mmap_lock &anon_vma->rwsem &mm->page_table_lock &obj_hash[i].lock irq_context: 0 &mm->mmap_lock &anon_vma->rwsem &mm->page_table_lock pool_lock#2 irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem &obj_hash[i].lock irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#8 irq_context: 0 &sb->s_type->i_mutex_key#8 integrity_iint_lock irq_context: 0 &iint->mutex irq_context: 0 &mm->mmap_lock &obj_hash[i].lock pool_lock irq_context: 0 &sb->s_type->i_mutex_key#8 mmu_notifier_invalidate_range_start irq_context: 0 &sb->s_type->i_mutex_key#8 pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#8 integrity_iint_lock irq_context: 0 &iint->mutex &ei->xattr_sem irq_context: 0 &iint->mutex fs_reclaim irq_context: 0 &iint->mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &iint->mutex pool_lock#2 irq_context: 0 &iint->mutex mapping.invalidate_lock irq_context: 0 &iint->mutex mapping.invalidate_lock mmu_notifier_invalidate_range_start irq_context: 
0 &iint->mutex mapping.invalidate_lock &pcp->lock &zone->lock irq_context: 0 &iint->mutex mapping.invalidate_lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &iint->mutex mapping.invalidate_lock &____s->seqcount irq_context: 0 &iint->mutex mapping.invalidate_lock pool_lock#2 irq_context: 0 &iint->mutex mapping.invalidate_lock &xa->xa_lock#6 irq_context: 0 &iint->mutex mapping.invalidate_lock lock#4 irq_context: 0 &iint->mutex mapping.invalidate_lock &ei->i_es_lock irq_context: 0 &iint->mutex mapping.invalidate_lock &zone->lock irq_context: 0 &iint->mutex mapping.invalidate_lock &c->lock irq_context: 0 &iint->mutex mapping.invalidate_lock tk_core.seq.seqcount irq_context: 0 &iint->mutex mapping.invalidate_lock &dd->lock irq_context: 0 &iint->mutex mapping.invalidate_lock rcu_read_lock &dd->lock irq_context: 0 &iint->mutex mapping.invalidate_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 &iint->mutex mapping.invalidate_lock rcu_read_lock tk_core.seq.seqcount irq_context: 0 &iint->mutex mapping.invalidate_lock rcu_read_lock &virtscsi_vq->vq_lock irq_context: 0 &iint->mutex mapping.invalidate_lock lock#4 &lruvec->lru_lock irq_context: 0 &iint->mutex &folio_wait_table[i] irq_context: 0 &iint->mutex &rq->__lock irq_context: 0 &iint->mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &iint->mutex &obj_hash[i].lock irq_context: 0 &iint->mutex mmu_notifier_invalidate_range_start irq_context: 0 &iint->mutex &c->lock irq_context: 0 &iint->mutex &pcp->lock &zone->lock irq_context: 0 &iint->mutex &zone->lock irq_context: 0 &iint->mutex &____s->seqcount irq_context: 0 &iint->mutex rcu_read_lock mount_lock.seqcount irq_context: 0 &iint->mutex rcu_read_lock rcu_read_lock rename_lock.seqcount irq_context: 0 &iint->mutex ima_extend_list_mutex irq_context: 0 &iint->mutex ima_extend_list_mutex fs_reclaim irq_context: 0 &iint->mutex ima_extend_list_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &iint->mutex ima_extend_list_mutex pool_lock#2 irq_context: 0 binfmt_lock irq_context: 0 &dentry->d_lock &lru->node[i].lock irq_context: 0 &fsnotify_mark_srcu irq_context: 0 &type->s_umount_key#29 irq_context: 0 &type->s_umount_key#29 shrinker_rwsem irq_context: 0 &type->s_umount_key#29 rcu_read_lock &dentry->d_lock irq_context: 0 &type->s_umount_key#29 &dentry->d_lock irq_context: 0 &type->s_umount_key#29 &dentry->d_lock &lru->node[i].lock irq_context: 0 &type->s_umount_key#29 rename_lock.seqcount irq_context: 0 &type->s_umount_key#29 &dentry->d_lock &dentry->d_lock/1 irq_context: 0 &type->s_umount_key#29 &dentry->d_lock/1 irq_context: 0 &type->s_umount_key#29 &dentry->d_lock &dentry->d_lock/1 &lru->node[i].lock irq_context: 0 &type->s_umount_key#29 &sb->s_type->i_lock_key#23 irq_context: 0 &type->s_umount_key#29 &s->s_inode_list_lock irq_context: 0 &type->s_umount_key#29 &xa->xa_lock#6 irq_context: 0 &type->s_umount_key#29 sysctl_lock irq_context: 0 &type->s_umount_key#29 &obj_hash[i].lock irq_context: 0 &type->s_umount_key#29 pool_lock#2 irq_context: 0 &type->s_umount_key#29 &fsnotify_mark_srcu irq_context: 0 &type->s_umount_key#29 &obj_hash[i].lock pool_lock irq_context: 0 &type->s_umount_key#29 sb_lock irq_context: 0 unnamed_dev_ida.xa_lock irq_context: 0 &xa->xa_lock#6 irq_context: 0 prog_idr_lock irq_context: 0 prog_idr_lock &obj_hash[i].lock irq_context: 0 prog_idr_lock pool_lock#2 irq_context: 0 map_idr_lock irq_context: 0 btf_idr_lock irq_context: 0 btf_idr_lock &obj_hash[i].lock irq_context: 0 rcu_tasks_trace.tasks_gp_mutex rcu_state.exp_mutex 
&obj_hash[i].lock irq_context: 0 btf_idr_lock pool_lock#2 irq_context: 0 rcu_tasks_trace.tasks_gp_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock irq_context: 0 rcu_tasks_trace.tasks_gp_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 rcu_tasks_trace.tasks_gp_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 &mm->mmap_lock rcu_read_lock rcu_read_lock ptlock_ptr(page)#2 irq_context: 0 rcu_tasks_trace.tasks_gp_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 rcu_tasks_trace.tasks_gp_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rcu_tasks_trace.tasks_gp_mutex rcu_state.exp_mutex &rnp->exp_wq[0] irq_context: 0 rcu_tasks_trace.tasks_gp_mutex rcu_state.exp_mutex &rq->__lock irq_context: 0 rcu_tasks_trace.tasks_gp_mutex rcu_state.exp_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock rcu_read_lock &rcu_state.expedited_wq irq_context: 0 &mm->mmap_lock rcu_read_lock rcu_read_lock ptlock_ptr(page)#2 key irq_context: 0 (wq_completion)events_unbound (work_completion)(&map->work) irq_context: 0 (wq_completion)events_unbound (work_completion)(&map->work) vmap_area_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&map->work) &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&map->work) purge_vmap_area_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&map->work) rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&map->work) pool_lock#2 irq_context: 0 &mm->mmap_lock mmu_notifier_invalidate_range_start irq_context: 0 &type->i_mutex_dir_key#3 &c->lock irq_context: 0 &type->i_mutex_dir_key#3 &pcp->lock &zone->lock irq_context: 0 &type->i_mutex_dir_key#3 &zone->lock irq_context: 0 &type->i_mutex_dir_key#3 &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &type->i_mutex_dir_key#3 &dentry->d_lock &wq irq_context: 0 &vma->vm_lock->lock fs_reclaim irq_context: 0 &vma->vm_lock->lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &vma->vm_lock->lock &pcp->lock &zone->lock irq_context: 0 &vma->vm_lock->lock &____s->seqcount irq_context: 0 &vma->vm_lock->lock pool_lock#2 irq_context: 0 &vma->vm_lock->lock ptlock_ptr(page)#2 irq_context: 0 &vma->vm_lock->lock ptlock_ptr(page)#2 lock#4 irq_context: 0 &type->i_mutex_dir_key#3 rename_lock.seqcount irq_context: 0 &type->i_mutex_dir_key#3 &dentry->d_lock &wq#2 irq_context: 0 &type->i_mutex_dir_key#3 &sb->s_type->i_lock_key#22 &dentry->d_lock &wq#2 irq_context: 0 &iint->mutex mapping.invalidate_lock &xa->xa_lock#6 pool_lock#2 irq_context: hardirq &rq->__lock pool_lock#2 irq_context: 0 rcu_tasks_trace.tasks_gp_mutex rcu_read_lock &pool->lock irq_context: 0 rcu_tasks_trace.tasks_gp_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 rcu_tasks_trace.tasks_gp_mutex rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 rcu_tasks_trace.tasks_gp_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 &iint->mutex mapping.invalidate_lock &ei->i_data_sem irq_context: 0 rcu_tasks_trace.tasks_gp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 &iint->mutex mapping.invalidate_lock &ei->i_data_sem mmu_notifier_invalidate_range_start irq_context: 0 rcu_tasks_trace.tasks_gp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &iint->mutex 
mapping.invalidate_lock &ei->i_data_sem &c->lock irq_context: 0 &iint->mutex mapping.invalidate_lock &ei->i_data_sem &pcp->lock &zone->lock irq_context: 0 &iint->mutex mapping.invalidate_lock &ei->i_data_sem &zone->lock irq_context: 0 &iint->mutex mapping.invalidate_lock &ei->i_data_sem &____s->seqcount irq_context: 0 &iint->mutex mapping.invalidate_lock &ei->i_data_sem pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&aux->work) irq_context: 0 &iint->mutex mapping.invalidate_lock &ei->i_data_sem &ei->i_es_lock irq_context: 0 (wq_completion)events (work_completion)(&aux->work) map_idr_lock irq_context: 0 &iint->mutex mapping.invalidate_lock &ei->i_data_sem &ei->i_es_lock pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&aux->work) map_idr_lock &obj_hash[i].lock irq_context: 0 &iint->mutex mapping.invalidate_lock &ei->i_data_sem &ei->i_es_lock &sbi->s_es_lock irq_context: 0 (wq_completion)events (work_completion)(&aux->work) map_idr_lock pool_lock#2 irq_context: 0 &iint->mutex mapping.invalidate_lock &ei->i_data_sem &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&aux->work) &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&aux->work) rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)events (work_completion)(&aux->work) rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&aux->work) rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&aux->work) rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)events (work_completion)(&aux->work) rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&aux->work) rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&aux->work) pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&aux->work) pack_mutex irq_context: 0 (wq_completion)events (work_completion)(&aux->work) pack_mutex text_mutex irq_context: 0 (wq_completion)events (work_completion)(&aux->work) pack_mutex text_mutex ptlock_ptr(page)#2 irq_context: 0 (wq_completion)events (work_completion)(&aux->work) pcpu_lock irq_context: 0 (wq_completion)events (work_completion)(&aux->work) vmap_area_lock irq_context: 0 (wq_completion)events (work_completion)(&aux->work) purge_vmap_area_lock irq_context: 0 (wq_completion)events (work_completion)(&aux->work) rcu_read_lock pool_lock#2 irq_context: 0 &iint->mutex mapping.invalidate_lock rcu_read_lock pool_lock#2 irq_context: 0 &iint->mutex batched_entropy_u8.lock irq_context: 0 &iint->mutex kfence_freelist_lock irq_context: 0 &iint->mutex rcu_read_lock pool_lock#2 irq_context: 0 &iint->mutex mapping.invalidate_lock &xa->xa_lock#6 &c->lock irq_context: 0 &iint->mutex mapping.invalidate_lock &xa->xa_lock#6 &____s->seqcount irq_context: 0 &iint->mutex mapping.invalidate_lock &ei->i_es_lock key#2 irq_context: 0 &mm->mmap_lock rcu_read_lock rcu_read_lock ptlock_ptr(page) irq_context: 0 &iint->mutex mapping.invalidate_lock rcu_read_lock &obj_hash[i].lock pool_lock irq_context: 0 &type->i_mutex_dir_key#3 lock#4 &lruvec->lru_lock irq_context: 0 &type->i_mutex_dir_key#3 rcu_read_lock &dd->lock irq_context: 0 &type->i_mutex_dir_key#3 rcu_read_lock &obj_hash[i].lock irq_context: 0 &type->i_mutex_dir_key#3 rcu_read_lock pool_lock#2 irq_context: 0 &type->i_mutex_dir_key#3 rcu_read_lock tk_core.seq.seqcount irq_context: 0 
&type->i_mutex_dir_key#3 rcu_read_lock &virtscsi_vq->vq_lock irq_context: 0 &type->i_mutex_dir_key#3 batched_entropy_u8.lock irq_context: 0 &type->i_mutex_dir_key#3 kfence_freelist_lock irq_context: 0 &iint->mutex mapping.invalidate_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 &iint->mutex mapping.invalidate_lock batched_entropy_u8.lock irq_context: 0 &iint->mutex mapping.invalidate_lock batched_entropy_u8.lock crngs.lock irq_context: 0 &iint->mutex mapping.invalidate_lock batched_entropy_u8.lock crngs.lock base_crng.lock irq_context: 0 &iint->mutex mapping.invalidate_lock kfence_freelist_lock irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem &____s->seqcount irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem rcu_read_lock pool_lock#2 irq_context: 0 &iint->mutex mapping.invalidate_lock rcu_read_lock &c->lock irq_context: 0 &iint->mutex mapping.invalidate_lock rcu_read_lock &____s->seqcount irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem &anon_vma->rwsem irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem &anon_vma->rwsem &obj_hash[i].lock irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem &anon_vma->rwsem pool_lock#2 irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem &anon_vma->rwsem &obj_hash[i].lock pool_lock irq_context: 0 &vma->vm_lock->lock ptlock_ptr(page)#2 key irq_context: 0 &mm->mmap_lock ptlock_ptr(page)#2 lock#4 &lruvec->lru_lock irq_context: 0 &vma->vm_lock->lock mmu_notifier_invalidate_range_start irq_context: 0 &port->mutex irq_context: 0 &tty->ldisc_sem irq_context: 0 &tty->ldisc_sem &tty->termios_rwsem irq_context: 0 &tty->ldisc_sem &mm->mmap_lock irq_context: 0 &tty->ldisc_sem &tty->termios_rwsem irq_context: 0 &tty->ldisc_sem &tty->termios_rwsem &port->mutex irq_context: 0 &tty->ldisc_sem &tty->termios_rwsem &tty->ldisc_sem &tty->write_wait irq_context: 0 &tty->ldisc_sem &tty->termios_rwsem &tty->ldisc_sem &tty->read_wait irq_context: 0 &type->i_mutex_dir_key#3 &ei->i_es_lock key#2 irq_context: 0 &iint->mutex mapping.invalidate_lock &ei->i_data_sem &ei->i_es_lock &obj_hash[i].lock irq_context: 0 integrity_iint_lock irq_context: 0 &sb->s_type->i_mutex_key#9 rename_lock.seqcount irq_context: 0 &sb->s_type->i_mutex_key#9 &sb->s_type->i_lock_key#23 &dentry->d_lock &wq#2 irq_context: 0 &sb->s_type->i_lock_key#23 irq_context: 0 &p->lock irq_context: 0 &p->lock fs_reclaim irq_context: 0 &p->lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &p->lock pool_lock#2 irq_context: 0 &p->lock &mm->mmap_lock irq_context: 0 &type->s_umount_key#30/1 irq_context: 0 &type->s_umount_key#30/1 fs_reclaim irq_context: 0 &type->s_umount_key#30/1 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#30/1 pool_lock#2 irq_context: 0 &type->s_umount_key#30/1 pcpu_alloc_mutex irq_context: 0 &type->s_umount_key#30/1 pcpu_alloc_mutex pcpu_lock irq_context: 0 &type->s_umount_key#30/1 shrinker_rwsem irq_context: 0 &type->s_umount_key#30/1 list_lrus_mutex irq_context: 0 &type->s_umount_key#30/1 sb_lock irq_context: 0 &type->s_umount_key#30/1 sb_lock unnamed_dev_ida.xa_lock irq_context: 0 &type->s_umount_key#30/1 &root->kernfs_rwsem irq_context: 0 &type->s_umount_key#30/1 &root->kernfs_rwsem inode_hash_lock irq_context: 0 &type->s_umount_key#30/1 &root->kernfs_rwsem fs_reclaim irq_context: 0 &type->s_umount_key#30/1 &root->kernfs_rwsem fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#30/1 &root->kernfs_rwsem pool_lock#2 irq_context: 0 &type->s_umount_key#30/1 &root->kernfs_rwsem 
mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#30/1 &root->kernfs_rwsem inode_hash_lock &sb->s_type->i_lock_key#24 irq_context: 0 &type->s_umount_key#30/1 &root->kernfs_rwsem inode_hash_lock &s->s_inode_list_lock irq_context: 0 &type->s_umount_key#30/1 &root->kernfs_rwsem tk_core.seq.seqcount irq_context: 0 &type->s_umount_key#30/1 &root->kernfs_rwsem &sb->s_type->i_lock_key#24 irq_context: 0 &type->s_umount_key#30/1 &sb->s_type->i_lock_key#24 irq_context: 0 &type->s_umount_key#30/1 &sb->s_type->i_lock_key#24 &dentry->d_lock irq_context: 0 &type->s_umount_key#30/1 &root->kernfs_supers_rwsem irq_context: 0 &type->s_umount_key#30/1 &dentry->d_lock irq_context: 0 &root->kernfs_iattr_rwsem irq_context: 0 &type->i_mutex_dir_key#4 irq_context: 0 &type->i_mutex_dir_key#4 fs_reclaim irq_context: 0 &type->i_mutex_dir_key#4 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &type->i_mutex_dir_key#4 pool_lock#2 irq_context: 0 &type->i_mutex_dir_key#4 &dentry->d_lock irq_context: 0 &type->i_mutex_dir_key#4 rcu_read_lock rename_lock.seqcount irq_context: 0 &type->i_mutex_dir_key#4 &root->kernfs_rwsem irq_context: 0 &type->i_mutex_dir_key#4 &root->kernfs_rwsem inode_hash_lock irq_context: 0 &type->i_mutex_dir_key#4 &root->kernfs_rwsem fs_reclaim irq_context: 0 &type->i_mutex_dir_key#4 &root->kernfs_rwsem fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &type->i_mutex_dir_key#4 &root->kernfs_rwsem pool_lock#2 irq_context: 0 &type->i_mutex_dir_key#4 &root->kernfs_rwsem mmu_notifier_invalidate_range_start irq_context: 0 &type->i_mutex_dir_key#4 &root->kernfs_rwsem inode_hash_lock &sb->s_type->i_lock_key#24 irq_context: 0 &type->i_mutex_dir_key#4 &root->kernfs_rwsem inode_hash_lock &s->s_inode_list_lock irq_context: 0 &type->i_mutex_dir_key#4 &root->kernfs_rwsem tk_core.seq.seqcount irq_context: 0 &type->i_mutex_dir_key#4 &root->kernfs_rwsem &sb->s_type->i_lock_key#24 irq_context: 0 &type->i_mutex_dir_key#4 &sb->s_type->i_lock_key#24 irq_context: 0 &type->i_mutex_dir_key#4 &sb->s_type->i_lock_key#24 &dentry->d_lock irq_context: 0 &type->i_mutex_dir_key#4 &sb->s_type->i_lock_key#24 &dentry->d_lock &wq irq_context: 0 &type->i_mutex_dir_key#4 &dentry->d_lock &wq irq_context: 0 &ent->pde_unload_lock irq_context: 0 &p->lock file_systems_lock irq_context: 0 namespace_sem mount_lock mount_lock.seqcount &new_ns->poll irq_context: 0 namespace_sem mount_lock mount_lock.seqcount &dentry->d_lock irq_context: 0 namespace_sem mount_lock mount_lock.seqcount rcu_read_lock &dentry->d_lock irq_context: 0 namespace_sem mount_lock mount_lock.seqcount &obj_hash[i].lock irq_context: 0 namespace_sem mount_lock mount_lock.seqcount pool_lock#2 irq_context: 0 &x->wait#25 irq_context: 0 &mm->mmap_lock resource_lock irq_context: 0 &mm->mmap_lock rcu_read_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 &net->unx.table.locks[i] irq_context: 0 &sb->s_type->i_lock_key#8 &dentry->d_lock irq_context: 0 &sb->s_type->i_mutex_key#10 irq_context: 0 &sb->s_type->i_mutex_key#10 &net->unx.table.locks[i] irq_context: 0 &sb->s_type->i_mutex_key#10 &u->lock irq_context: 0 &sb->s_type->i_mutex_key#10 &u->lock clock-AF_UNIX irq_context: 0 &sb->s_type->i_mutex_key#10 &u->peer_wait irq_context: 0 &sb->s_type->i_mutex_key#10 rlock-AF_UNIX irq_context: 0 &sb->s_type->i_mutex_key#10 pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 &dir->lock irq_context: 0 &sb->s_type->i_mutex_key#10 &obj_hash[i].lock irq_context: 0 &sig->cred_guard_mutex &type->i_mutex_dir_key#3 &ei->i_data_sem 
&ei->i_es_lock &pcp->lock &zone->lock irq_context: 0 &sig->cred_guard_mutex &type->i_mutex_dir_key#3 &ei->i_data_sem &ei->i_es_lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &sig->cred_guard_mutex &type->i_mutex_dir_key#3 &ei->i_data_sem &ei->i_es_lock &____s->seqcount irq_context: 0 &sig->cred_guard_mutex &type->i_mutex_dir_key#3 &ei->i_data_sem &ei->i_es_lock &c->lock irq_context: softirq bit_wait_table + i &p->pi_lock &cfs_rq->removed.lock irq_context: 0 &sig->cred_guard_mutex mapping.invalidate_lock rcu_read_lock pool_lock#2 irq_context: 0 &sig->cred_guard_mutex tomoyo_ss rcu_read_lock &____s->seqcount#3 irq_context: 0 &sig->cred_guard_mutex tomoyo_ss tomoyo_policy_lock rcu_read_lock pool_lock#2 irq_context: 0 &sig->cred_guard_mutex tomoyo_ss tomoyo_policy_lock &obj_hash[i].lock irq_context: 0 &sig->cred_guard_mutex &iint->mutex &c->lock irq_context: 0 &sig->cred_guard_mutex &iint->mutex &____s->seqcount irq_context: 0 &sig->cred_guard_mutex &p->alloc_lock irq_context: 0 &sig->cred_guard_mutex &p->alloc_lock &x->wait#25 irq_context: 0 &sig->cred_guard_mutex &p->alloc_lock &x->wait#25 &p->pi_lock irq_context: 0 &sig->cred_guard_mutex &p->alloc_lock &x->wait#25 &p->pi_lock &rq->__lock irq_context: 0 &sig->cred_guard_mutex &p->alloc_lock &x->wait#25 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sig->wait_chldexit irq_context: 0 tasklist_lock irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock &p->alloc_lock irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock &p->alloc_lock &memcg->mm_list.lock irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)events (work_completion)(&s->destroy_work) quarantine_lock irq_context: 0 &mm->mmap_lock ptlock_ptr(page)#2 key irq_context: 0 &type->s_umount_key#28/1 &sb->s_type->i_mutex_key#9 &c->lock irq_context: 0 &type->s_umount_key#28/1 &sb->s_type->i_mutex_key#9 &____s->seqcount irq_context: 0 cgroup_threadgroup_rwsem &rq->__lock irq_context: 0 &tsk->futex_exit_mutex &mm->mmap_lock irq_context: 0 &mm->mmap_lock &p->alloc_lock irq_context: 0 &mm->mmap_lock lock#4 irq_context: 0 &mm->mmap_lock lock#4 &lruvec->lru_lock irq_context: 0 &mm->mmap_lock lock#5 irq_context: 0 &mm->mmap_lock ptlock_ptr(page)#2 &____s->seqcount irq_context: 0 &mm->mmap_lock ptlock_ptr(page)#2 pool_lock#2 irq_context: 0 &mm->mmap_lock &lruvec->lru_lock irq_context: 0 &memcg->mm_list.lock irq_context: 0 tasklist_lock &sighand->siglock &____s->seqcount irq_context: 0 tasklist_lock &sighand->siglock pool_lock#2 irq_context: 0 tasklist_lock &sighand->siglock &c->lock irq_context: 0 rcu_read_lock &____s->seqcount#5 irq_context: 0 &prev->lock irq_context: 0 &sighand->siglock &(&sig->stats_lock)->lock irq_context: 0 &sighand->siglock &(&sig->stats_lock)->lock &____s->seqcount#5 irq_context: 0 sb_writers#4 irq_context: 0 sb_writers#4 mount_lock irq_context: 0 &type->i_mutex_dir_key#3 rename_lock.seqcount irq_context: 0 &type->i_mutex_dir_key#3 rcu_read_lock &dentry->d_lock irq_context: 0 &type->i_mutex_dir_key#3 &dentry->d_lock irq_context: 0 &sig->cred_guard_mutex &sb->s_type->i_mutex_key#8 integrity_iint_lock irq_context: 0 &sig->cred_guard_mutex &ei->xattr_sem &mapping->private_lock irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock batched_entropy_u64.lock crngs.lock base_crng.lock irq_context: 0 &mm->mmap_lock lock#4 rcu_read_lock pool_lock#2 irq_context: 
0 &mm->mmap_lock lock#4 &obj_hash[i].lock irq_context: 0 tomoyo_ss &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &sb->s_type->i_mutex_key#9 &____s->seqcount irq_context: 0 &sb->s_type->i_mutex_key#9 &c->lock irq_context: 0 sb_writers#3 tk_core.seq.seqcount irq_context: 0 sb_writers#3 &sb->s_type->i_lock_key#23 irq_context: 0 sb_writers#3 &wb->list_lock irq_context: 0 sb_writers#3 &wb->list_lock &sb->s_type->i_lock_key#23 irq_context: 0 &sb->s_type->i_mutex_key#9 &p->alloc_lock irq_context: 0 &sb->s_type->i_mutex_key#9 &pid->lock irq_context: 0 &p->alloc_lock &fs->lock &dentry->d_lock irq_context: 0 &p->lock &____s->seqcount irq_context: 0 &p->lock rcu_read_lock pool_lock#2 irq_context: 0 &p->lock &obj_hash[i].lock irq_context: 0 &p->lock namespace_sem irq_context: 0 &p->lock namespace_sem &new_ns->ns_lock irq_context: 0 &p->lock namespace_sem rcu_read_lock mount_lock.seqcount irq_context: 0 &p->lock namespace_sem rcu_read_lock rcu_read_lock rename_lock.seqcount irq_context: 0 &type->s_umount_key#31 irq_context: 0 &type->s_umount_key#31 &lru->node[i].lock irq_context: 0 &type->s_umount_key#31 &dentry->d_lock irq_context: 0 &type->s_umount_key#31 &obj_hash[i].lock irq_context: 0 &type->s_umount_key#31 &obj_hash[i].lock pool_lock irq_context: 0 &type->s_umount_key#31 pool_lock#2 irq_context: 0 &type->s_umount_key#31 &sb->s_type->i_lock_key#22 irq_context: 0 &type->s_umount_key#31 &sb->s_type->i_lock_key#22 &lru->node[i].lock irq_context: 0 &type->s_umount_key#31 &journal->j_state_lock irq_context: 0 &type->s_umount_key#31 &p->alloc_lock irq_context: 0 &type->s_umount_key#31 (work_completion)(&sbi->s_error_work) irq_context: 0 &type->s_umount_key#31 &journal->j_state_lock irq_context: 0 &type->s_umount_key#31 key#3 irq_context: 0 &type->s_umount_key#31 key#4 irq_context: 0 &type->s_umount_key#31 &sbi->s_error_lock irq_context: 0 &type->s_umount_key#31 mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#31 tk_core.seq.seqcount irq_context: 0 &type->s_umount_key#31 &base->lock irq_context: 0 &type->s_umount_key#31 &base->lock &obj_hash[i].lock irq_context: 0 &type->s_umount_key#31 &fq->mq_flush_lock irq_context: 0 &type->s_umount_key#31 &fq->mq_flush_lock &q->requeue_lock irq_context: 0 &type->s_umount_key#31 &fq->mq_flush_lock &obj_hash[i].lock irq_context: 0 &type->s_umount_key#31 &fq->mq_flush_lock rcu_read_lock &pool->lock irq_context: 0 &type->s_umount_key#31 &fq->mq_flush_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 &type->s_umount_key#31 &fq->mq_flush_lock rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 &type->s_umount_key#31 &fq->mq_flush_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 &type->s_umount_key#31 &fq->mq_flush_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 &type->s_umount_key#31 &fq->mq_flush_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &type->s_umount_key#31 &rq->__lock irq_context: 0 &type->s_umount_key#31 bit_wait_table + i irq_context: 0 &type->s_umount_key#31 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq &fq->mq_flush_lock bit_wait_table + i &p->pi_lock &cfs_rq->removed.lock irq_context: 0 &type->s_umount_key#31 ext4_li_mtx irq_context: 0 &type->s_umount_key#31 ext4_li_mtx fs_reclaim irq_context: 0 &type->s_umount_key#31 ext4_li_mtx fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#31 ext4_li_mtx pool_lock#2 irq_context: 0 &type->s_umount_key#31 ext4_li_mtx 
batched_entropy_u16.lock irq_context: 0 &type->s_umount_key#31 ext4_li_mtx &eli->li_list_mtx irq_context: 0 &type->s_umount_key#31 ext4_li_mtx kthread_create_lock irq_context: 0 &type->s_umount_key#31 ext4_li_mtx &p->pi_lock irq_context: 0 &type->s_umount_key#31 ext4_li_mtx &p->pi_lock &cfs_rq->removed.lock irq_context: 0 &type->s_umount_key#31 ext4_li_mtx &p->pi_lock &rq->__lock irq_context: 0 &type->s_umount_key#31 ext4_li_mtx &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &type->s_umount_key#31 ext4_li_mtx &rq->__lock irq_context: 0 &type->s_umount_key#31 ext4_li_mtx &x->wait irq_context: 0 &type->s_umount_key#31 ext4_li_mtx &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &type->s_umount_key#31 ext4_li_mtx &obj_hash[i].lock irq_context: 0 &eli->li_list_mtx irq_context: 0 &type->s_umount_key#31 (console_sem).lock irq_context: 0 &type->s_umount_key#31 console_lock console_srcu console_owner_lock irq_context: 0 &type->s_umount_key#31 console_lock console_srcu console_owner irq_context: 0 &type->s_umount_key#31 console_lock console_srcu console_owner &port_lock_key irq_context: 0 &type->s_umount_key#31 console_lock console_srcu console_owner console_owner_lock irq_context: 0 &type->s_umount_key#31 mount_lock irq_context: 0 &type->s_umount_key#31 mount_lock mount_lock.seqcount irq_context: 0 &type->s_umount_key#31 mount_lock mount_lock.seqcount &new_ns->poll irq_context: 0 namespace_sem irq_context: 0 namespace_sem &new_ns->ns_lock irq_context: 0 rcu_read_lock &pid->lock irq_context: 0 &sb->s_type->i_lock_key#23 &dentry->d_lock irq_context: 0 rename_lock.seqcount irq_context: 0 &dentry->d_lock &dentry->d_lock/1 irq_context: 0 &dentry->d_lock &dentry->d_lock/1 &lru->node[i].lock irq_context: 0 &pid->lock irq_context: 0 sb_writers#4 tk_core.seq.seqcount irq_context: 0 sb_writers#4 mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#4 &____s->seqcount irq_context: 0 sb_writers#4 pool_lock#2 irq_context: 0 sb_writers#4 &c->lock irq_context: 0 sb_writers#4 &journal->j_state_lock irq_context: 0 sb_writers#4 &journal->j_state_lock irq_context: 0 sb_writers#4 &journal->j_state_lock tk_core.seq.seqcount irq_context: 0 sb_writers#4 &journal->j_state_lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &journal->j_state_lock &base->lock irq_context: 0 sb_writers#4 &journal->j_state_lock &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#4 jbd2_handle irq_context: 0 sb_writers#4 jbd2_handle mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#4 jbd2_handle &____s->seqcount irq_context: 0 sb_writers#4 jbd2_handle pool_lock#2 irq_context: 0 sb_writers#4 jbd2_handle &c->lock irq_context: 0 sb_writers#4 jbd2_handle &ret->b_state_lock irq_context: 0 sb_writers#4 jbd2_handle &ret->b_state_lock &journal->j_list_lock irq_context: 0 sb_writers#4 jbd2_handle &journal->j_revoke_lock irq_context: 0 sb_writers#4 jbd2_handle &ei->i_raw_lock irq_context: 0 sb_writers#4 jbd2_handle &journal->j_wait_updates irq_context: 0 sb_writers#4 &obj_hash[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_lock_key#22 irq_context: 0 sb_writers#4 &wb->list_lock irq_context: 0 sb_writers#4 &wb->list_lock &sb->s_type->i_lock_key#22 irq_context: 0 sb_writers#4 &wb->work_lock irq_context: 0 sb_writers#4 &wb->work_lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &wb->work_lock &base->lock irq_context: 0 sb_writers#4 &wb->work_lock &base->lock &obj_hash[i].lock irq_context: 0 rcu_read_lock tk_core.seq.seqcount irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 
irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 rename_lock.seqcount irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 rcu_read_lock &dentry->d_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 tomoyo_ss irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 tomoyo_ss mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 tomoyo_ss pool_lock#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 tomoyo_ss rcu_read_lock mount_lock.seqcount irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 tomoyo_ss rcu_read_lock rcu_read_lock rename_lock.seqcount irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 tomoyo_ss &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 tomoyo_ss rcu_read_lock &dentry->d_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 tomoyo_ss tomoyo_policy_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 tomoyo_ss tomoyo_policy_lock mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 tomoyo_ss tomoyo_policy_lock pool_lock#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 pool_lock#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 &sb->s_type->i_lock_key#22 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 &s->s_inode_list_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 &ei->xattr_sem irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 &____s->seqcount irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 &xa->xa_lock#6 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 lock#4 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 &mapping->private_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 tk_core.seq.seqcount irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 &dd->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 rcu_read_lock &pool->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 bit_wait_table + i irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 &journal->j_state_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 jbd2_handle irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 jbd2_handle mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 jbd2_handle pool_lock#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 jbd2_handle &ret->b_state_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 jbd2_handle &ret->b_state_lock &journal->j_list_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 jbd2_handle &journal->j_revoke_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 jbd2_handle &mapping->private_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 jbd2_handle &meta_group_info[i]->alloc_sem 
irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 jbd2_handle tk_core.seq.seqcount irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 jbd2_handle inode_hash_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 jbd2_handle inode_hash_lock &sb->s_type->i_lock_key#22 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 jbd2_handle batched_entropy_u32.lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 jbd2_handle &ei->i_raw_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 jbd2_handle &ei->i_es_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 jbd2_handle &sb->s_type->i_lock_key#22 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 jbd2_handle &sb->s_type->i_lock_key#22 &dentry->d_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 jbd2_handle &journal->j_wait_updates irq_context: 0 sb_internal rcu_read_lock init_fs.seq.seqcount irq_context: 0 sb_internal rcu_read_lock rcu_read_lock mount_lock.seqcount irq_context: 0 sb_internal rcu_read_lock rcu_read_lock rcu_read_lock rename_lock.seqcount irq_context: 0 sb_internal mmu_notifier_invalidate_range_start irq_context: 0 sb_internal pool_lock#2 irq_context: 0 sb_internal &journal->j_state_lock irq_context: 0 sb_internal jbd2_handle irq_context: 0 sb_internal jbd2_handle mmu_notifier_invalidate_range_start irq_context: 0 sb_internal jbd2_handle pool_lock#2 irq_context: 0 sb_internal jbd2_handle &ret->b_state_lock irq_context: 0 sb_internal jbd2_handle &ret->b_state_lock &journal->j_list_lock irq_context: 0 sb_internal jbd2_handle &journal->j_revoke_lock irq_context: 0 sb_internal jbd2_handle &mapping->private_lock irq_context: 0 sb_internal jbd2_handle &journal->j_wait_updates irq_context: 0 sb_internal &obj_hash[i].lock irq_context: 0 &ei->i_data_sem irq_context: 0 &ei->i_data_sem &ei->i_prealloc_lock irq_context: 0 &sighand->siglock hrtimer_bases.lock irq_context: 0 &sighand->siglock hrtimer_bases.lock tk_core.seq.seqcount irq_context: 0 &sighand->siglock hrtimer_bases.lock &obj_hash[i].lock irq_context: 0 file_rwsem irq_context: 0 file_rwsem &ctx->flc_lock irq_context: 0 file_rwsem &ctx->flc_lock &fll->lock irq_context: 0 &ctx->flc_lock irq_context: 0 &sig->cred_guard_mutex mapping.invalidate_lock rcu_read_lock &____s->seqcount irq_context: 0 &sig->cred_guard_mutex mapping.invalidate_lock rcu_read_lock &c->lock irq_context: softirq &folio_wait_table[i] &p->pi_lock &cfs_rq->removed.lock irq_context: 0 &sig->cred_guard_mutex tk_core.seq.seqcount irq_context: 0 &sig->cred_guard_mutex sb_writers#4 mount_lock irq_context: 0 &sig->cred_guard_mutex sb_writers#4 tk_core.seq.seqcount irq_context: 0 &sig->cred_guard_mutex sb_writers#4 mmu_notifier_invalidate_range_start irq_context: 0 &sig->cred_guard_mutex sb_writers#4 pool_lock#2 irq_context: 0 &sig->cred_guard_mutex sb_writers#4 &journal->j_state_lock irq_context: 0 &sig->cred_guard_mutex sb_writers#4 jbd2_handle irq_context: 0 &sig->cred_guard_mutex sb_writers#4 jbd2_handle &mapping->private_lock irq_context: 0 &sig->cred_guard_mutex sb_writers#4 jbd2_handle mmu_notifier_invalidate_range_start irq_context: 0 &sig->cred_guard_mutex sb_writers#4 jbd2_handle pool_lock#2 irq_context: 0 &sig->cred_guard_mutex sb_writers#4 jbd2_handle &ret->b_state_lock irq_context: 0 &sig->cred_guard_mutex sb_writers#4 jbd2_handle &ret->b_state_lock &journal->j_list_lock irq_context: 0 &sig->cred_guard_mutex sb_writers#4 jbd2_handle &journal->j_revoke_lock irq_context: 0 &sig->cred_guard_mutex sb_writers#4 jbd2_handle &ei->i_raw_lock irq_context: 0 &sig->cred_guard_mutex 
sb_writers#4 jbd2_handle &journal->j_wait_updates irq_context: 0 &sig->cred_guard_mutex sb_writers#4 &obj_hash[i].lock irq_context: 0 &sig->cred_guard_mutex sb_writers#4 &sb->s_type->i_lock_key#22 irq_context: 0 &sig->cred_guard_mutex sb_writers#4 &wb->list_lock irq_context: 0 &sig->cred_guard_mutex sb_writers#4 &wb->list_lock &sb->s_type->i_lock_key#22 irq_context: 0 &sig->cred_guard_mutex tomoyo_ss &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &sig->cred_guard_mutex &iint->mutex tk_core.seq.seqcount irq_context: 0 &mm->mmap_lock tk_core.seq.seqcount irq_context: 0 sb_writers#4 jbd2_handle &mapping->private_lock irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem quarantine_lock irq_context: 0 sb_writers#4 rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 rename_lock.seqcount irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 rcu_read_lock &dentry->d_lock irq_context: 0 sb_writers &type->i_mutex_dir_key/1 tomoyo_ss irq_context: 0 sb_writers &type->i_mutex_dir_key/1 tomoyo_ss mmu_notifier_invalidate_range_start irq_context: 0 sb_writers &type->i_mutex_dir_key/1 tomoyo_ss &c->lock irq_context: 0 sb_writers &type->i_mutex_dir_key/1 tomoyo_ss &____s->seqcount irq_context: 0 sb_writers &type->i_mutex_dir_key/1 tomoyo_ss pool_lock#2 irq_context: 0 sb_writers &type->i_mutex_dir_key/1 tomoyo_ss rcu_read_lock mount_lock.seqcount irq_context: 0 sb_writers &type->i_mutex_dir_key/1 tomoyo_ss rcu_read_lock rcu_read_lock rename_lock.seqcount irq_context: 0 sb_writers &type->i_mutex_dir_key/1 tomoyo_ss &obj_hash[i].lock irq_context: 0 sb_writers &type->i_mutex_dir_key/1 tomoyo_ss rcu_read_lock &dentry->d_lock irq_context: 0 sb_writers &type->i_mutex_dir_key/1 tomoyo_ss tomoyo_policy_lock irq_context: 0 sb_writers &type->i_mutex_dir_key/1 tomoyo_ss tomoyo_policy_lock mmu_notifier_invalidate_range_start irq_context: 0 sb_writers &type->i_mutex_dir_key/1 tomoyo_ss tomoyo_policy_lock pool_lock#2 irq_context: 0 &sig->cred_guard_mutex sb_writers#4 &____s->seqcount irq_context: 0 &sig->cred_guard_mutex sb_writers#4 &c->lock irq_context: 0 &sig->cred_guard_mutex sb_writers#4 jbd2_handle &____s->seqcount irq_context: 0 &sig->cred_guard_mutex sb_writers#4 jbd2_handle &c->lock irq_context: 0 &sig->cred_guard_mutex rcu_read_lock tk_core.seq.seqcount irq_context: 0 &p->lock &c->lock irq_context: 0 &sb->s_type->i_mutex_key#8 &____s->seqcount irq_context: 0 &sb->s_type->i_mutex_key#8 &c->lock irq_context: 0 &iint->mutex tk_core.seq.seqcount irq_context: 0 &iint->mutex sb_writers#4 mount_lock irq_context: 0 &iint->mutex sb_writers#4 tk_core.seq.seqcount irq_context: 0 &iint->mutex sb_writers#4 mmu_notifier_invalidate_range_start irq_context: 0 &iint->mutex sb_writers#4 pool_lock#2 irq_context: 0 &iint->mutex sb_writers#4 &journal->j_state_lock irq_context: 0 &iint->mutex sb_writers#4 jbd2_handle irq_context: 0 &iint->mutex sb_writers#4 jbd2_handle &mapping->private_lock irq_context: 0 &iint->mutex sb_writers#4 jbd2_handle mmu_notifier_invalidate_range_start irq_context: 0 &iint->mutex sb_writers#4 jbd2_handle pool_lock#2 irq_context: 0 &iint->mutex sb_writers#4 jbd2_handle &ret->b_state_lock irq_context: 0 &iint->mutex sb_writers#4 jbd2_handle &ret->b_state_lock &journal->j_list_lock irq_context: 0 &iint->mutex sb_writers#4 jbd2_handle &journal->j_revoke_lock irq_context: 0 &iint->mutex sb_writers#4 jbd2_handle &ei->i_raw_lock irq_context: 0 &iint->mutex sb_writers#4 jbd2_handle &journal->j_wait_updates irq_context: 0 
&iint->mutex sb_writers#4 &obj_hash[i].lock irq_context: 0 &iint->mutex sb_writers#4 &sb->s_type->i_lock_key#22 irq_context: 0 &iint->mutex sb_writers#4 &wb->list_lock irq_context: 0 &iint->mutex sb_writers#4 &wb->list_lock &sb->s_type->i_lock_key#22 irq_context: 0 rcu_read_lock &p->alloc_lock irq_context: 0 &type->s_umount_key#32/1 irq_context: 0 &type->s_umount_key#32/1 fs_reclaim irq_context: 0 &type->s_umount_key#32/1 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#32/1 pool_lock#2 irq_context: 0 &type->s_umount_key#32/1 pcpu_alloc_mutex irq_context: 0 &type->s_umount_key#32/1 pcpu_alloc_mutex pcpu_lock irq_context: 0 &type->s_umount_key#32/1 shrinker_rwsem irq_context: 0 &type->s_umount_key#32/1 list_lrus_mutex irq_context: 0 &type->s_umount_key#32/1 &c->lock irq_context: 0 &type->s_umount_key#32/1 &____s->seqcount irq_context: 0 &type->s_umount_key#32/1 sb_lock irq_context: 0 &type->s_umount_key#32/1 sb_lock unnamed_dev_ida.xa_lock irq_context: 0 &type->s_umount_key#32/1 mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#32/1 &sb->s_type->i_lock_key#25 irq_context: 0 &type->s_umount_key#32/1 &s->s_inode_list_lock irq_context: 0 &type->s_umount_key#32/1 tk_core.seq.seqcount irq_context: 0 &type->s_umount_key#32/1 &sb->s_type->i_lock_key#25 &dentry->d_lock irq_context: 0 &type->s_umount_key#32/1 &sb->s_type->i_mutex_key#11 irq_context: 0 &type->s_umount_key#32/1 &sb->s_type->i_mutex_key#11 fs_reclaim irq_context: 0 &type->s_umount_key#32/1 &sb->s_type->i_mutex_key#11 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#32/1 &sb->s_type->i_mutex_key#11 pool_lock#2 irq_context: 0 &type->s_umount_key#32/1 &sb->s_type->i_mutex_key#11 &dentry->d_lock irq_context: 0 &type->s_umount_key#32/1 &sb->s_type->i_mutex_key#11 &c->lock irq_context: 0 &type->s_umount_key#32/1 &sb->s_type->i_mutex_key#11 &____s->seqcount irq_context: 0 &type->s_umount_key#32/1 &sb->s_type->i_mutex_key#11 mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#32/1 &sb->s_type->i_mutex_key#11 &sb->s_type->i_lock_key#25 irq_context: 0 &type->s_umount_key#32/1 &sb->s_type->i_mutex_key#11 &s->s_inode_list_lock irq_context: 0 &type->s_umount_key#32/1 &sb->s_type->i_mutex_key#11 tk_core.seq.seqcount irq_context: 0 &type->s_umount_key#32/1 &sb->s_type->i_mutex_key#11 &sb->s_type->i_lock_key#25 &dentry->d_lock irq_context: 0 &type->s_umount_key#32/1 &dentry->d_lock irq_context: 0 &type->i_mutex_dir_key#2 irq_context: 0 &type->i_mutex_dir_key#2 namespace_sem irq_context: 0 &type->i_mutex_dir_key#2 namespace_sem rcu_read_lock mount_lock.seqcount irq_context: 0 &type->i_mutex_dir_key#2 namespace_sem fs_reclaim irq_context: 0 &type->i_mutex_dir_key#2 namespace_sem fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &type->i_mutex_dir_key#2 namespace_sem rename_lock irq_context: 0 &type->i_mutex_dir_key#2 namespace_sem rename_lock rename_lock.seqcount irq_context: 0 &type->i_mutex_dir_key#2 namespace_sem rename_lock rename_lock.seqcount &dentry->d_lock irq_context: 0 &type->i_mutex_dir_key#2 namespace_sem mount_lock irq_context: 0 &type->i_mutex_dir_key#2 namespace_sem mount_lock &dentry->d_lock irq_context: 0 &type->i_mutex_dir_key#2 namespace_sem mount_lock mount_lock.seqcount irq_context: 0 &type->i_mutex_dir_key#2 namespace_sem mount_lock mount_lock.seqcount &new_ns->poll irq_context: 0 &type->i_mutex_dir_key#2 namespace_sem mount_lock mount_lock.seqcount &dentry->d_lock irq_context: 0 &type->i_mutex_dir_key#2 
namespace_sem mount_lock mount_lock.seqcount rcu_read_lock &dentry->d_lock irq_context: 0 &type->i_mutex_dir_key#2 namespace_sem mount_lock mount_lock.seqcount &obj_hash[i].lock irq_context: 0 &type->s_umount_key/1 fs_reclaim irq_context: 0 &type->s_umount_key/1 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key/1 mmu_notifier_invalidate_range_start irq_context: 0 &type->i_mutex_dir_key#3 namespace_sem mount_lock mount_lock.seqcount quarantine_lock irq_context: 0 &type->i_mutex_dir_key#4 &c->lock irq_context: 0 &type->i_mutex_dir_key#4 &____s->seqcount irq_context: 0 &type->s_umount_key#33 irq_context: 0 &type->s_umount_key#33 sb_lock irq_context: 0 &type->s_umount_key#33 fs_reclaim irq_context: 0 &type->s_umount_key#33 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#33 pool_lock#2 irq_context: 0 &type->s_umount_key#33 &dentry->d_lock irq_context: 0 &type->s_umount_key#33 &lru->node[i].lock irq_context: 0 &type->s_umount_key#33 rcu_read_lock &dentry->d_lock irq_context: 0 &type->s_umount_key#33 &obj_hash[i].lock irq_context: 0 &type->i_mutex_dir_key#4 irq_context: 0 &type->i_mutex_dir_key#4 namespace_sem irq_context: 0 &type->i_mutex_dir_key#4 namespace_sem rcu_read_lock mount_lock.seqcount irq_context: 0 &type->i_mutex_dir_key#4 namespace_sem fs_reclaim irq_context: 0 &type->i_mutex_dir_key#4 namespace_sem fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &type->i_mutex_dir_key#4 namespace_sem rename_lock irq_context: 0 &type->i_mutex_dir_key#4 namespace_sem rename_lock rename_lock.seqcount irq_context: 0 &type->i_mutex_dir_key#4 namespace_sem rename_lock rename_lock.seqcount &dentry->d_lock irq_context: 0 &type->i_mutex_dir_key#4 namespace_sem mount_lock irq_context: 0 &type->i_mutex_dir_key#4 namespace_sem mount_lock &dentry->d_lock irq_context: 0 &type->i_mutex_dir_key#4 namespace_sem mount_lock mount_lock.seqcount irq_context: 0 &type->i_mutex_dir_key#4 namespace_sem mount_lock mount_lock.seqcount &new_ns->poll irq_context: 0 &type->i_mutex_dir_key#4 namespace_sem mount_lock mount_lock.seqcount &dentry->d_lock irq_context: 0 &type->i_mutex_dir_key#4 namespace_sem mount_lock mount_lock.seqcount rcu_read_lock &dentry->d_lock irq_context: 0 &type->i_mutex_dir_key#4 namespace_sem mount_lock mount_lock.seqcount &obj_hash[i].lock irq_context: 0 &p->lock &pcp->lock &zone->lock irq_context: 0 &p->lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &type->s_umount_key#34 irq_context: 0 &type->s_umount_key#34 sb_lock irq_context: 0 &type->s_umount_key#34 &dentry->d_lock irq_context: 0 rcu_read_lock &dentry->d_lock sysctl_lock irq_context: 0 &type->s_umount_key#35/1 irq_context: 0 &type->s_umount_key#35/1 fs_reclaim irq_context: 0 &type->s_umount_key#35/1 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#35/1 pcpu_alloc_mutex irq_context: 0 &type->s_umount_key#35/1 pcpu_alloc_mutex pcpu_lock irq_context: 0 &type->s_umount_key#35/1 shrinker_rwsem irq_context: 0 &type->s_umount_key#35/1 list_lrus_mutex irq_context: 0 &type->s_umount_key#35/1 sb_lock irq_context: 0 &type->s_umount_key#35/1 sb_lock unnamed_dev_ida.xa_lock irq_context: 0 &type->s_umount_key#35/1 pool_lock#2 irq_context: 0 &type->s_umount_key#35/1 mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#35/1 &sb->s_type->i_lock_key#26 irq_context: 0 &type->s_umount_key#35/1 &s->s_inode_list_lock irq_context: 0 &type->s_umount_key#35/1 tk_core.seq.seqcount irq_context: 0 &type->s_umount_key#35/1 
&sb->s_type->i_lock_key#26 &dentry->d_lock irq_context: 0 &type->s_umount_key#35/1 &dentry->d_lock irq_context: 0 &sb->s_type->i_mutex_key#9 namespace_sem irq_context: 0 &sb->s_type->i_mutex_key#9 namespace_sem rcu_read_lock mount_lock.seqcount irq_context: 0 &sb->s_type->i_mutex_key#9 namespace_sem fs_reclaim irq_context: 0 &sb->s_type->i_mutex_key#9 namespace_sem fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &sb->s_type->i_mutex_key#9 namespace_sem rename_lock irq_context: 0 &sb->s_type->i_mutex_key#9 namespace_sem rename_lock rename_lock.seqcount irq_context: 0 &sb->s_type->i_mutex_key#9 namespace_sem rename_lock rename_lock.seqcount &dentry->d_lock irq_context: 0 &sb->s_type->i_mutex_key#9 namespace_sem mount_lock irq_context: 0 &sb->s_type->i_mutex_key#9 namespace_sem mount_lock &dentry->d_lock irq_context: 0 &sb->s_type->i_mutex_key#9 namespace_sem mount_lock mount_lock.seqcount irq_context: 0 &sb->s_type->i_mutex_key#9 namespace_sem mount_lock mount_lock.seqcount &new_ns->poll irq_context: 0 &sb->s_type->i_mutex_key#9 namespace_sem mount_lock mount_lock.seqcount &dentry->d_lock irq_context: 0 &sb->s_type->i_mutex_key#9 namespace_sem mount_lock mount_lock.seqcount rcu_read_lock &dentry->d_lock irq_context: 0 &sb->s_type->i_mutex_key#9 namespace_sem mount_lock mount_lock.seqcount &obj_hash[i].lock irq_context: 0 redirect_lock irq_context: 0 &tty->ldisc_sem &tty->atomic_write_lock fs_reclaim irq_context: 0 &tty->ldisc_sem &tty->atomic_write_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &tty->ldisc_sem &tty->atomic_write_lock &____s->seqcount irq_context: 0 &tty->ldisc_sem &tty->atomic_write_lock pool_lock#2 irq_context: 0 &tty->ldisc_sem &tty->atomic_write_lock rcu_read_lock pool_lock#2 irq_context: 0 &tty->ldisc_sem &tty->atomic_write_lock &obj_hash[i].lock irq_context: 0 &tty->ldisc_sem &tty->atomic_write_lock &mm->mmap_lock irq_context: 0 &tty->ldisc_sem &tty->atomic_write_lock &tty->termios_rwsem irq_context: 0 &tty->ldisc_sem &tty->atomic_write_lock &tty->termios_rwsem &tty->write_wait irq_context: 0 &tty->ldisc_sem &tty->atomic_write_lock &tty->termios_rwsem &ldata->output_lock irq_context: 0 &tty->ldisc_sem &tty->atomic_write_lock &tty->termios_rwsem &ldata->output_lock &port_lock_key irq_context: hardirq &i->lock &port_lock_key irq_context: hardirq &i->lock &port_lock_key &port->lock irq_context: hardirq &i->lock &port_lock_key &tty->write_wait irq_context: 0 &tty->ldisc_sem &tty->atomic_write_lock &tty->termios_rwsem &port_lock_key irq_context: 0 &tty->ldisc_sem &tty->write_wait irq_context: 0 &type->s_umount_key#36/1 irq_context: 0 &type->s_umount_key#36/1 fs_reclaim irq_context: 0 &type->s_umount_key#36/1 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#36/1 pcpu_alloc_mutex irq_context: 0 &type->s_umount_key#36/1 pcpu_alloc_mutex pcpu_lock irq_context: 0 &type->s_umount_key#36/1 shrinker_rwsem irq_context: 0 &type->s_umount_key#36/1 list_lrus_mutex irq_context: 0 &type->s_umount_key#36/1 sb_lock irq_context: 0 &type->s_umount_key#36/1 sb_lock unnamed_dev_ida.xa_lock irq_context: 0 &type->s_umount_key#36/1 pool_lock#2 irq_context: 0 &type->s_umount_key#36/1 mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#36/1 &sb->s_type->i_lock_key#27 irq_context: 0 &type->s_umount_key#36/1 &s->s_inode_list_lock irq_context: 0 &type->s_umount_key#36/1 tk_core.seq.seqcount irq_context: 0 &type->s_umount_key#36/1 &sb->s_type->i_lock_key#27 &dentry->d_lock irq_context: 0 &type->s_umount_key#36/1 
fuse_mutex irq_context: 0 &type->s_umount_key#36/1 &dentry->d_lock irq_context: 0 &type->s_umount_key#37/1 irq_context: 0 &type->s_umount_key#37/1 fs_reclaim irq_context: 0 &type->s_umount_key#37/1 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#37/1 pool_lock#2 irq_context: 0 &type->s_umount_key#37/1 pcpu_alloc_mutex irq_context: 0 &type->s_umount_key#37/1 pcpu_alloc_mutex pcpu_lock irq_context: 0 &type->s_umount_key#37/1 shrinker_rwsem irq_context: 0 &type->s_umount_key#37/1 list_lrus_mutex irq_context: 0 &type->s_umount_key#37/1 sb_lock irq_context: 0 &type->s_umount_key#37/1 sb_lock unnamed_dev_ida.xa_lock irq_context: 0 &type->s_umount_key#37/1 &c->lock irq_context: 0 &type->s_umount_key#37/1 &____s->seqcount irq_context: 0 &type->s_umount_key#37/1 mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#37/1 &sb->s_type->i_lock_key#28 irq_context: 0 &type->s_umount_key#37/1 &s->s_inode_list_lock irq_context: 0 &type->s_umount_key#37/1 tk_core.seq.seqcount irq_context: 0 &type->s_umount_key#37/1 &sb->s_type->i_lock_key#28 &dentry->d_lock irq_context: 0 &type->s_umount_key#37/1 pstore_sb_lock irq_context: 0 &type->s_umount_key#37/1 &dentry->d_lock irq_context: 0 &type->i_mutex_dir_key#4 namespace_sem &c->lock irq_context: 0 &type->i_mutex_dir_key#4 namespace_sem &____s->seqcount irq_context: 0 &type->i_mutex_dir_key#4 namespace_sem pool_lock#2 irq_context: 0 &type->s_umount_key#38/1 irq_context: 0 &type->s_umount_key#38/1 fs_reclaim irq_context: 0 &type->s_umount_key#38/1 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#38/1 pcpu_alloc_mutex irq_context: 0 &type->s_umount_key#38/1 pcpu_alloc_mutex pcpu_lock irq_context: 0 &type->s_umount_key#38/1 shrinker_rwsem irq_context: 0 &type->s_umount_key#38/1 list_lrus_mutex irq_context: 0 &type->s_umount_key#38/1 sb_lock irq_context: 0 &type->s_umount_key#38/1 sb_lock unnamed_dev_ida.xa_lock irq_context: 0 &type->s_umount_key#38/1 pool_lock#2 irq_context: 0 &type->s_umount_key#38/1 mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#38/1 &sb->s_type->i_lock_key#29 irq_context: 0 &type->s_umount_key#38/1 &s->s_inode_list_lock irq_context: 0 &type->s_umount_key#38/1 tk_core.seq.seqcount irq_context: 0 &type->s_umount_key#38/1 &sb->s_type->i_lock_key#29 &dentry->d_lock irq_context: 0 &type->s_umount_key#38/1 bpf_preload_lock irq_context: 0 &type->s_umount_key#38/1 bpf_preload_lock (kmod_concurrent_max).lock irq_context: 0 &type->s_umount_key#38/1 bpf_preload_lock fs_reclaim irq_context: 0 &type->s_umount_key#38/1 bpf_preload_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#38/1 bpf_preload_lock pool_lock#2 irq_context: 0 &type->s_umount_key#38/1 bpf_preload_lock &obj_hash[i].lock irq_context: 0 &type->s_umount_key#38/1 bpf_preload_lock rcu_read_lock &pool->lock/1 irq_context: 0 &type->s_umount_key#38/1 bpf_preload_lock rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 &type->s_umount_key#38/1 bpf_preload_lock rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 &type->s_umount_key#38/1 bpf_preload_lock rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 &type->s_umount_key#38/1 bpf_preload_lock &x->wait#17 irq_context: 0 &type->s_umount_key#38/1 bpf_preload_lock &rq->__lock irq_context: 0 &type->s_umount_key#38/1 bpf_preload_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &type->s_umount_key#38/1 bpf_preload_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &mm->mmap_lock 
&anon_vma->rwsem &pcp->lock &zone->lock irq_context: 0 &mm->mmap_lock &anon_vma->rwsem &____s->seqcount irq_context: 0 &mm->mmap_lock &anon_vma->rwsem rcu_read_lock pool_lock#2 irq_context: 0 uts_sem irq_context: 0 &type->s_umount_key#38/1 bpf_preload_lock pgd_lock irq_context: 0 &type->s_umount_key#38/1 bpf_preload_lock rcu_read_lock pool_lock#2 irq_context: 0 &type->s_umount_key#38/1 bpf_preload_lock key irq_context: 0 &type->s_umount_key#38/1 bpf_preload_lock pcpu_lock irq_context: 0 &type->s_umount_key#38/1 bpf_preload_lock percpu_counters_lock irq_context: 0 &type->s_umount_key#38/1 bpf_preload_lock &____s->seqcount irq_context: 0 &type->s_umount_key#38/1 bpf_preload_lock running_helpers_waitq.lock irq_context: 0 &type->s_umount_key#38/1 &dentry->d_lock irq_context: 0 &type->s_umount_key#39 irq_context: 0 &type->s_umount_key#39 sb_lock irq_context: 0 &type->s_umount_key#39 fs_reclaim irq_context: 0 &type->s_umount_key#39 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#39 pool_lock#2 irq_context: 0 &type->s_umount_key#39 &dentry->d_lock irq_context: 0 &type->s_umount_key#39 &lru->node[i].lock irq_context: 0 &type->s_umount_key#39 rcu_read_lock &dentry->d_lock irq_context: 0 &type->s_umount_key#39 &obj_hash[i].lock irq_context: 0 rcu_read_lock &sb->s_type->i_lock_key irq_context: 0 &type->i_mutex_dir_key#5 irq_context: 0 &type->i_mutex_dir_key#5 fs_reclaim irq_context: 0 &type->i_mutex_dir_key#5 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &type->i_mutex_dir_key#5 &dentry->d_lock irq_context: 0 &type->i_mutex_dir_key#5 rcu_read_lock rename_lock.seqcount irq_context: 0 &type->i_mutex_dir_key#5 &dentry->d_lock &wq irq_context: 0 sb_writers#5 irq_context: 0 sb_writers#5 mount_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 rename_lock.seqcount irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 fs_reclaim irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 &dentry->d_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 rcu_read_lock rename_lock.seqcount irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 tomoyo_ss irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 tomoyo_ss mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 tomoyo_ss rcu_read_lock mount_lock.seqcount irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 tomoyo_ss rcu_read_lock rcu_read_lock rename_lock.seqcount irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 tomoyo_ss &obj_hash[i].lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 &dentry->d_lock &wq#2 irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 &sbinfo->stat_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 pool_lock#2 irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 &sb->s_type->i_lock_key irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 &s->s_inode_list_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 tk_core.seq.seqcount irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 batched_entropy_u32.lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 &sb->s_type->i_lock_key &dentry->d_lock irq_context: 0 &sb->s_type->i_lock_key irq_context: 0 &sb->s_type->i_mutex_key#12 irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 
rename_lock.seqcount irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 fs_reclaim irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &dentry->d_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 tomoyo_ss irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 tomoyo_ss mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 tomoyo_ss rcu_read_lock mount_lock.seqcount irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 tomoyo_ss rcu_read_lock rcu_read_lock rename_lock.seqcount irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 tomoyo_ss &obj_hash[i].lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 tomoyo_ss &c->lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 tomoyo_ss &____s->seqcount irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 tomoyo_ss pool_lock#2 irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 tomoyo_ss rcu_read_lock &dentry->d_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 tomoyo_ss tomoyo_policy_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 tomoyo_ss tomoyo_policy_lock mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &sbinfo->stat_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &sb->s_type->i_lock_key irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &s->s_inode_list_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 tk_core.seq.seqcount irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 batched_entropy_u32.lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &sb->s_type->i_lock_key &dentry->d_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 rcu_read_lock &dentry->d_lock irq_context: 0 sb_writers#5 tk_core.seq.seqcount irq_context: 0 sb_writers#5 &sb->s_type->i_lock_key irq_context: 0 sb_writers#5 &wb->list_lock irq_context: 0 sb_writers#5 &wb->list_lock &sb->s_type->i_lock_key irq_context: 0 &sig->cred_guard_mutex tomoyo_ss quarantine_lock irq_context: 0 &sig->cred_guard_mutex &p->alloc_lock &x->wait#25 &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &f->f_lock irq_context: softirq &folio_wait_table[i] &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &sig->cred_guard_mutex tomoyo_ss batched_entropy_u8.lock irq_context: 0 &sig->cred_guard_mutex tomoyo_ss kfence_freelist_lock irq_context: 0 &sig->cred_guard_mutex tomoyo_ss &meta->lock irq_context: 0 &mm->mmap_lock quarantine_lock irq_context: 0 &sig->cred_guard_mutex &p->alloc_lock &x->wait#25 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 &mm->mmap_lock batched_entropy_u8.lock irq_context: 0 &mm->mmap_lock kfence_freelist_lock irq_context: 0 &mm->mmap_lock &meta->lock irq_context: 0 &rq->__lock &obj_hash[i].lock irq_context: 0 &rq->__lock &base->lock irq_context: 0 &rq->__lock &base->lock &obj_hash[i].lock irq_context: 0 &iint->mutex &rq->__lock &cfs_rq->removed.lock irq_context: 0 uts_sem irq_context: 0 uts_sem hostname_poll.wait.lock irq_context: 0 &sig->cred_guard_mutex &n->list_lock irq_context: 0 &sig->cred_guard_mutex mapping.invalidate_lock &c->lock irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem &rq->__lock irq_context: 0 rcu_read_lock rcu_read_lock mount_lock.seqcount irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock rename_lock.seqcount irq_context: 0 &type->i_mutex_dir_key#3 
sb_writers#4 mount_lock irq_context: 0 &type->i_mutex_dir_key#3 sb_writers#4 tk_core.seq.seqcount irq_context: 0 &type->i_mutex_dir_key#3 sb_writers#4 mmu_notifier_invalidate_range_start irq_context: 0 &type->i_mutex_dir_key#3 sb_writers#4 pool_lock#2 irq_context: 0 &type->i_mutex_dir_key#3 sb_writers#4 &journal->j_state_lock irq_context: 0 &type->i_mutex_dir_key#3 sb_writers#4 jbd2_handle irq_context: 0 &type->i_mutex_dir_key#3 sb_writers#4 jbd2_handle &ei->i_raw_lock irq_context: 0 &type->i_mutex_dir_key#3 sb_writers#4 jbd2_handle &journal->j_wait_updates irq_context: 0 &type->i_mutex_dir_key#3 sb_writers#4 &obj_hash[i].lock irq_context: 0 &type->i_mutex_dir_key#3 sb_writers#4 &____s->seqcount irq_context: 0 &type->i_mutex_dir_key#3 sb_writers#4 rcu_read_lock pool_lock#2 irq_context: 0 &type->i_mutex_dir_key#3 sb_writers#4 &sb->s_type->i_lock_key#22 irq_context: 0 &type->i_mutex_dir_key#3 sb_writers#4 &wb->list_lock irq_context: 0 &type->i_mutex_dir_key#3 sb_writers#4 &wb->list_lock &sb->s_type->i_lock_key#22 irq_context: 0 &fs->lock &dentry->d_lock irq_context: 0 dup_mmap_sem irq_context: 0 dup_mmap_sem &mm->mmap_lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 pool_lock#2 irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &c->lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &____s->seqcount irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 fs_reclaim irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &vma->vm_lock->lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &mapping->i_mmap_rwsem irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &anon_vma->rwsem irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 mmu_notifier_invalidate_range_start irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &mm->page_table_lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 ptlock_ptr(page) irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 ptlock_ptr(page)#2 irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 ptlock_ptr(page)#2 ptlock_ptr(page)#2/1 irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 ptlock_ptr(page)#2 key irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &mm->context.lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &obj_hash[i].lock irq_context: 0 &p->alloc_lock &memcg->mm_list.lock irq_context: 0 &vma->vm_lock->lock ptlock_ptr(page)#2 lock#4 &lruvec->lru_lock irq_context: 0 &vma->vm_lock->lock ptlock_ptr(page)#2 lock#5 irq_context: 0 &sig->cred_guard_mutex &tsk->futex_exit_mutex &mm->mmap_lock irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock delayed_uprobe_lock irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock lock#4 irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock lock#4 &lruvec->lru_lock irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock lock#5 irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock mmu_notifier_invalidate_range_start irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock ptlock_ptr(page)#2 irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock ptlock_ptr(page)#2 &____s->seqcount irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock ptlock_ptr(page)#2 pool_lock#2 
irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock ptlock_ptr(page)#2 key irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock &vma->vm_lock->lock irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock &mapping->i_mmap_rwsem irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock &anon_vma->rwsem irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock &anon_vma->rwsem &obj_hash[i].lock irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock &anon_vma->rwsem pool_lock#2 irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock &obj_hash[i].lock irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock &c->lock irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock pool_lock#2 irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock lock#4 irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock lock#5 irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock &lruvec->lru_lock irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock rcu_read_lock pool_lock#2 irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &memcg->mm_list.lock irq_context: 0 &mm->mmap_lock &anon_vma->rwsem &meta->lock irq_context: 0 &mm->mmap_lock &anon_vma->rwsem kfence_freelist_lock irq_context: 0 &mm->mmap_lock ptlock_ptr(page)#2 lock#4 rcu_read_lock pool_lock#2 irq_context: 0 &mm->mmap_lock ptlock_ptr(page)#2 lock#4 &obj_hash[i].lock irq_context: 0 &p->lock &rq->__lock irq_context: 0 &type->i_mutex_dir_key#5 rename_lock.seqcount irq_context: 0 &type->i_mutex_dir_key#5 &dentry->d_lock &wq#2 irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 tomoyo_ss &c->lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 tomoyo_ss &____s->seqcount irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 tomoyo_ss pool_lock#2 irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 tomoyo_ss rcu_read_lock &dentry->d_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 tomoyo_ss tomoyo_policy_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 tomoyo_ss tomoyo_policy_lock mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#5 &sb->s_type->i_mutex_key#12 irq_context: 0 sb_writers#5 &sb->s_type->i_mutex_key#12 &xattrs->lock irq_context: 0 sb_writers#5 &sb->s_type->i_mutex_key#12 tk_core.seq.seqcount irq_context: 0 sb_writers#5 &sb->s_type->i_mutex_key#12 fs_reclaim irq_context: 0 sb_writers#5 &sb->s_type->i_mutex_key#12 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#5 &sb->s_type->i_mutex_key#12 &____s->seqcount irq_context: 0 sb_writers#5 &sb->s_type->i_mutex_key#12 pool_lock#2 irq_context: 0 sb_writers#5 &sb->s_type->i_mutex_key#12 &xa->xa_lock#6 irq_context: 0 sb_writers#5 &sb->s_type->i_mutex_key#12 lock#4 irq_context: 0 sb_writers#5 &sb->s_type->i_mutex_key#12 &info->lock irq_context: 0 &sig->cred_guard_mutex &iint->mutex ima_extend_list_mutex &c->lock irq_context: 0 &sig->cred_guard_mutex &iint->mutex ima_extend_list_mutex &____s->seqcount irq_context: 0 &p->alloc_lock &x->wait#25 irq_context: 0 &p->alloc_lock &x->wait#25 &p->pi_lock irq_context: 0 &p->alloc_lock &x->wait#25 &p->pi_lock &rq->__lock irq_context: 0 &p->alloc_lock &x->wait#25 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 tasklist_lock &sighand->siglock 
&p->pi_lock irq_context: 0 tasklist_lock &sighand->siglock &p->pi_lock &rq->__lock irq_context: 0 tasklist_lock &sighand->siglock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &type->i_mutex_dir_key#5 &c->lock irq_context: 0 &type->i_mutex_dir_key#5 &____s->seqcount irq_context: 0 &type->i_mutex_dir_key#5 pool_lock#2 irq_context: 0 &sighand->siglock &obj_hash[i].lock irq_context: 0 &sighand->siglock pool_lock#2 irq_context: 0 &mm->mmap_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sig->cred_guard_mutex &iint->mutex &pcp->lock &zone->lock irq_context: 0 &sig->cred_guard_mutex &iint->mutex &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &sig->cred_guard_mutex key#5 irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock &anon_vma->rwsem &rq->__lock irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock &rq->__lock irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock pgd_lock irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock rcu_read_lock pool_lock#2 irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &obj_hash[i].lock irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock key irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock pcpu_lock irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock percpu_counters_lock irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock pool_lock#2 irq_context: 0 sb_writers &type->i_mutex_dir_key/1 tomoyo_ss rcu_read_lock pool_lock#2 irq_context: 0 sb_writers &type->i_mutex_dir_key/1 &u->bindlock irq_context: 0 sb_writers &type->i_mutex_dir_key/1 &u->bindlock &net->unx.table.locks[i] irq_context: 0 sb_writers &type->i_mutex_dir_key/1 &u->bindlock &net->unx.table.locks[i] &net->unx.table.locks[i]/1 irq_context: 0 sb_writers &type->i_mutex_dir_key/1 &u->bindlock &net->unx.table.locks[i] &net->unx.table.locks[i]/1 &dentry->d_lock irq_context: 0 sb_writers &type->i_mutex_dir_key/1 &u->bindlock &net->unx.table.locks[i]/1 irq_context: 0 sb_writers &type->i_mutex_dir_key/1 &u->bindlock &bsd_socket_locks[i] irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 tomoyo_ss irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 tomoyo_ss mmu_notifier_invalidate_range_start irq_context: 0 &mm->mmap_lock &anon_vma->rwsem &rq->__lock irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 tomoyo_ss &c->lock irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 tomoyo_ss &____s->seqcount irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 tomoyo_ss pool_lock#2 irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 tomoyo_ss rcu_read_lock mount_lock.seqcount irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 tomoyo_ss rcu_read_lock rcu_read_lock rename_lock.seqcount irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 tomoyo_ss &obj_hash[i].lock irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 tomoyo_ss rcu_read_lock &dentry->d_lock irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 tomoyo_ss tomoyo_policy_lock irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 tomoyo_ss tomoyo_policy_lock mmu_notifier_invalidate_range_start irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 tomoyo_ss tomoyo_policy_lock pool_lock#2 irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 tk_core.seq.seqcount irq_context: 0 rcu_read_lock mount_lock.seqcount irq_context: 0 &u->iolock irq_context: 0 &u->iolock rlock-AF_UNIX irq_context: 0 &ei->socket.wq.wait irq_context: 0 &sig->cred_guard_mutex 
&iint->mutex mapping.invalidate_lock irq_context: 0 &sig->cred_guard_mutex &iint->mutex mapping.invalidate_lock mmu_notifier_invalidate_range_start irq_context: 0 &sig->cred_guard_mutex &iint->mutex mapping.invalidate_lock &____s->seqcount irq_context: 0 &sig->cred_guard_mutex &iint->mutex mapping.invalidate_lock &xa->xa_lock#6 irq_context: 0 &sig->cred_guard_mutex &iint->mutex mapping.invalidate_lock lock#4 irq_context: 0 &sig->cred_guard_mutex &iint->mutex mapping.invalidate_lock lock#4 &lruvec->lru_lock irq_context: 0 &sig->cred_guard_mutex &iint->mutex mapping.invalidate_lock &ei->i_es_lock irq_context: 0 &sig->cred_guard_mutex &iint->mutex mapping.invalidate_lock pool_lock#2 irq_context: 0 &sig->cred_guard_mutex &iint->mutex mapping.invalidate_lock tk_core.seq.seqcount irq_context: 0 &sig->cred_guard_mutex &iint->mutex mapping.invalidate_lock &dd->lock irq_context: 0 &sig->cred_guard_mutex &iint->mutex mapping.invalidate_lock rcu_read_lock &dd->lock irq_context: 0 &sig->cred_guard_mutex &iint->mutex mapping.invalidate_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 &sig->cred_guard_mutex &iint->mutex mapping.invalidate_lock rcu_read_lock pool_lock#2 irq_context: 0 &sig->cred_guard_mutex &iint->mutex mapping.invalidate_lock rcu_read_lock tk_core.seq.seqcount irq_context: 0 &sig->cred_guard_mutex &iint->mutex mapping.invalidate_lock rcu_read_lock &virtscsi_vq->vq_lock irq_context: 0 &sig->cred_guard_mutex &iint->mutex &folio_wait_table[i] irq_context: 0 &sig->cred_guard_mutex &iint->mutex &rq->__lock irq_context: 0 &sig->cred_guard_mutex &iint->mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock quarantine_lock irq_context: 0 &sb->s_type->i_mutex_key#9 rcu_read_lock &p->alloc_lock irq_context: 0 &sb->s_type->i_mutex_key#9 &sb->s_type->i_lock_key#23 &dentry->d_lock &wq#3 irq_context: 0 &sb->s_type->i_mutex_key#9 rcu_read_lock &dentry->d_lock irq_context: 0 &sb->s_type->i_mutex_key#9 &dentry->d_lock &lru->node[i].lock irq_context: 0 &bsd_socket_locks[i] irq_context: 0 sb_writers tk_core.seq.seqcount irq_context: 0 sb_writers &sb->s_type->i_lock_key#5 irq_context: 0 sb_writers &wb->list_lock irq_context: 0 sb_writers &wb->list_lock &sb->s_type->i_lock_key#5 irq_context: 0 &u->lock irq_context: 0 &u->lock &u->lock/1 irq_context: 0 &mm->mmap_lock ptlock_ptr(page)#2 lock#5 irq_context: 0 &group->mark_mutex irq_context: 0 &group->mark_mutex &fsnotify_mark_srcu irq_context: 0 &group->mark_mutex fs_reclaim irq_context: 0 &group->mark_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &group->mark_mutex &____s->seqcount irq_context: 0 &group->mark_mutex pool_lock#2 irq_context: 0 &group->mark_mutex &c->lock irq_context: 0 &group->mark_mutex lock irq_context: 0 &group->mark_mutex lock &group->inotify_data.idr_lock irq_context: 0 &group->mark_mutex lock &group->inotify_data.idr_lock pool_lock#2 irq_context: 0 &group->mark_mutex ucounts_lock irq_context: 0 &group->mark_mutex &mark->lock irq_context: 0 &group->mark_mutex &mark->lock &fsnotify_mark_srcu irq_context: 0 &group->mark_mutex &mark->lock &fsnotify_mark_srcu &conn->lock irq_context: 0 &group->mark_mutex &mark->lock &conn->lock irq_context: 0 &group->mark_mutex &conn->lock irq_context: 0 &group->mark_mutex &sb->s_type->i_lock_key#5 irq_context: 0 &group->mark_mutex &sb->s_type->i_lock_key#5 &dentry->d_lock irq_context: 0 &group->mark_mutex &sb->s_type->i_lock_key#5 &dentry->d_lock &dentry->d_lock/1 irq_context: 0 &dentry->d_lock/1 irq_context: 0 
&type->i_mutex_dir_key#2 &dentry->d_lock &dentry->d_lock/1 irq_context: 0 &type->i_mutex_dir_key#2 rcu_read_lock &dentry->d_lock irq_context: 0 &type->i_mutex_dir_key#2 tk_core.seq.seqcount irq_context: 0 &type->i_mutex_dir_key#2 sb_writers mount_lock irq_context: 0 &type->i_mutex_dir_key#2 sb_writers tk_core.seq.seqcount irq_context: 0 &type->i_mutex_dir_key#2 sb_writers &sb->s_type->i_lock_key#5 irq_context: 0 &type->i_mutex_dir_key#2 sb_writers &wb->list_lock irq_context: 0 &type->i_mutex_dir_key#2 sb_writers &wb->list_lock &sb->s_type->i_lock_key#5 irq_context: 0 &fsnotify_mark_srcu &conn->lock irq_context: 0 &conn->lock irq_context: 0 &evdev->client_lock irq_context: 0 &evdev->mutex irq_context: 0 &evdev->mutex &dev->mutex#2 irq_context: 0 &evdev->mutex &mm->mmap_lock irq_context: 0 tasklist_lock &sighand->siglock &p->pi_lock &cfs_rq->removed.lock irq_context: 0 rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 rcu_read_lock rcu_node_0 irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 rcu_read_lock &rcu_state.expedited_wq irq_context: 0 rcu_state.exp_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)rcu_gp (work_completion)(&rew->rew_work) &cfs_rq->removed.lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&rew->rew_work) pool_lock#2 irq_context: 0 (wq_completion)rcu_gp (work_completion)(&rew->rew_work) rcu_state.exp_wake_mutex &rnp->exp_wq[2] &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &evdev->mutex &dev->mutex#2 rcu_state.exp_mutex rcu_node_0 irq_context: 0 &evdev->mutex &dev->mutex#2 rcu_state.exp_mutex &obj_hash[i].lock irq_context: 0 &evdev->mutex &dev->mutex#2 rcu_state.exp_mutex rcu_read_lock &pool->lock irq_context: 0 &evdev->mutex &dev->mutex#2 rcu_state.exp_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 &evdev->mutex &dev->mutex#2 rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 &evdev->mutex &dev->mutex#2 rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 &evdev->mutex &dev->mutex#2 rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &evdev->mutex &dev->mutex#2 rcu_state.exp_mutex &rnp->exp_wq[3] irq_context: 0 &evdev->mutex &dev->mutex#2 rcu_state.exp_mutex &rq->__lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 rcu_node_0 irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &rcu_state.expedited_wq irq_context: 0 &evdev->mutex &dev->mutex#2 rcu_state.exp_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &rcu_state.expedited_wq &p->pi_lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &rq->__lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &evdev->mutex &dev->mutex#2 &obj_hash[i].lock irq_context: 0 sk_lock-AF_NETLINK irq_context: 0 sk_lock-AF_NETLINK slock-AF_NETLINK irq_context: 0 
slock-AF_NETLINK irq_context: 0 sk_lock-AF_NETLINK rcu_read_lock rhashtable_bucket irq_context: 0 cb_lock irq_context: 0 cb_lock genl_mutex irq_context: 0 cb_lock genl_mutex fs_reclaim irq_context: 0 cb_lock genl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 cb_lock genl_mutex pool_lock#2 irq_context: 0 cb_lock genl_mutex &obj_hash[i].lock irq_context: 0 cb_lock genl_mutex rlock-AF_NETLINK irq_context: 0 cb_lock fs_reclaim irq_context: 0 cb_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 cb_lock pool_lock#2 irq_context: 0 cb_lock rlock-AF_NETLINK irq_context: 0 rlock-AF_NETLINK irq_context: 0 &nlk->wait irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_read_lock rhashtable_bucket irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_read_lock rcu_read_lock &pool->lock irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_read_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_read_lock rcu_node_0 irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&ht->run_work) irq_context: 0 (wq_completion)events (work_completion)(&ht->run_work) &ht->mutex irq_context: 0 (wq_completion)events (work_completion)(&ht->run_work) &ht->mutex fs_reclaim irq_context: 0 (wq_completion)events (work_completion)(&ht->run_work) &ht->mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events (work_completion)(&ht->run_work) &ht->mutex pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&ht->run_work) &ht->mutex batched_entropy_u32.lock irq_context: 0 (wq_completion)events (work_completion)(&ht->run_work) &ht->mutex rhashtable_bucket irq_context: 0 (wq_completion)events (work_completion)(&ht->run_work) &ht->mutex rhashtable_bucket rhashtable_bucket/1 irq_context: 0 (wq_completion)events (work_completion)(&ht->run_work) &ht->mutex &ht->lock irq_context: 0 (wq_completion)events (work_completion)(&ht->run_work) &ht->mutex &ht->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&ht->run_work) &ht->mutex &ht->lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 nl_table_lock irq_context: 0 &sb->s_type->i_mutex_key#10 nl_table_wait.lock irq_context: 0 &sb->s_type->i_mutex_key#10 clock-AF_NETLINK irq_context: 0 &sb->s_type->i_mutex_key#10 genl_sk_destructing_waitq.lock irq_context: 0 &sb->s_type->i_mutex_key#10 &nlk->wait irq_context: 0 &sb->s_type->i_mutex_key#10 wlock-AF_NETLINK irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem rcu_read_lock &pool->lock irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem 
rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&w->w) irq_context: 0 (wq_completion)events (work_completion)(&w->w) nfc_devlist_mutex irq_context: 0 (wq_completion)events (work_completion)(&w->w) nfc_devlist_mutex &k->list_lock irq_context: 0 (wq_completion)events (work_completion)(&w->w) nfc_devlist_mutex &k->k_lock irq_context: 0 (wq_completion)events (work_completion)(&w->w) nfc_devlist_mutex &genl_data->genl_data_mutex irq_context: 0 (wq_completion)events (work_completion)(&w->w) &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&w->w) pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem rcu_read_lock &rdev->beacon_registrations_lock irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem rcu_read_lock &rdev->mgmt_registrations_lock irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem rcu_read_lock &wdev->pmsr_lock irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem reg_indoor_lock irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem hwsim_radio_lock irq_context: 0 nl_table_lock pool_lock#2 irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &c->lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &____s->seqcount irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 pool_lock#2 irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &obj_hash[i].lock irq_context: 0 sb_writers#6 irq_context: 0 sb_writers#6 mount_lock irq_context: 0 sb_writers#6 &sb->s_type->i_mutex_key#10 irq_context: 0 sb_writers#6 &sb->s_type->i_mutex_key#10 tomoyo_ss irq_context: 0 sb_writers#6 &sb->s_type->i_mutex_key#10 tomoyo_ss mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#6 &sb->s_type->i_mutex_key#10 tomoyo_ss pool_lock#2 irq_context: 0 sb_writers#6 &sb->s_type->i_mutex_key#10 tomoyo_ss &obj_hash[i].lock irq_context: 0 sb_writers#6 &sb->s_type->i_mutex_key#10 tomoyo_ss &c->lock irq_context: 0 sb_writers#6 &sb->s_type->i_mutex_key#10 tomoyo_ss &____s->seqcount irq_context: 0 sb_writers#6 &sb->s_type->i_mutex_key#10 tomoyo_ss rcu_read_lock &dentry->d_lock irq_context: 0 sb_writers#6 &sb->s_type->i_mutex_key#10 tomoyo_ss tomoyo_policy_lock irq_context: 0 sb_writers#6 &sb->s_type->i_mutex_key#10 tomoyo_ss tomoyo_policy_lock mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#6 &sb->s_type->i_mutex_key#10 tomoyo_ss tomoyo_policy_lock pool_lock#2 irq_context: 0 sb_writers#6 &sb->s_type->i_mutex_key#10 tomoyo_ss tomoyo_policy_lock &pcp->lock &zone->lock irq_context: 0 sb_writers#6 &sb->s_type->i_mutex_key#10 tomoyo_ss tomoyo_policy_lock &____s->seqcount irq_context: 0 sb_writers#6 &sb->s_type->i_mutex_key#10 tomoyo_ss tomoyo_policy_lock rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#6 &sb->s_type->i_mutex_key#10 tomoyo_ss tomoyo_policy_lock &obj_hash[i].lock irq_context: 0 sb_writers#6 &sb->s_type->i_mutex_key#10 tk_core.seq.seqcount irq_context: 0 sb_writers#6 &sb->s_type->i_mutex_key#10 &sb->s_type->i_lock_key#8 irq_context: 0 sb_writers#6 &sb->s_type->i_mutex_key#10 &wb->list_lock irq_context: 0 sb_writers#6 &sb->s_type->i_mutex_key#10 &wb->list_lock &sb->s_type->i_lock_key#8 irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 tomoyo_ss tomoyo_policy_lock pool_lock#2 irq_context: 0 sb_writers#5 
&type->i_mutex_dir_key#5/1 &u->bindlock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &u->bindlock &net->unx.table.locks[i] irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &u->bindlock &net->unx.table.locks[i] &net->unx.table.locks[i]/1 irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &u->bindlock &net->unx.table.locks[i] &net->unx.table.locks[i]/1 &dentry->d_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &u->bindlock &bsd_socket_locks[i] irq_context: 0 &u->lock &sk->sk_peer_lock irq_context: 0 &u->lock rlock-AF_UNIX irq_context: 0 rcu_read_lock &ei->socket.wq.wait irq_context: 0 rcu_read_lock &ei->socket.wq.wait &p->pi_lock irq_context: 0 rcu_read_lock &ei->socket.wq.wait &p->pi_lock &rq->__lock irq_context: 0 rcu_read_lock &ei->socket.wq.wait &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &u->iolock &mm->mmap_lock irq_context: 0 &u->iolock &obj_hash[i].lock irq_context: 0 &u->iolock pool_lock#2 irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 rcu_read_lock &dentry->d_lock irq_context: 0 sb_writers#5 &sb->s_type->i_mutex_key#12 &sb->s_type->i_lock_key irq_context: 0 sb_writers#5 &sb->s_type->i_mutex_key#12 &wb->list_lock irq_context: 0 sb_writers#5 &sb->s_type->i_mutex_key#12 &wb->list_lock &sb->s_type->i_lock_key irq_context: 0 &type->i_mutex_dir_key#3 &ei->i_data_sem &c->lock irq_context: 0 &type->i_mutex_dir_key#3 &ei->i_data_sem &____s->seqcount irq_context: 0 &iint->mutex &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &iint->mutex mapping.invalidate_lock &ei->i_es_lock key#6 irq_context: 0 &iint->mutex mapping.invalidate_lock &ei->i_data_sem &ei->i_es_lock key#7 irq_context: 0 &iint->mutex mapping.invalidate_lock &ei->i_data_sem &ei->i_es_lock key#8 irq_context: 0 &group->notification_waitq irq_context: 0 &group->notification_lock irq_context: 0 &client->wait irq_context: softirq rcu_callback rlock-AF_NETLINK irq_context: softirq rcu_callback &dir->lock irq_context: 0 tomoyo_ss &rq->__lock irq_context: 0 &mm->mmap_lock &anon_vma->rwsem &rq->__lock irq_context: 0 &mm->mmap_lock &anon_vma->rwsem &cfs_rq->removed.lock irq_context: 0 &sig->cred_guard_mutex &rq->__lock &cfs_rq->removed.lock irq_context: 0 syslog_lock irq_context: 0 sb_writers#5 &sb->s_type->i_mutex_key#12 &xa->xa_lock#6 &c->lock irq_context: 0 sb_writers#5 &sb->s_type->i_mutex_key#12 &xa->xa_lock#6 &____s->seqcount irq_context: 0 sb_writers#5 &sb->s_type->i_mutex_key#12 &xa->xa_lock#6 pool_lock#2 irq_context: 0 rcu_read_lock &rcu_state.gp_wq irq_context: 0 rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#5 &sb->s_type->i_mutex_key#12 rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#5 &sb->s_type->i_mutex_key#12 rcu_read_lock &rq->__lock irq_context: 0 &type->i_mutex_dir_key#3 rcu_read_lock &rq->__lock irq_context: 0 &type->i_mutex_dir_key#3 rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_lock_key#14 irq_context: 0 &sb->s_type->i_lock_key#14 &dentry->d_lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &pcp->lock &zone->lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 rcu_read_lock &rq->__lock irq_context: 0 sb_writers#5 &rq->__lock irq_context: 0 rcu_read_lock &rcu_state.gp_wq &p->pi_lock &cfs_rq->removed.lock irq_context: 0 &pipe->mutex/1 irq_context: 0 &pipe->rd_wait irq_context: 
0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &anon_vma->rwsem &c->lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &anon_vma->rwsem &____s->seqcount irq_context: 0 sb_writers#5 &sb->s_type->i_mutex_key#12 lock#4 &lruvec->lru_lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &anon_vma->rwsem &rq->__lock irq_context: 0 &u->iolock &meta->lock irq_context: 0 &u->iolock kfence_freelist_lock irq_context: 0 &u->lock &u->peer_wait irq_context: 0 &u->iolock &u->peer_wait irq_context: 0 &u->iolock &u->peer_wait &p->pi_lock irq_context: 0 &u->iolock &u->peer_wait &p->pi_lock &rq->__lock irq_context: 0 &u->iolock &u->peer_wait &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &vma->vm_lock->lock &rq->__lock irq_context: 0 syslog_lock &rq->__lock irq_context: 0 sb_writers &type->i_mutex_dir_key#2 irq_context: 0 sb_writers &type->i_mutex_dir_key#2 rename_lock.seqcount irq_context: 0 sb_writers &type->i_mutex_dir_key#2 rcu_read_lock &dentry->d_lock irq_context: 0 &vma->vm_lock->lock &lruvec->lru_lock irq_context: 0 &vma->vm_lock->lock rcu_read_lock pool_lock#2 irq_context: 0 &vma->vm_lock->lock &obj_hash[i].lock irq_context: 0 rcu_read_lock &ei->socket.wq.wait &p->pi_lock &cfs_rq->removed.lock irq_context: 0 &sig->cred_guard_mutex tomoyo_ss &rq->__lock irq_context: 0 &sig->cred_guard_mutex &iint->mutex &ei->xattr_sem &mapping->private_lock irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &vma->vm_lock->lock ptlock_ptr(page) irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem &sem->wait_lock irq_context: 0 &mm->mmap_lock &sem->wait_lock irq_context: 0 &mm->mmap_lock &p->pi_lock irq_context: 0 &mm->mmap_lock &p->pi_lock &rq->__lock irq_context: 0 &mm->mmap_lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem rcu_read_lock rcu_node_0 irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem rcu_read_lock &rq->__lock irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem rcu_node_0 irq_context: 0 &mm->mmap_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 key#5 irq_context: 0 &mm->mmap_lock &n->list_lock irq_context: 0 &mm->mmap_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 &mm->mmap_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 &pipe->mutex/1 &pipe->rd_wait irq_context: 0 &pipe->mutex/1 &pipe->rd_wait &p->pi_lock irq_context: 0 &pipe->mutex/1 &pipe->rd_wait &p->pi_lock &rq->__lock irq_context: 0 &pipe->mutex/1 &pipe->rd_wait &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &pipe->mutex/1 &rq->__lock irq_context: 0 &pipe->mutex/1 &lock->wait_lock irq_context: 0 &pipe->mutex/1 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &pipe->mutex/1 &pipe->wr_wait irq_context: 0 &lock->wait_lock irq_context: 0 &pipe->mutex/1 fs_reclaim irq_context: 0 &pipe->mutex/1 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &mm->mmap_lock rcu_read_lock rcu_read_lock pgd_lock irq_context: 0 &pipe->mutex/1 &____s->seqcount irq_context: 0 &mm->mmap_lock rcu_read_lock rcu_read_lock rcu_read_lock pool_lock#2 irq_context: 0 &pipe->mutex/1 pool_lock#2 irq_context: 0 &mm->mmap_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 
&pipe->mutex/1 &mm->mmap_lock irq_context: 0 &mm->mmap_lock rcu_read_lock rcu_read_lock key irq_context: 0 &pipe->rd_wait &p->pi_lock irq_context: 0 &mm->mmap_lock rcu_read_lock rcu_read_lock pcpu_lock irq_context: 0 &pipe->rd_wait &p->pi_lock &rq->__lock irq_context: 0 &mm->mmap_lock rcu_read_lock rcu_read_lock percpu_counters_lock irq_context: 0 &pipe->rd_wait &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock rcu_read_lock rcu_read_lock pool_lock#2 irq_context: 0 &mm->mmap_lock rcu_read_lock rcu_read_lock &cfs_rq->removed.lock irq_context: 0 sb_writers#7 tk_core.seq.seqcount irq_context: 0 sb_writers#7 mount_lock irq_context: 0 &mm->mmap_lock &anon_vma->rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 tomoyo_ss &cfs_rq->removed.lock irq_context: 0 &u->lock clock-AF_UNIX irq_context: 0 &u->peer_wait irq_context: 0 rlock-AF_UNIX irq_context: 0 rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 rcu_read_lock &ei->socket.wq.wait &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &mm->mmap_lock lock#4 rcu_read_lock pool_lock#2 irq_context: 0 &mm->mmap_lock lock#4 &obj_hash[i].lock irq_context: 0 rcu_read_lock pgd_lock irq_context: 0 rcu_read_lock rcu_read_lock pool_lock#2 irq_context: 0 rcu_read_lock key irq_context: 0 rcu_read_lock pcpu_lock irq_context: 0 rcu_read_lock percpu_counters_lock irq_context: 0 &pipe->mutex/1 &pipe->rd_wait &p->pi_lock &cfs_rq->removed.lock irq_context: 0 &u->iolock quarantine_lock irq_context: 0 sb_writers#5 &sb->s_type->i_mutex_key#12 key#9 irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 tomoyo_ss tomoyo_policy_lock &c->lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 tomoyo_ss tomoyo_policy_lock &____s->seqcount irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &sb->s_type->i_mutex_key#12/4 irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &sb->s_type->i_mutex_key#12/4 &dentry->d_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &sb->s_type->i_mutex_key#12/4 tk_core.seq.seqcount irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &sb->s_type->i_mutex_key#12/4 rename_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &sb->s_type->i_mutex_key#12/4 rename_lock rename_lock.seqcount irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &sb->s_type->i_mutex_key#12/4 rename_lock rename_lock.seqcount &dentry->d_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &sb->s_type->i_mutex_key#12/4 rename_lock rename_lock.seqcount &dentry->d_lock &dentry->d_lock/2 irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &sb->s_type->i_mutex_key#12/4 rename_lock rename_lock.seqcount &dentry->d_lock &dentry->d_lock/2 &dentry->d_lock/3 irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &sb->s_type->i_mutex_key#12/4 rename_lock rename_lock.seqcount &dentry->d_lock &dentry->d_lock/2 &dentry->d_lock/3 &____s->seqcount#4 irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &sb->s_type->i_mutex_key#12/4 rename_lock rename_lock.seqcount &dentry->d_lock &dentry->d_lock/2 &dentry->d_lock/3 &____s->seqcount#4 &____s->seqcount#4/1 irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &sb->s_type->i_mutex_key#12/4 rename_lock rename_lock.seqcount &dentry->d_lock/2 irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &sb->s_type->i_mutex_key#12/4 rename_lock rename_lock.seqcount &dentry->d_lock/2 &dentry->d_lock/3 irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 tomoyo_ss &pcp->lock &zone->lock irq_context: 0 
sb_writers#5 &type->i_mutex_dir_key#5 tomoyo_ss &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &type->i_mutex_dir_key#3 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 rename_lock.seqcount irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 rcu_read_lock &dentry->d_lock irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 rcu_read_lock &dentry->d_lock sysctl_lock irq_context: 0 sb_writers#3 &dentry->d_lock irq_context: 0 sb_writers#3 tomoyo_ss irq_context: 0 sb_writers#3 tomoyo_ss mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#3 tomoyo_ss rcu_read_lock rename_lock.seqcount irq_context: 0 sb_writers#3 tomoyo_ss &obj_hash[i].lock irq_context: 0 sb_writers#3 tomoyo_ss rcu_read_lock &dentry->d_lock irq_context: 0 sb_writers#3 tomoyo_ss tomoyo_policy_lock irq_context: 0 sb_writers#3 tomoyo_ss pool_lock#2 irq_context: 0 sb_writers#3 tomoyo_ss &c->lock irq_context: 0 sb_writers#3 tomoyo_ss &____s->seqcount irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 tk_core.seq.seqcount irq_context: 0 sb_writers#3 &mm->mmap_lock irq_context: 0 &sig->cred_guard_mutex &iint->mutex mapping.invalidate_lock &xa->xa_lock#6 pool_lock#2 irq_context: 0 &iint->mutex sb_writers#4 batched_entropy_u8.lock irq_context: 0 &iint->mutex sb_writers#4 kfence_freelist_lock irq_context: 0 &iint->mutex sb_writers#4 &meta->lock irq_context: 0 &iint->mutex mapping.invalidate_lock &ei->i_data_sem &ei->i_es_lock &c->lock irq_context: 0 &iint->mutex mapping.invalidate_lock &ei->i_data_sem &ei->i_es_lock &____s->seqcount irq_context: 0 &iint->mutex mapping.invalidate_lock rcu_read_lock rcu_node_0 irq_context: 0 &iint->mutex mapping.invalidate_lock rcu_read_lock &rq->__lock irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem mmu_notifier_invalidate_range_start irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem ptlock_ptr(page) irq_context: 0 &sb->s_type->i_mutex_key#9 &dentry->d_lock &wq irq_context: 0 &sb->s_type->i_mutex_key#9 &obj_hash[i].lock irq_context: 0 sk_lock-AF_UNIX irq_context: 0 sk_lock-AF_UNIX slock-AF_UNIX irq_context: 0 slock-AF_UNIX irq_context: 0 &iint->mutex sb_writers#4 &c->lock irq_context: 0 &iint->mutex sb_writers#4 &____s->seqcount irq_context: hardirq log_wait.lock &p->pi_lock irq_context: 0 &iint->mutex mapping.invalidate_lock &xa->xa_lock#6 batched_entropy_u8.lock irq_context: 0 &iint->mutex mapping.invalidate_lock &xa->xa_lock#6 kfence_freelist_lock irq_context: 0 &type->i_mutex_dir_key#3 sb_writers#4 jbd2_handle mmu_notifier_invalidate_range_start irq_context: 0 &type->i_mutex_dir_key#3 sb_writers#4 jbd2_handle pool_lock#2 irq_context: 0 &type->i_mutex_dir_key#3 sb_writers#4 jbd2_handle &ret->b_state_lock irq_context: 0 &type->i_mutex_dir_key#3 sb_writers#4 jbd2_handle &ret->b_state_lock &journal->j_list_lock irq_context: 0 &type->i_mutex_dir_key#3 sb_writers#4 jbd2_handle &journal->j_revoke_lock irq_context: 0 &ei->xattr_sem irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 &xattrs->lock irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 tomoyo_ss &pcp->lock &zone->lock irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 tomoyo_ss &pcp->lock &zone->lock &____s->seqcount irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &n->list_lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &n->list_lock &c->lock irq_context: 0 &vma->vm_lock->lock ptlock_ptr(page)#2 lock#4 rcu_read_lock pool_lock#2 irq_context: 0 
&vma->vm_lock->lock ptlock_ptr(page)#2 lock#4 &obj_hash[i].lock irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 fs_reclaim irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 &dentry->d_lock irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 rcu_read_lock rename_lock.seqcount irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 tomoyo_ss irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 tomoyo_ss mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 tomoyo_ss rcu_read_lock rename_lock.seqcount irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 tomoyo_ss &obj_hash[i].lock irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 &sb->s_type->i_lock_key#23 irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 &s->s_inode_list_lock irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 &p->alloc_lock irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 &sb->s_type->i_lock_key#23 &dentry->d_lock irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 &sb->s_type->i_lock_key#23 &dentry->d_lock &wq#2 irq_context: 0 sb_writers#3 oom_adj_mutex irq_context: 0 sb_writers#3 oom_adj_mutex rcu_read_lock &p->alloc_lock irq_context: 0 sb_writers#3 oom_adj_mutex &p->alloc_lock irq_context: 0 &group->mark_mutex &sb->s_type->i_lock_key#22 irq_context: 0 &group->mark_mutex &sb->s_type->i_lock_key#22 &dentry->d_lock irq_context: 0 &group->mark_mutex &sb->s_type->i_lock_key#22 &dentry->d_lock &dentry->d_lock/1 irq_context: 0 &group->mark_mutex &sb->s_type->i_lock_key irq_context: 0 &group->mark_mutex &sb->s_type->i_lock_key &dentry->d_lock irq_context: 0 &sk->sk_peer_lock irq_context: 0 &ep->mtx irq_context: 0 epnested_mutex irq_context: 0 epnested_mutex &ep->mtx irq_context: 0 epnested_mutex &ep->mtx fs_reclaim irq_context: 0 epnested_mutex &ep->mtx fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 epnested_mutex &ep->mtx &____s->seqcount irq_context: 0 epnested_mutex &ep->mtx pool_lock#2 irq_context: 0 epnested_mutex &ep->mtx &c->lock irq_context: 0 epnested_mutex &ep->mtx &f->f_lock irq_context: 0 epnested_mutex &ep->mtx &ei->socket.wq.wait irq_context: 0 epnested_mutex &ep->mtx &ep->lock irq_context: 0 epnested_mutex rcu_read_lock &f->f_lock irq_context: 0 &ep->mtx fs_reclaim irq_context: 0 &ep->mtx fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &ep->mtx &f->f_lock irq_context: 0 &ep->mtx pool_lock#2 irq_context: 0 &ep->mtx &group->notification_waitq irq_context: 0 &ep->mtx &group->notification_lock irq_context: 0 &ep->mtx &ep->lock irq_context: 0 &ep->mtx &sighand->signalfd_wqh irq_context: 0 &ep->mtx &sighand->siglock irq_context: 0 &ep->mtx &ei->socket.wq.wait irq_context: 0 &ep->lock irq_context: 0 &sig->cred_guard_mutex mapping.invalidate_lock &xa->xa_lock#6 &c->lock irq_context: 0 &sig->cred_guard_mutex mapping.invalidate_lock &xa->xa_lock#6 &pcp->lock &zone->lock irq_context: 0 &sig->cred_guard_mutex mapping.invalidate_lock &xa->xa_lock#6 &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &sig->cred_guard_mutex mapping.invalidate_lock &xa->xa_lock#6 &____s->seqcount irq_context: hardirq log_wait.lock &p->pi_lock &rq->__lock irq_context: hardirq log_wait.lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sig->cred_guard_mutex tomoyo_ss &dentry->d_lock irq_context: 0 &sig->cred_guard_mutex 
&iint->mutex mapping.invalidate_lock &c->lock irq_context: 0 &mm->mmap_lock &c->lock batched_entropy_u8.lock irq_context: 0 &mm->mmap_lock &c->lock kfence_freelist_lock irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem &anon_vma->rwsem &meta->lock irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem &anon_vma->rwsem kfence_freelist_lock irq_context: 0 &type->i_mutex_dir_key#4 rename_lock.seqcount irq_context: 0 &type->i_mutex_dir_key#4 &sb->s_type->i_lock_key#24 &dentry->d_lock &wq#2 irq_context: 0 &type->i_mutex_dir_key#4 &mm->mmap_lock fs_reclaim irq_context: 0 &type->i_mutex_dir_key#4 &mm->mmap_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &type->i_mutex_dir_key#4 &mm->mmap_lock &____s->seqcount irq_context: 0 &type->i_mutex_dir_key#4 &mm->mmap_lock ptlock_ptr(page)#2 irq_context: 0 &type->i_mutex_dir_key#4 &mm->mmap_lock ptlock_ptr(page)#2 lock#4 irq_context: 0 &type->i_mutex_dir_key#4 tk_core.seq.seqcount irq_context: 0 &vma->vm_lock->lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 &type->i_mutex_dir_key#4 &root->kernfs_rwsem &c->lock irq_context: 0 &type->i_mutex_dir_key#4 &root->kernfs_rwsem &____s->seqcount irq_context: 0 &type->i_mutex_dir_key#4 &root->kernfs_rwsem &pcp->lock &zone->lock irq_context: 0 &type->i_mutex_dir_key#4 &root->kernfs_rwsem &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &type->i_mutex_dir_key#4 &pcp->lock &zone->lock irq_context: 0 &type->i_mutex_dir_key#4 &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &type->i_mutex_dir_key#4 sb_writers#8 mount_lock irq_context: 0 &type->i_mutex_dir_key#4 sb_writers#8 tk_core.seq.seqcount irq_context: 0 &type->i_mutex_dir_key#4 sb_writers#8 &sb->s_type->i_lock_key#24 irq_context: 0 &type->i_mutex_dir_key#4 sb_writers#8 &wb->list_lock irq_context: 0 &type->i_mutex_dir_key#4 sb_writers#8 &wb->list_lock &sb->s_type->i_lock_key#24 irq_context: 0 tomoyo_ss batched_entropy_u8.lock irq_context: 0 tomoyo_ss kfence_freelist_lock irq_context: 0 tomoyo_ss &meta->lock irq_context: 0 remove_cache_srcu irq_context: 0 remove_cache_srcu quarantine_lock irq_context: 0 remove_cache_srcu &c->lock irq_context: 0 remove_cache_srcu &n->list_lock irq_context: 0 remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 remove_cache_srcu &obj_hash[i].lock irq_context: 0 tomoyo_ss &n->list_lock irq_context: 0 tomoyo_ss &n->list_lock &c->lock irq_context: 0 &type->i_mutex_dir_key#4 batched_entropy_u8.lock irq_context: 0 &type->i_mutex_dir_key#4 kfence_freelist_lock irq_context: 0 &type->s_umount_key#31 sb_writers#4 mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#31 sb_writers#4 &____s->seqcount irq_context: 0 &type->s_umount_key#31 sb_writers#4 pool_lock#2 irq_context: 0 &type->s_umount_key#31 sb_writers#4 per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 &type->s_umount_key#31 sb_writers#4 &xa->xa_lock#6 irq_context: 0 &type->s_umount_key#31 sb_writers#4 lock#4 irq_context: 0 &type->s_umount_key#31 sb_writers#4 &mapping->private_lock irq_context: 0 &type->s_umount_key#31 sb_writers#4 &c->lock irq_context: 0 &type->s_umount_key#31 sb_writers#4 tk_core.seq.seqcount irq_context: 0 &type->s_umount_key#31 sb_writers#4 &dd->lock irq_context: 0 &type->s_umount_key#31 sb_writers#4 rcu_read_lock &dd->lock irq_context: 0 &type->s_umount_key#31 sb_writers#4 rcu_read_lock &obj_hash[i].lock irq_context: 0 &type->s_umount_key#31 sb_writers#4 rcu_read_lock pool_lock#2 irq_context: 0 &type->s_umount_key#31 sb_writers#4 rcu_read_lock tk_core.seq.seqcount irq_context: 0 
&type->s_umount_key#31 sb_writers#4 rcu_read_lock &virtscsi_vq->vq_lock irq_context: 0 &type->s_umount_key#31 sb_writers#4 &xa->xa_lock#6 pool_lock#2 irq_context: 0 &type->s_umount_key#31 sb_writers#4 lock#4 &lruvec->lru_lock irq_context: 0 &type->s_umount_key#31 sb_writers#4 lock#4 rcu_read_lock pool_lock#2 irq_context: 0 &type->s_umount_key#31 sb_writers#4 lock#4 &obj_hash[i].lock irq_context: 0 &type->s_umount_key#31 sb_writers#4 bit_wait_table + i irq_context: 0 &type->s_umount_key#31 sb_writers#4 &rq->__lock irq_context: 0 &type->s_umount_key#31 sb_writers#4 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &type->s_umount_key#31 &eli->li_list_mtx irq_context: 0 &type->s_umount_key#31 sb_writers#4 &journal->j_state_lock irq_context: 0 &type->s_umount_key#31 sb_writers#4 jbd2_handle irq_context: 0 &type->s_umount_key#31 sb_writers#4 jbd2_handle &meta_group_info[i]->alloc_sem irq_context: 0 &type->s_umount_key#31 sb_writers#4 jbd2_handle &meta_group_info[i]->alloc_sem mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#31 sb_writers#4 jbd2_handle &meta_group_info[i]->alloc_sem pool_lock#2 irq_context: 0 &type->s_umount_key#31 sb_writers#4 jbd2_handle &meta_group_info[i]->alloc_sem tk_core.seq.seqcount irq_context: 0 &type->s_umount_key#31 sb_writers#4 jbd2_handle &meta_group_info[i]->alloc_sem &dd->lock irq_context: 0 &type->s_umount_key#31 sb_writers#4 jbd2_handle &meta_group_info[i]->alloc_sem &x->wait#26 irq_context: 0 &type->s_umount_key#31 sb_writers#4 jbd2_handle &meta_group_info[i]->alloc_sem &obj_hash[i].lock irq_context: 0 &type->s_umount_key#31 sb_writers#4 jbd2_handle &meta_group_info[i]->alloc_sem rcu_read_lock &pool->lock irq_context: 0 &type->s_umount_key#31 sb_writers#4 jbd2_handle &meta_group_info[i]->alloc_sem rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 &type->s_umount_key#31 sb_writers#4 jbd2_handle &meta_group_info[i]->alloc_sem rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 &type->s_umount_key#31 sb_writers#4 jbd2_handle &meta_group_info[i]->alloc_sem rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 &type->s_umount_key#31 sb_writers#4 jbd2_handle &meta_group_info[i]->alloc_sem rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 &type->s_umount_key#31 sb_writers#4 jbd2_handle &meta_group_info[i]->alloc_sem rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &type->s_umount_key#31 sb_writers#4 jbd2_handle &meta_group_info[i]->alloc_sem rcu_node_0 irq_context: 0 &type->s_umount_key#31 sb_writers#4 jbd2_handle &meta_group_info[i]->alloc_sem &rq->__lock irq_context: 0 (wq_completion)kblockd (work_completion)(&(&hctx->run_work)->work) rcu_read_lock &____s->seqcount irq_context: 0 (wq_completion)kblockd (work_completion)(&(&hctx->run_work)->work) rcu_read_lock pool_lock#2 irq_context: 0 &type->s_umount_key#31 sb_writers#4 jbd2_handle &meta_group_info[i]->alloc_sem rcu_read_lock rcu_node_0 irq_context: 0 &type->s_umount_key#31 sb_writers#4 jbd2_handle &meta_group_info[i]->alloc_sem &base->lock irq_context: 0 &type->s_umount_key#31 sb_writers#4 jbd2_handle &meta_group_info[i]->alloc_sem &base->lock &obj_hash[i].lock irq_context: 0 &type->s_umount_key#31 sb_writers#4 jbd2_handle &meta_group_info[i]->alloc_sem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq &x->wait#26 irq_context: softirq &x->wait#26 &p->pi_lock irq_context: 0 &type->s_umount_key#31 sb_writers#4 jbd2_handle &meta_group_info[i]->alloc_sem (&timer.timer) irq_context: 0 
&type->s_umount_key#31 sb_writers#4 jbd2_handle &meta_group_info[i]->alloc_sem &fq->mq_flush_lock irq_context: 0 &type->s_umount_key#31 sb_writers#4 jbd2_handle &meta_group_info[i]->alloc_sem &fq->mq_flush_lock tk_core.seq.seqcount irq_context: 0 &type->s_umount_key#31 sb_writers#4 jbd2_handle &meta_group_info[i]->alloc_sem &fq->mq_flush_lock &q->requeue_lock irq_context: 0 &type->s_umount_key#31 sb_writers#4 jbd2_handle &meta_group_info[i]->alloc_sem &fq->mq_flush_lock &obj_hash[i].lock irq_context: 0 &type->s_umount_key#31 sb_writers#4 jbd2_handle &meta_group_info[i]->alloc_sem &fq->mq_flush_lock rcu_read_lock &pool->lock irq_context: 0 &type->s_umount_key#31 sb_writers#4 jbd2_handle &meta_group_info[i]->alloc_sem &fq->mq_flush_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 &type->s_umount_key#31 sb_writers#4 jbd2_handle &meta_group_info[i]->alloc_sem &fq->mq_flush_lock rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 &type->s_umount_key#31 sb_writers#4 jbd2_handle &meta_group_info[i]->alloc_sem &fq->mq_flush_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 &type->s_umount_key#31 sb_writers#4 jbd2_handle &meta_group_info[i]->alloc_sem &fq->mq_flush_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 &type->s_umount_key#31 sb_writers#4 jbd2_handle &meta_group_info[i]->alloc_sem &fq->mq_flush_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq &fq->mq_flush_lock &x->wait#26 irq_context: softirq &fq->mq_flush_lock &x->wait#26 &p->pi_lock irq_context: 0 &type->s_umount_key#31 sb_writers#4 jbd2_handle &journal->j_wait_updates irq_context: 0 &type->s_umount_key#31 sb_writers#4 &obj_hash[i].lock irq_context: 0 swap_lock irq_context: 0 &vma->vm_lock->lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 sb_writers#8 irq_context: 0 sb_writers#8 mount_lock irq_context: 0 sb_writers#8 &type->i_mutex_dir_key#4 irq_context: 0 sb_writers#8 &type->i_mutex_dir_key#4 rename_lock.seqcount irq_context: 0 sb_writers#8 &type->i_mutex_dir_key#4 fs_reclaim irq_context: 0 sb_writers#8 &type->i_mutex_dir_key#4 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#8 &type->i_mutex_dir_key#4 &dentry->d_lock irq_context: 0 sb_writers#8 &type->i_mutex_dir_key#4 rcu_read_lock rename_lock.seqcount irq_context: 0 sb_writers#8 &type->i_mutex_dir_key#4 &root->kernfs_rwsem irq_context: 0 sb_writers#8 &type->i_mutex_dir_key#4 &root->kernfs_rwsem inode_hash_lock irq_context: 0 sb_writers#8 &type->i_mutex_dir_key#4 &root->kernfs_rwsem fs_reclaim irq_context: 0 sb_writers#8 &type->i_mutex_dir_key#4 &root->kernfs_rwsem fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#8 &type->i_mutex_dir_key#4 &root->kernfs_rwsem mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#8 &type->i_mutex_dir_key#4 &root->kernfs_rwsem inode_hash_lock &sb->s_type->i_lock_key#24 irq_context: 0 sb_writers#8 &type->i_mutex_dir_key#4 &root->kernfs_rwsem inode_hash_lock &s->s_inode_list_lock irq_context: 0 sb_writers#8 &type->i_mutex_dir_key#4 &root->kernfs_rwsem tk_core.seq.seqcount irq_context: 0 sb_writers#8 &type->i_mutex_dir_key#4 &root->kernfs_rwsem &sb->s_type->i_lock_key#24 irq_context: 0 sb_writers#8 &type->i_mutex_dir_key#4 &sb->s_type->i_lock_key#24 irq_context: 0 sb_writers#8 &type->i_mutex_dir_key#4 &sb->s_type->i_lock_key#24 &dentry->d_lock irq_context: 0 sb_writers#8 &type->i_mutex_dir_key#4 &sb->s_type->i_lock_key#24 &dentry->d_lock &wq#2 irq_context: 0 kn->active fs_reclaim 
irq_context: 0 kn->active fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active pool_lock#2 irq_context: 0 kn->active &kernfs_locks->open_file_mutex[count] irq_context: 0 kn->active &kernfs_locks->open_file_mutex[count] fs_reclaim irq_context: 0 kn->active &kernfs_locks->open_file_mutex[count] fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active &kernfs_locks->open_file_mutex[count] pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#13 irq_context: 0 sb_writers#8 fs_reclaim irq_context: 0 sb_writers#8 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#8 pool_lock#2 irq_context: 0 sb_writers#8 &mm->mmap_lock irq_context: 0 sb_writers#8 &of->mutex irq_context: 0 sb_writers#8 &of->mutex kn->active &k->list_lock irq_context: 0 sb_writers#8 &of->mutex kn->active fs_reclaim irq_context: 0 sb_writers#8 &of->mutex kn->active fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#8 &of->mutex kn->active pool_lock#2 irq_context: 0 sb_writers#8 &of->mutex kn->active uevent_sock_mutex irq_context: 0 sb_writers#8 &of->mutex kn->active uevent_sock_mutex fs_reclaim irq_context: 0 sb_writers#8 &of->mutex kn->active uevent_sock_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#8 &of->mutex kn->active uevent_sock_mutex pool_lock#2 irq_context: 0 sb_writers#8 &of->mutex kn->active uevent_sock_mutex nl_table_lock irq_context: 0 sb_writers#8 &of->mutex kn->active uevent_sock_mutex rlock-AF_NETLINK irq_context: 0 sb_writers#8 &of->mutex kn->active uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait irq_context: 0 sb_writers#8 &of->mutex kn->active uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock irq_context: 0 sb_writers#8 &of->mutex kn->active uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq irq_context: 0 sb_writers#8 &of->mutex kn->active uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock irq_context: 0 sb_writers#8 &of->mutex kn->active uevent_sock_mutex nl_table_wait.lock irq_context: 0 sb_writers#8 &of->mutex kn->active uevent_sock_mutex &obj_hash[i].lock irq_context: 0 sb_writers#8 &of->mutex kn->active &obj_hash[i].lock irq_context: 0 sb_writers#8 &obj_hash[i].lock irq_context: 0 &kernfs_locks->open_file_mutex[count] irq_context: 0 &kernfs_locks->open_file_mutex[count] &obj_hash[i].lock irq_context: 0 &kernfs_locks->open_file_mutex[count] pool_lock#2 irq_context: 0 &kernfs_locks->open_file_mutex[count] krc.lock irq_context: 0 &kernfs_locks->open_file_mutex[count] krc.lock &obj_hash[i].lock irq_context: 0 &kernfs_locks->open_file_mutex[count] krc.lock &base->lock irq_context: 0 &kernfs_locks->open_file_mutex[count] krc.lock &base->lock &obj_hash[i].lock irq_context: 0 kn->active#2 fs_reclaim irq_context: 0 kn->active#2 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#2 &c->lock irq_context: 0 kn->active#2 &____s->seqcount irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem irq_context: 0 kn->active#2 pool_lock#2 irq_context: 0 kn->active#2 &kernfs_locks->open_file_mutex[count] irq_context: 0 kn->active#2 &kernfs_locks->open_file_mutex[count] fs_reclaim irq_context: 0 kn->active#2 &kernfs_locks->open_file_mutex[count] fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &mm->mmap_lock &anon_vma->rwsem irq_context: 0 sb_writers#8 &of->mutex kn->active#2 fs_reclaim irq_context: 0 sb_writers#8 &of->mutex kn->active#2 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#8 &of->mutex 
kn->active#2 &c->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#2 &____s->seqcount irq_context: 0 sb_writers#8 &of->mutex kn->active#2 pool_lock#2 irq_context: 0 sb_writers#8 &of->mutex kn->active#2 uevent_sock_mutex irq_context: 0 sb_writers#8 &of->mutex kn->active#2 uevent_sock_mutex fs_reclaim irq_context: 0 sb_writers#8 &of->mutex kn->active#2 uevent_sock_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#8 &of->mutex kn->active#2 uevent_sock_mutex pool_lock#2 irq_context: 0 sb_writers#8 &of->mutex kn->active#2 uevent_sock_mutex nl_table_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#2 uevent_sock_mutex rlock-AF_NETLINK irq_context: 0 sb_writers#8 &of->mutex kn->active#2 uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait irq_context: 0 sb_writers#8 &of->mutex kn->active#2 uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#2 uevent_sock_mutex nl_table_wait.lock irq_context: 0 sb_writers#8 &of->mutex kn->active#2 uevent_sock_mutex &obj_hash[i].lock irq_context: 0 sb_writers#8 &of->mutex kn->active#2 &obj_hash[i].lock irq_context: 0 sb_writers#8 &type->i_mutex_dir_key#4 &c->lock irq_context: 0 sb_writers#8 &type->i_mutex_dir_key#4 &____s->seqcount irq_context: 0 sb_writers#8 &type->i_mutex_dir_key#4 &root->kernfs_rwsem &c->lock irq_context: 0 sb_writers#8 &type->i_mutex_dir_key#4 &root->kernfs_rwsem &____s->seqcount irq_context: 0 sb_writers#8 &of->mutex kn->active#2 uevent_sock_mutex &c->lock irq_context: 0 sb_writers#8 remove_cache_srcu irq_context: 0 sb_writers#8 remove_cache_srcu quarantine_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#2 &n->list_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#2 &n->list_lock &c->lock irq_context: 0 kn->active &c->lock irq_context: 0 sb_writers#8 &of->mutex kn->active uevent_sock_mutex &c->lock irq_context: 0 kn->active#2 &kernfs_locks->open_file_mutex[count] &c->lock irq_context: 0 kn->active &kernfs_locks->open_file_mutex[count] &c->lock irq_context: 0 &mm->mmap_lock &anon_vma->rwsem &obj_hash[i].lock irq_context: 0 kn->active &____s->seqcount irq_context: 0 &mm->mmap_lock &anon_vma->rwsem pool_lock#2 irq_context: 0 &type->i_mutex_dir_key#3 &n->list_lock irq_context: 0 &type->i_mutex_dir_key#3 &n->list_lock &c->lock irq_context: 0 &type->i_mutex_dir_key#5 tk_core.seq.seqcount irq_context: 0 &type->i_mutex_dir_key#5 sb_writers#5 mount_lock irq_context: 0 &type->i_mutex_dir_key#5 sb_writers#5 tk_core.seq.seqcount irq_context: 0 &type->i_mutex_dir_key#5 sb_writers#5 &sb->s_type->i_lock_key irq_context: 0 &type->i_mutex_dir_key#5 sb_writers#5 &wb->list_lock irq_context: 0 &type->i_mutex_dir_key#5 sb_writers#5 &wb->list_lock &sb->s_type->i_lock_key irq_context: 0 sb_writers#8 &of->mutex kn->active &c->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#2 &pcp->lock &zone->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#2 &pcp->lock &zone->lock &____s->seqcount irq_context: 0 sb_writers#8 &type->i_mutex_dir_key#4 &n->list_lock irq_context: 0 sb_writers#8 &type->i_mutex_dir_key#4 &n->list_lock &c->lock irq_context: 0 sb_writers#8 &of->mutex kn->active &____s->seqcount irq_context: 0 &kernfs_locks->open_file_mutex[count] &obj_hash[i].lock pool_lock irq_context: 0 sb_writers#8 &of->mutex kn->active uevent_sock_mutex &____s->seqcount irq_context: 0 kn->active &kernfs_locks->open_file_mutex[count] &____s->seqcount irq_context: 0 sb_writers#8 &of->mutex kn->active#2 uevent_sock_mutex &____s->seqcount irq_context: 0 sb_writers#8 
&type->i_mutex_dir_key#4 remove_cache_srcu irq_context: 0 sb_writers#8 &type->i_mutex_dir_key#4 remove_cache_srcu quarantine_lock irq_context: 0 kn->active#2 &kernfs_locks->open_file_mutex[count] &____s->seqcount irq_context: 0 sb_writers#8 &c->lock irq_context: 0 sb_writers#8 &type->i_mutex_dir_key#4 &root->kernfs_rwsem &pcp->lock &zone->lock irq_context: 0 sb_writers#8 &type->i_mutex_dir_key#4 &root->kernfs_rwsem &pcp->lock &zone->lock &____s->seqcount irq_context: 0 sb_writers#8 remove_cache_srcu &c->lock irq_context: 0 sb_writers#8 remove_cache_srcu &n->list_lock irq_context: 0 sb_writers#8 remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#8 remove_cache_srcu &obj_hash[i].lock irq_context: 0 sb_writers#8 &type->i_mutex_dir_key#4 batched_entropy_u8.lock irq_context: 0 sb_writers#8 &type->i_mutex_dir_key#4 kfence_freelist_lock irq_context: 0 sb_writers#8 &type->i_mutex_dir_key#4 &root->kernfs_rwsem pool_lock#2 irq_context: 0 kn->active#2 &n->list_lock irq_context: 0 kn->active#2 &n->list_lock &c->lock irq_context: 0 sb_writers#8 &n->list_lock irq_context: 0 sb_writers#8 &n->list_lock &c->lock irq_context: 0 rcu_read_lock &ei->socket.wq.wait &ep->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#2 quarantine_lock irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 tomoyo_ss &c->lock irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 tomoyo_ss &n->list_lock irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 tomoyo_ss &n->list_lock &c->lock irq_context: 0 rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq irq_context: 0 rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock irq_context: 0 rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock &rq->__lock irq_context: 0 rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#8 &of->mutex kn->active#2 uevent_sock_mutex rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#8 &of->mutex kn->active#2 uevent_sock_mutex rcu_read_lock &rq->__lock irq_context: softirq &x->wait#26 &p->pi_lock &rq->__lock irq_context: softirq &x->wait#26 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq &x->wait#26 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 sb_writers#8 &of->mutex kn->active#2 uevent_sock_mutex &rq->__lock irq_context: 0 &ep->mtx &____s->seqcount irq_context: 0 &ep->mtx &c->lock irq_context: 0 &kernfs_locks->open_file_mutex[count] fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 &kernfs_locks->open_file_mutex[count] fill_pool_map-wait-type-override pool_lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 remove_cache_srcu irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 remove_cache_srcu quarantine_lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 remove_cache_srcu &c->lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 remove_cache_srcu &n->list_lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 remove_cache_srcu &obj_hash[i].lock irq_context: 0 sb_writers#8 &of->mutex kn->active#2 &rq->__lock irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 tomoyo_ss &____s->seqcount irq_context: 0 kn->active#2 remove_cache_srcu irq_context: 0 kn->active#2 remove_cache_srcu quarantine_lock irq_context: 0 kn->active#2 remove_cache_srcu &c->lock irq_context: 0 kn->active#2 remove_cache_srcu &n->list_lock irq_context: 
0 kn->active#2 remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 kn->active#2 remove_cache_srcu &obj_hash[i].lock irq_context: 0 sb_writers#8 &of->mutex kn->active &n->list_lock irq_context: 0 sb_writers#8 &of->mutex kn->active &n->list_lock &c->lock irq_context: 0 sb_writers#8 &type->i_mutex_dir_key#4 fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 tomoyo_ss remove_cache_srcu irq_context: 0 tomoyo_ss remove_cache_srcu quarantine_lock irq_context: 0 sb_writers#8 &rq->__lock irq_context: 0 kn->active &n->list_lock irq_context: 0 kn->active &n->list_lock &c->lock irq_context: 0 &kernfs_locks->open_file_mutex[count] fill_pool_map-wait-type-override &c->lock irq_context: 0 &kernfs_locks->open_file_mutex[count] fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 kn->active remove_cache_srcu irq_context: 0 kn->active remove_cache_srcu quarantine_lock irq_context: 0 kn->active remove_cache_srcu &c->lock irq_context: 0 kn->active remove_cache_srcu &n->list_lock irq_context: 0 kn->active remove_cache_srcu &obj_hash[i].lock irq_context: 0 kn->active remove_cache_srcu &rq->__lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&(&kfence_timer)->work) fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&(&kfence_timer)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 kn->active#2 &rq->__lock irq_context: 0 sb_writers#8 &type->i_mutex_dir_key#4 &root->kernfs_rwsem &rq->__lock irq_context: 0 tomoyo_ss remove_cache_srcu &c->lock irq_context: 0 tomoyo_ss remove_cache_srcu &n->list_lock irq_context: 0 tomoyo_ss remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 tomoyo_ss remove_cache_srcu &obj_hash[i].lock irq_context: 0 &kernfs_locks->open_file_mutex[count] &rq->__lock irq_context: 0 sb_writers#8 &type->i_mutex_dir_key#4 &root->kernfs_rwsem remove_cache_srcu irq_context: 0 sb_writers#8 &type->i_mutex_dir_key#4 &root->kernfs_rwsem remove_cache_srcu quarantine_lock irq_context: 0 &kernfs_locks->open_file_mutex[count] fill_pool_map-wait-type-override &rq->__lock irq_context: 0 kn->active#2 &kernfs_locks->open_file_mutex[count] fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 sb_writers#8 &of->mutex &rq->__lock irq_context: 0 sb_writers#8 &of->mutex kn->active#2 uevent_sock_mutex remove_cache_srcu irq_context: 0 sb_writers#8 &of->mutex kn->active#2 uevent_sock_mutex remove_cache_srcu quarantine_lock irq_context: 0 sb_writers#8 &type->i_mutex_dir_key#4 fs_reclaim &rq->__lock irq_context: 0 sb_writers#8 &type->i_mutex_dir_key#4 remove_cache_srcu &c->lock irq_context: 0 sb_writers#8 &type->i_mutex_dir_key#4 remove_cache_srcu &n->list_lock irq_context: 0 sb_writers#8 &type->i_mutex_dir_key#4 remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#8 &type->i_mutex_dir_key#4 remove_cache_srcu &obj_hash[i].lock irq_context: 0 sb_writers#8 &of->mutex kn->active#2 uevent_sock_mutex &n->list_lock irq_context: 0 sb_writers#8 &type->i_mutex_dir_key#4 &rq->__lock irq_context: 0 &type->i_mutex_dir_key#5 &n->list_lock irq_context: 0 kn->active#2 &kernfs_locks->open_file_mutex[count] remove_cache_srcu irq_context: 0 kn->active#2 &kernfs_locks->open_file_mutex[count] remove_cache_srcu quarantine_lock irq_context: 0 kn->active#2 &kernfs_locks->open_file_mutex[count] remove_cache_srcu &c->lock irq_context: 0 kn->active#2 &kernfs_locks->open_file_mutex[count] remove_cache_srcu &n->list_lock irq_context: 0 kn->active#2 
&kernfs_locks->open_file_mutex[count] remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 kn->active#2 &kernfs_locks->open_file_mutex[count] remove_cache_srcu &obj_hash[i].lock irq_context: 0 rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 tomoyo_ss rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 tomoyo_ss rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 &type->i_mutex_dir_key#5 rcu_read_lock rcu_node_0 irq_context: 0 &type->i_mutex_dir_key#5 rcu_read_lock &rq->__lock irq_context: 0 &type->i_mutex_dir_key#5 rcu_node_0 irq_context: 0 &type->i_mutex_dir_key#5 &rq->__lock irq_context: 0 &type->i_mutex_dir_key#5 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 tomoyo_ss rcu_read_lock rcu_node_0 irq_context: 0 &type->i_mutex_dir_key#5 rcu_read_lock &dentry->d_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 &sem->wait_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 &rq->__lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 rcu_read_lock &rq->__lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &type->i_mutex_dir_key#5 &sem->wait_lock irq_context: 0 &sem->wait_lock irq_context: 0 kn->active#2 &kernfs_locks->open_file_mutex[count] &n->list_lock irq_context: 0 kn->active#2 &kernfs_locks->open_file_mutex[count] &n->list_lock &c->lock irq_context: 0 sb_writers#5 &sem->wait_lock irq_context: 0 sb_writers#5 &p->pi_lock irq_context: 0 sb_writers#5 &p->pi_lock &rq->__lock irq_context: 0 sb_writers#5 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &type->i_mutex_dir_key#5 remove_cache_srcu irq_context: 0 &type->i_mutex_dir_key#5 remove_cache_srcu quarantine_lock irq_context: 0 &type->i_mutex_dir_key#5 remove_cache_srcu &c->lock irq_context: 0 &type->i_mutex_dir_key#5 remove_cache_srcu &n->list_lock irq_context: 0 &type->i_mutex_dir_key#5 remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 &type->i_mutex_dir_key#5 remove_cache_srcu &obj_hash[i].lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &anon_vma->rwsem &n->list_lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &anon_vma->rwsem &n->list_lock &c->lock irq_context: 0 kn->active#2 &kernfs_locks->open_file_mutex[count] &rq->__lock irq_context: 0 tomoyo_ss rcu_read_lock &rq->__lock irq_context: 0 &root->kernfs_rwsem &rq->__lock irq_context: 0 &root->kernfs_iattr_rwsem &rq->__lock irq_context: 0 sb_writers#8 &type->i_mutex_dir_key#4 &root->kernfs_rwsem remove_cache_srcu &c->lock irq_context: 0 sb_writers#8 &type->i_mutex_dir_key#4 &root->kernfs_rwsem remove_cache_srcu &n->list_lock irq_context: 0 sb_writers#8 &type->i_mutex_dir_key#4 &root->kernfs_rwsem remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#8 &type->i_mutex_dir_key#4 &root->kernfs_rwsem remove_cache_srcu &obj_hash[i].lock irq_context: 0 sb_writers#8 &type->i_mutex_dir_key#4 &root->kernfs_rwsem remove_cache_srcu &rq->__lock irq_context: 0 sb_writers#8 &of->mutex kn->active#2 rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#8 &of->mutex kn->active#2 rcu_read_lock &rq->__lock irq_context: 0 remove_cache_srcu &rq->__lock irq_context: 0 sb_writers#8 fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 kn->active#2 &kernfs_locks->open_file_mutex[count] rcu_read_lock rcu_node_0 irq_context: 0 
kn->active#2 &kernfs_locks->open_file_mutex[count] rcu_read_lock &rq->__lock irq_context: 0 kn->active#2 &kernfs_locks->open_file_mutex[count] rcu_read_lock &rcu_state.gp_wq irq_context: 0 kn->active#2 &kernfs_locks->open_file_mutex[count] rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 kn->active#2 &kernfs_locks->open_file_mutex[count] rcu_read_lock &rcu_state.gp_wq &p->pi_lock &cfs_rq->removed.lock irq_context: 0 kn->active#2 &kernfs_locks->open_file_mutex[count] rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 kn->active#2 &kernfs_locks->open_file_mutex[count] rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 kn->active#2 remove_cache_srcu &rq->__lock irq_context: 0 sb_writers#8 fs_reclaim &rq->__lock irq_context: 0 &kernfs_locks->open_file_mutex[count] rcu_read_lock rcu_node_0 irq_context: 0 &kernfs_locks->open_file_mutex[count] rcu_read_lock &rq->__lock irq_context: 0 sb_writers#8 &of->mutex kn->active#2 remove_cache_srcu irq_context: 0 sb_writers#8 &of->mutex kn->active#2 remove_cache_srcu quarantine_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#2 remove_cache_srcu &c->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#2 remove_cache_srcu &n->list_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#2 remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#8 &of->mutex kn->active#2 remove_cache_srcu &obj_hash[i].lock irq_context: 0 sb_writers#8 &of->mutex kn->active#2 remove_cache_srcu &rq->__lock irq_context: 0 &type->i_mutex_dir_key#5 &n->list_lock &c->lock irq_context: 0 kn->active#3 fs_reclaim irq_context: 0 kn->active#3 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#3 &kernfs_locks->open_file_mutex[count] irq_context: 0 kn->active#3 &kernfs_locks->open_file_mutex[count] fs_reclaim irq_context: 0 kn->active#3 &kernfs_locks->open_file_mutex[count] fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#8 &of->mutex kn->active#3 fs_reclaim irq_context: 0 sb_writers#8 &of->mutex kn->active#3 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#8 &of->mutex kn->active#3 &c->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#3 &____s->seqcount irq_context: 0 sb_writers#8 &of->mutex kn->active#3 pool_lock#2 irq_context: 0 sb_writers#8 &of->mutex kn->active#3 &rq->__lock irq_context: 0 sb_writers#8 &of->mutex kn->active#3 uevent_sock_mutex irq_context: 0 sb_writers#8 &of->mutex kn->active#3 uevent_sock_mutex fs_reclaim irq_context: 0 sb_writers#8 &of->mutex kn->active#3 uevent_sock_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#8 &of->mutex kn->active#3 uevent_sock_mutex pool_lock#2 irq_context: 0 sb_writers#8 &of->mutex kn->active#3 uevent_sock_mutex nl_table_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#3 uevent_sock_mutex rlock-AF_NETLINK irq_context: 0 sb_writers#8 &of->mutex kn->active#3 uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait irq_context: 0 sb_writers#8 &of->mutex kn->active#3 uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#3 uevent_sock_mutex nl_table_wait.lock irq_context: 0 sb_writers#8 &of->mutex kn->active#3 uevent_sock_mutex &obj_hash[i].lock irq_context: 0 sb_writers#8 &of->mutex kn->active#3 &obj_hash[i].lock irq_context: 0 kn->active#3 &kernfs_locks->open_file_mutex[count] &c->lock irq_context: 0 kn->active#3 &kernfs_locks->open_file_mutex[count] &____s->seqcount irq_context: 0 
sb_writers#8 &of->mutex kn->active#3 uevent_sock_mutex &c->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#3 uevent_sock_mutex &____s->seqcount irq_context: 0 kn->active#3 &c->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#3 &n->list_lock irq_context: 0 kn->active#3 &____s->seqcount irq_context: 0 sb_writers#8 &of->mutex kn->active#3 &n->list_lock &c->lock irq_context: 0 &kernfs_locks->open_file_mutex[count] rcu_node_0 irq_context: 0 kn->active#3 &n->list_lock irq_context: 0 kn->active#3 &n->list_lock &c->lock irq_context: 0 sb_writers#8 &type->i_mutex_dir_key#4 rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#8 &type->i_mutex_dir_key#4 rcu_read_lock &rq->__lock irq_context: 0 sb_writers#8 &type->i_mutex_dir_key#4 rcu_node_0 irq_context: 0 sb_writers#8 &type->i_mutex_dir_key#4 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 kn->active#3 &rq->__lock irq_context: 0 sb_writers#8 &type->i_mutex_dir_key#4 &root->kernfs_rwsem fs_reclaim &rq->__lock irq_context: 0 sb_writers#8 &of->mutex kn->active#3 uevent_sock_mutex &rq->__lock irq_context: 0 &eli->li_list_mtx &obj_hash[i].lock irq_context: 0 &eli->li_list_mtx pool_lock#2 irq_context: 0 ext4_li_mtx irq_context: 0 ext4_li_mtx &eli->li_list_mtx irq_context: 0 ext4_li_mtx &obj_hash[i].lock irq_context: 0 ext4_li_mtx pool_lock#2 irq_context: 0 kn->active#3 &kernfs_locks->open_file_mutex[count] fs_reclaim &rq->__lock irq_context: 0 sb_writers#8 &of->mutex kn->active#3 uevent_sock_mutex remove_cache_srcu irq_context: 0 sb_writers#8 &of->mutex kn->active#3 uevent_sock_mutex remove_cache_srcu quarantine_lock irq_context: 0 kn->active#3 &kernfs_locks->open_file_mutex[count] &rq->__lock irq_context: 0 sb_writers#8 &of->mutex kn->active#3 quarantine_lock irq_context: 0 remove_cache_srcu rcu_read_lock rcu_node_0 irq_context: 0 remove_cache_srcu rcu_read_lock &rq->__lock irq_context: 0 sb_writers#8 &of->mutex kn->active#3 remove_cache_srcu irq_context: 0 sb_writers#8 &of->mutex kn->active#3 remove_cache_srcu quarantine_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#3 uevent_sock_mutex batched_entropy_u8.lock irq_context: 0 sb_writers#8 &of->mutex kn->active#3 uevent_sock_mutex kfence_freelist_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#3 uevent_sock_mutex &meta->lock irq_context: 0 (wq_completion)events (work_completion)(&(&krcp->monitor_work)->work) krc.lock &obj_hash[i].lock irq_context: 0 sb_writers#8 &of->mutex kn->active#3 remove_cache_srcu &c->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#3 remove_cache_srcu &n->list_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#3 remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#8 &of->mutex kn->active#3 remove_cache_srcu &obj_hash[i].lock irq_context: 0 sb_writers#8 &of->mutex kn->active#3 remove_cache_srcu &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&(&krcp->krw_arr[i].rcu_work)->work) irq_context: 0 (wq_completion)events (work_completion)(&(&krcp->krw_arr[i].rcu_work)->work) krc.lock irq_context: 0 (wq_completion)events (work_completion)(&(&krcp->krw_arr[i].rcu_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&(&krcp->krw_arr[i].rcu_work)->work) rcu_callback &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&(&krcp->krw_arr[i].rcu_work)->work) rcu_callback pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&(&krcp->krw_arr[i].rcu_work)->work) rcu_callback &obj_hash[i].lock pool_lock irq_context: 0 sb_writers#8 rcu_read_lock 
rcu_node_0 irq_context: 0 sb_writers#8 rcu_read_lock &rq->__lock irq_context: 0 sb_writers#8 &of->mutex kn->active#3 uevent_sock_mutex rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#8 &of->mutex kn->active#3 uevent_sock_mutex rcu_read_lock &rq->__lock irq_context: 0 sb_writers#8 remove_cache_srcu &rq->__lock irq_context: 0 sb_writers#8 &of->mutex kn->active#3 fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 sb_writers#8 &of->mutex kn->active#3 uevent_sock_mutex &n->list_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#3 uevent_sock_mutex quarantine_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#3 &pcp->lock &zone->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#3 &pcp->lock &zone->lock &____s->seqcount irq_context: 0 sb_writers#8 &type->i_mutex_dir_key#4 &root->kernfs_rwsem rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#8 &type->i_mutex_dir_key#4 &root->kernfs_rwsem rcu_read_lock &rq->__lock irq_context: 0 kn->active#3 remove_cache_srcu irq_context: 0 kn->active#3 remove_cache_srcu quarantine_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#3 uevent_sock_mutex &n->list_lock &c->lock irq_context: 0 kn->active#3 batched_entropy_u8.lock irq_context: 0 kn->active#3 kfence_freelist_lock irq_context: 0 kn->active#3 remove_cache_srcu &c->lock irq_context: 0 kn->active#3 remove_cache_srcu &n->list_lock irq_context: 0 kn->active#3 remove_cache_srcu &obj_hash[i].lock irq_context: 0 tomoyo_ss remove_cache_srcu &rq->__lock irq_context: 0 kn->active#3 remove_cache_srcu &rq->__lock irq_context: 0 tomoyo_ss rcu_read_lock &rcu_state.gp_wq irq_context: 0 tomoyo_ss rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 tomoyo_ss rcu_read_lock &rcu_state.gp_wq &p->pi_lock &cfs_rq->removed.lock irq_context: 0 tomoyo_ss rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 tomoyo_ss rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 kn->active#3 &kernfs_locks->open_file_mutex[count] batched_entropy_u8.lock irq_context: 0 kn->active#3 &kernfs_locks->open_file_mutex[count] kfence_freelist_lock irq_context: 0 kn->active#3 fs_reclaim &rq->__lock irq_context: 0 &mm->mmap_lock &n->list_lock &c->lock irq_context: 0 &sb->s_type->i_lock_key#24 irq_context: 0 &type->i_mutex_dir_key#4 &n->list_lock irq_context: 0 &type->i_mutex_dir_key#4 &n->list_lock &c->lock irq_context: 0 &type->i_mutex_dir_key#4 &rq->__lock irq_context: 0 kn->active#4 &rq->__lock irq_context: 0 kn->active#4 fs_reclaim irq_context: 0 kn->active#4 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#4 &kernfs_locks->open_file_mutex[count] irq_context: 0 kn->active#4 &kernfs_locks->open_file_mutex[count] fs_reclaim irq_context: 0 kn->active#4 &kernfs_locks->open_file_mutex[count] fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &p->lock &of->mutex irq_context: 0 &p->lock &of->mutex kn->active#4 param_lock irq_context: 0 sb_writers#8 &type->i_mutex_dir_key#4 irq_context: 0 sb_writers#8 &type->i_mutex_dir_key#4 rename_lock.seqcount irq_context: 0 sb_writers#8 &type->i_mutex_dir_key#4 rcu_read_lock &dentry->d_lock irq_context: 0 sb_writers#8 &type->i_mutex_dir_key#4 &root->kernfs_rwsem irq_context: 0 sb_writers#8 &root->kernfs_iattr_rwsem irq_context: 0 sb_writers#8 &dentry->d_lock irq_context: 0 sb_writers#8 tomoyo_ss irq_context: 0 sb_writers#8 tomoyo_ss mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#8 tomoyo_ss &rq->__lock irq_context: 0 sb_writers#8 tomoyo_ss rcu_read_lock 
mount_lock.seqcount irq_context: 0 sb_writers#8 tomoyo_ss rcu_read_lock rcu_read_lock rename_lock.seqcount irq_context: 0 sb_writers#8 tomoyo_ss &obj_hash[i].lock irq_context: 0 sb_writers#8 kn->active#4 fs_reclaim irq_context: 0 sb_writers#8 kn->active#4 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#8 kn->active#4 &kernfs_locks->open_file_mutex[count] irq_context: 0 sb_writers#8 kn->active#4 &kernfs_locks->open_file_mutex[count] fs_reclaim irq_context: 0 sb_writers#8 kn->active#4 &kernfs_locks->open_file_mutex[count] fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#8 &sb->s_type->i_mutex_key#13 irq_context: 0 sb_writers#8 iattr_mutex irq_context: 0 sb_writers#8 &sb->s_type->i_mutex_key#13 tk_core.seq.seqcount irq_context: 0 sb_writers#8 &sb->s_type->i_mutex_key#13 &root->kernfs_iattr_rwsem irq_context: 0 sb_writers#8 &sb->s_type->i_mutex_key#13 &root->kernfs_iattr_rwsem iattr_mutex irq_context: 0 sb_writers#8 &sb->s_type->i_mutex_key#13 &root->kernfs_iattr_rwsem iattr_mutex fs_reclaim irq_context: 0 &root->kernfs_iattr_rwsem &sem->wait_lock irq_context: 0 sb_writers#8 &sb->s_type->i_mutex_key#13 &root->kernfs_iattr_rwsem iattr_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &root->kernfs_iattr_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#8 &sb->s_type->i_mutex_key#13 &root->kernfs_iattr_rwsem iattr_mutex &____s->seqcount irq_context: 0 &root->kernfs_iattr_rwsem rcu_read_lock &rq->__lock irq_context: 0 sb_writers#8 &sb->s_type->i_mutex_key#13 &root->kernfs_iattr_rwsem iattr_mutex pool_lock#2 irq_context: 0 &root->kernfs_iattr_rwsem rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#8 &sb->s_type->i_mutex_key#13 &root->kernfs_iattr_rwsem iattr_mutex &c->lock irq_context: 0 sb_writers#8 &sb->s_type->i_mutex_key#13 &root->kernfs_iattr_rwsem iattr_mutex tk_core.seq.seqcount irq_context: 0 sb_writers#8 &sb->s_type->i_mutex_key#13 &sem->wait_lock irq_context: 0 sb_writers#8 &sb->s_type->i_mutex_key#13 &p->pi_lock irq_context: 0 sb_writers#8 &sb->s_type->i_mutex_key#13 &p->pi_lock &rq->__lock irq_context: 0 sb_writers#8 &sb->s_type->i_mutex_key#13 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#8 &of->mutex kn->active#4 param_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#4 param_lock disk_events_mutex irq_context: 0 &type->i_mutex_dir_key#4 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &type->i_mutex_dir_key#4 &root->kernfs_rwsem &rq->__lock irq_context: 0 &type->i_mutex_dir_key#4 remove_cache_srcu irq_context: 0 &type->i_mutex_dir_key#4 remove_cache_srcu quarantine_lock irq_context: 0 &type->i_mutex_dir_key#4 remove_cache_srcu &n->list_lock irq_context: 0 &type->i_mutex_dir_key#4 remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 &type->i_mutex_dir_key#4 remove_cache_srcu &obj_hash[i].lock irq_context: 0 &type->i_mutex_dir_key#4 remove_cache_srcu &c->lock irq_context: 0 &type->i_mutex_dir_key#4 &root->kernfs_rwsem remove_cache_srcu irq_context: 0 &type->i_mutex_dir_key#4 &root->kernfs_rwsem remove_cache_srcu quarantine_lock irq_context: 0 &type->i_mutex_dir_key#4 &root->kernfs_rwsem batched_entropy_u8.lock irq_context: 0 &type->i_mutex_dir_key#4 &root->kernfs_rwsem kfence_freelist_lock irq_context: 0 &type->i_mutex_dir_key#5 &pcp->lock &zone->lock irq_context: 0 &type->i_mutex_dir_key#5 &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &type->i_mutex_dir_key#4 fs_reclaim 
&rq->__lock irq_context: 0 &type->i_mutex_dir_key#4 &root->kernfs_rwsem remove_cache_srcu &c->lock irq_context: 0 &type->i_mutex_dir_key#4 &root->kernfs_rwsem remove_cache_srcu &n->list_lock irq_context: 0 &type->i_mutex_dir_key#4 &root->kernfs_rwsem remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 &type->i_mutex_dir_key#4 &root->kernfs_rwsem remove_cache_srcu &obj_hash[i].lock irq_context: 0 sb_writers#8 tk_core.seq.seqcount irq_context: 0 sb_writers#8 &sb->s_type->i_lock_key#24 irq_context: 0 sb_writers#8 &wb->list_lock irq_context: 0 sb_writers#8 &wb->list_lock &sb->s_type->i_lock_key#24 irq_context: 0 tomoyo_ss &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &type->i_mutex_dir_key#5 &obj_hash[i].lock irq_context: 0 &type->i_mutex_dir_key#4 rcu_read_lock rcu_node_0 irq_context: 0 &type->i_mutex_dir_key#4 rcu_read_lock &rq->__lock irq_context: 0 &type->i_mutex_dir_key#4 rcu_node_0 irq_context: 0 &type->i_mutex_dir_key#4 &root->kernfs_rwsem fs_reclaim &rq->__lock irq_context: 0 &type->i_mutex_dir_key#4 &root->kernfs_rwsem &n->list_lock irq_context: 0 &type->i_mutex_dir_key#4 fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 &type->i_mutex_dir_key#4 remove_cache_srcu &rq->__lock irq_context: 0 &type->i_mutex_dir_key#4 &root->kernfs_rwsem rcu_read_lock rcu_node_0 irq_context: 0 &type->i_mutex_dir_key#4 &root->kernfs_rwsem rcu_read_lock &rq->__lock irq_context: 0 &vma->vm_lock->lock rcu_read_lock rcu_node_0 irq_context: 0 &vma->vm_lock->lock rcu_read_lock &rq->__lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &sb->s_type->i_mutex_key#12 irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &sb->s_type->i_mutex_key#12 tk_core.seq.seqcount irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &sb->s_type->i_mutex_key#12 rcu_read_lock &dentry->d_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &sb->s_type->i_mutex_key#12 &dentry->d_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &fsnotify_mark_srcu irq_context: 0 sb_writers#5 &s->s_inode_list_lock irq_context: 0 sb_writers#5 &info->lock irq_context: 0 sb_writers#5 &sbinfo->stat_lock irq_context: 0 sb_writers#5 &xa->xa_lock#6 irq_context: 0 sb_writers#5 &obj_hash[i].lock irq_context: 0 sb_writers#5 pool_lock#2 irq_context: 0 sb_writers#5 &fsnotify_mark_srcu irq_context: 0 kn->active#5 fs_reclaim irq_context: 0 kn->active#5 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#5 &kernfs_locks->open_file_mutex[count] irq_context: 0 kn->active#5 &kernfs_locks->open_file_mutex[count] fs_reclaim irq_context: 0 kn->active#5 &kernfs_locks->open_file_mutex[count] fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#8 &of->mutex kn->active#5 fs_reclaim irq_context: 0 sb_writers#8 &of->mutex kn->active#5 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#8 &of->mutex kn->active#5 pool_lock#2 irq_context: 0 sb_writers#8 &of->mutex kn->active#5 uevent_sock_mutex irq_context: 0 sb_writers#8 &of->mutex kn->active#5 uevent_sock_mutex fs_reclaim irq_context: 0 sb_writers#8 &of->mutex kn->active#5 uevent_sock_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#8 &of->mutex kn->active#5 uevent_sock_mutex pool_lock#2 irq_context: 0 sb_writers#8 &of->mutex kn->active#5 uevent_sock_mutex nl_table_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#5 uevent_sock_mutex rlock-AF_NETLINK irq_context: 0 sb_writers#8 &of->mutex kn->active#5 uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait 
irq_context: 0 sb_writers#8 &of->mutex kn->active#5 uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#5 uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq irq_context: 0 sb_writers#8 &of->mutex kn->active#5 uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#5 uevent_sock_mutex nl_table_wait.lock irq_context: 0 sb_writers#8 &of->mutex kn->active#5 uevent_sock_mutex &obj_hash[i].lock irq_context: 0 sb_writers#8 &of->mutex kn->active#5 &obj_hash[i].lock irq_context: 0 &p->lock &of->mutex kn->active#5 fs_reclaim irq_context: 0 &p->lock &of->mutex kn->active#5 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &p->lock &of->mutex kn->active#5 &c->lock irq_context: 0 &p->lock &of->mutex kn->active#5 &____s->seqcount irq_context: 0 kn->active#5 &kernfs_locks->open_file_mutex[count] &c->lock irq_context: 0 &p->lock &of->mutex kn->active#5 pool_lock#2 irq_context: 0 &p->lock &of->mutex kn->active#5 &obj_hash[i].lock irq_context: 0 sb_writers#8 &of->mutex kn->active#5 uevent_sock_mutex quarantine_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#5 uevent_sock_mutex &c->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#5 uevent_sock_mutex &n->list_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#5 uevent_sock_mutex &n->list_lock &c->lock irq_context: 0 kn->active#5 &kernfs_locks->open_file_mutex[count] &n->list_lock irq_context: 0 kn->active#5 &kernfs_locks->open_file_mutex[count] &n->list_lock &c->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#5 uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#8 &of->mutex kn->active#5 uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#8 &of->mutex kn->active#5 quarantine_lock irq_context: 0 kn->active#5 &c->lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &pcp->lock &zone->lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &pcp->lock &zone->lock &____s->seqcount irq_context: 0 sb_writers#8 &of->mutex kn->active#5 uevent_sock_mutex &____s->seqcount irq_context: 0 sb_writers#8 &of->mutex kn->active#5 &c->lock irq_context: 0 kn->active#5 &____s->seqcount irq_context: 0 kn->active#5 &kernfs_locks->open_file_mutex[count] &____s->seqcount irq_context: 0 sb_writers#8 remove_cache_srcu rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#8 remove_cache_srcu rcu_read_lock &rq->__lock irq_context: 0 &p->lock &of->mutex kn->active#5 &n->list_lock irq_context: 0 &p->lock &of->mutex kn->active#5 &n->list_lock &c->lock irq_context: 0 kn->active#6 fs_reclaim irq_context: 0 kn->active#6 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#5 &n->list_lock irq_context: 0 kn->active#6 &kernfs_locks->open_file_mutex[count] irq_context: 0 kn->active#5 &n->list_lock &c->lock irq_context: 0 kn->active#6 &kernfs_locks->open_file_mutex[count] fs_reclaim irq_context: 0 kn->active#6 &kernfs_locks->open_file_mutex[count] fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#7 fs_reclaim irq_context: 0 kn->active#7 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#7 &kernfs_locks->open_file_mutex[count] irq_context: 0 kn->active#7 &kernfs_locks->open_file_mutex[count] fs_reclaim irq_context: 0 kn->active#7 &kernfs_locks->open_file_mutex[count] 
fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#8 fs_reclaim irq_context: 0 kn->active#8 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#8 &kernfs_locks->open_file_mutex[count] irq_context: 0 kn->active#8 &kernfs_locks->open_file_mutex[count] fs_reclaim irq_context: 0 kn->active#8 &kernfs_locks->open_file_mutex[count] fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#9 fs_reclaim irq_context: 0 kn->active#9 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#9 &kernfs_locks->open_file_mutex[count] irq_context: 0 kn->active#9 &kernfs_locks->open_file_mutex[count] fs_reclaim irq_context: 0 kn->active#9 &kernfs_locks->open_file_mutex[count] fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#8 &c->lock irq_context: 0 kn->active#8 &____s->seqcount irq_context: 0 kn->active#10 fs_reclaim irq_context: 0 kn->active#10 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#10 &c->lock irq_context: 0 kn->active#10 &kernfs_locks->open_file_mutex[count] irq_context: 0 kn->active#10 &kernfs_locks->open_file_mutex[count] fs_reclaim irq_context: 0 kn->active#10 &kernfs_locks->open_file_mutex[count] fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &p->lock &n->list_lock irq_context: 0 &p->lock &n->list_lock &c->lock irq_context: 0 kn->active#11 fs_reclaim irq_context: 0 kn->active#11 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#11 &kernfs_locks->open_file_mutex[count] irq_context: 0 kn->active#11 &kernfs_locks->open_file_mutex[count] fs_reclaim irq_context: 0 kn->active#11 &kernfs_locks->open_file_mutex[count] fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#12 fs_reclaim irq_context: 0 kn->active#12 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#12 &kernfs_locks->open_file_mutex[count] irq_context: 0 kn->active#12 &kernfs_locks->open_file_mutex[count] fs_reclaim irq_context: 0 kn->active#12 &kernfs_locks->open_file_mutex[count] fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#5 &sb->s_type->i_mutex_key#12 &pcp->lock &zone->lock irq_context: 0 sb_writers#5 &sb->s_type->i_mutex_key#12 &pcp->lock &zone->lock &____s->seqcount irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 &c->lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 &____s->seqcount irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &obj_hash[i].lock pool_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#5 batched_entropy_u8.lock irq_context: 0 sb_writers#8 &of->mutex kn->active#5 kfence_freelist_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#5 &meta->lock irq_context: 0 kn->active#12 &c->lock irq_context: 0 kn->active#13 fs_reclaim irq_context: 0 kn->active#13 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#13 &kernfs_locks->open_file_mutex[count] irq_context: 0 kn->active#13 &kernfs_locks->open_file_mutex[count] fs_reclaim irq_context: 0 kn->active#13 &kernfs_locks->open_file_mutex[count] fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#13 &kernfs_locks->open_file_mutex[count] &c->lock irq_context: 0 kn->active#13 &kernfs_locks->open_file_mutex[count] &____s->seqcount irq_context: 0 kn->active#14 fs_reclaim irq_context: 0 kn->active#14 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#14 &c->lock irq_context: 0 kn->active#14 &n->list_lock irq_context: 0 kn->active#14 &n->list_lock &c->lock irq_context: 
0 kn->active#14 &____s->seqcount irq_context: 0 kn->active#14 &kernfs_locks->open_file_mutex[count] irq_context: 0 kn->active#14 &kernfs_locks->open_file_mutex[count] fs_reclaim irq_context: 0 kn->active#14 &kernfs_locks->open_file_mutex[count] fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &p->lock &of->mutex kn->active#14 fs_reclaim irq_context: 0 &p->lock &of->mutex kn->active#14 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &p->lock &of->mutex kn->active#14 pool_lock#2 irq_context: 0 &p->lock &of->mutex kn->active#14 &obj_hash[i].lock irq_context: 0 sb_writers#8 &of->mutex kn->active#5 &rq->__lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 batched_entropy_u32.lock crngs.lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &sem->wait_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &rq->__lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 kn->active#6 &c->lock irq_context: 0 kn->active#6 &n->list_lock irq_context: 0 kn->active#6 &n->list_lock &c->lock irq_context: 0 kn->active#10 &____s->seqcount irq_context: 0 rcu_read_lock &rcu_state.expedited_wq irq_context: 0 rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &p->lock &of->mutex kn->active#14 &c->lock irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 rcu_read_lock &dentry->d_lock irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 &dentry->d_lock irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 &fsnotify_mark_srcu irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 &fsnotify_mark_srcu fs_reclaim irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 &fsnotify_mark_srcu fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 &fsnotify_mark_srcu pool_lock#2 irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 &fsnotify_mark_srcu &group->notification_lock irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 &fsnotify_mark_srcu &group->notification_waitq irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 &fsnotify_mark_srcu &group->notification_waitq &p->pi_lock irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 &fsnotify_mark_srcu &group->notification_waitq &p->pi_lock &rq->__lock irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 &fsnotify_mark_srcu &group->notification_waitq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#8 &of->mutex kn->active#5 uevent_sock_mutex &rq->__lock irq_context: 0 sb_writers#8 &of->mutex kn->active#5 &device->physical_node_lock irq_context: 0 &p->lock remove_cache_srcu irq_context: 0 &p->lock remove_cache_srcu quarantine_lock irq_context: 0 &p->lock remove_cache_srcu &c->lock irq_context: 0 &p->lock remove_cache_srcu &n->list_lock irq_context: 0 &p->lock remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 &p->lock remove_cache_srcu &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&(&krcp->krw_arr[i].rcu_work)->work) rcu_callback &rq->__lock irq_context: 0 kn->active#10 &kernfs_locks->open_file_mutex[count] &c->lock irq_context: 0 kn->active#10 &kernfs_locks->open_file_mutex[count] &____s->seqcount irq_context: 0 &p->lock &of->mutex kn->active#5 quarantine_lock irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 &fsnotify_mark_srcu &____s->seqcount 
irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 &fsnotify_mark_srcu &c->lock irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 &fsnotify_mark_srcu &obj_hash[i].lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 tomoyo_ss rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 tomoyo_ss rcu_read_lock &rq->__lock irq_context: 0 &mm->mmap_lock remove_cache_srcu irq_context: 0 &mm->mmap_lock remove_cache_srcu quarantine_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 tomoyo_ss &n->list_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 tomoyo_ss &n->list_lock &c->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#5 udc_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 tomoyo_ss quarantine_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 tomoyo_ss remove_cache_srcu irq_context: 0 sb_writers#8 &of->mutex kn->active#5 uevent_sock_mutex remove_cache_srcu irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 tomoyo_ss remove_cache_srcu quarantine_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#5 uevent_sock_mutex remove_cache_srcu quarantine_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 tomoyo_ss remove_cache_srcu &c->lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 tomoyo_ss remove_cache_srcu &n->list_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 tomoyo_ss remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 tomoyo_ss remove_cache_srcu &obj_hash[i].lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 tomoyo_ss remove_cache_srcu &rq->__lock irq_context: 0 sb_writers#8 &of->mutex kn->active#5 uevent_sock_mutex batched_entropy_u8.lock irq_context: 0 sb_writers#8 &of->mutex kn->active#5 uevent_sock_mutex kfence_freelist_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#5 uevent_sock_mutex &meta->lock irq_context: 0 kn->active#5 remove_cache_srcu irq_context: 0 kn->active#5 remove_cache_srcu quarantine_lock irq_context: 0 kn->active#5 remove_cache_srcu &c->lock irq_context: 0 kn->active#5 remove_cache_srcu &n->list_lock irq_context: 0 kn->active#5 remove_cache_srcu &obj_hash[i].lock irq_context: 0 kn->active#5 &rq->__lock irq_context: 0 sb_writers#8 &of->mutex kn->active#5 fw_lock irq_context: 0 (wq_completion)events (work_completion)(&(&krcp->krw_arr[i].rcu_work)->work) rcu_callback &meta->lock irq_context: 0 (wq_completion)events (work_completion)(&(&krcp->krw_arr[i].rcu_work)->work) rcu_callback kfence_freelist_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#5 &n->list_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#5 &n->list_lock &c->lock irq_context: 0 (wq_completion)events (work_completion)(&(&krcp->krw_arr[i].rcu_work)->work) rcu_callback &base->lock irq_context: 0 (wq_completion)events (work_completion)(&(&krcp->krw_arr[i].rcu_work)->work) rcu_callback &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#8 &of->mutex kn->active#5 uevent_sock_mutex rcu_node_0 irq_context: 0 sb_writers#8 &of->mutex kn->active#5 uevent_sock_mutex rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#8 &of->mutex kn->active#5 &____s->seqcount irq_context: 0 kn->active#5 remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 &p->lock &of->mutex kn->active#5 &device->physical_node_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#5 rcu_node_0 irq_context: 0 sb_writers#8 &of->mutex kn->active#5 rcu_read_lock rcu_node_0 irq_context: 0 kn->active#15 fs_reclaim irq_context: 0 kn->active#15 fs_reclaim 
mmu_notifier_invalidate_range_start irq_context: 0 kn->active#15 &c->lock irq_context: 0 kn->active#15 &kernfs_locks->open_file_mutex[count] irq_context: 0 kn->active#15 &kernfs_locks->open_file_mutex[count] fs_reclaim irq_context: 0 kn->active#15 &kernfs_locks->open_file_mutex[count] fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &p->lock &of->mutex kn->active#15 dev_base_lock irq_context: 0 kn->active#16 fs_reclaim irq_context: 0 kn->active#16 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#16 &kernfs_locks->open_file_mutex[count] irq_context: 0 kn->active#16 &kernfs_locks->open_file_mutex[count] fs_reclaim irq_context: 0 kn->active#16 &kernfs_locks->open_file_mutex[count] fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &p->lock &of->mutex kn->active#16 dev_base_lock irq_context: 0 kn->active#17 fs_reclaim irq_context: 0 kn->active#17 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#17 &kernfs_locks->open_file_mutex[count] irq_context: 0 kn->active#17 &kernfs_locks->open_file_mutex[count] fs_reclaim irq_context: 0 kn->active#17 &kernfs_locks->open_file_mutex[count] fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#18 fs_reclaim irq_context: 0 kn->active#18 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#18 &kernfs_locks->open_file_mutex[count] irq_context: 0 kn->active#18 &kernfs_locks->open_file_mutex[count] fs_reclaim irq_context: 0 kn->active#18 &kernfs_locks->open_file_mutex[count] fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &p->lock &of->mutex kn->active#18 dev_base_lock irq_context: 0 kn->active#19 fs_reclaim irq_context: 0 kn->active#19 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#19 &c->lock irq_context: 0 kn->active#19 &kernfs_locks->open_file_mutex[count] irq_context: 0 kn->active#19 &kernfs_locks->open_file_mutex[count] fs_reclaim irq_context: 0 kn->active#19 &kernfs_locks->open_file_mutex[count] fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &p->lock &of->mutex kn->active#19 dev_base_lock irq_context: 0 kn->active#20 fs_reclaim irq_context: 0 kn->active#20 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#20 &c->lock irq_context: 0 kn->active#20 &kernfs_locks->open_file_mutex[count] irq_context: 0 kn->active#20 &kernfs_locks->open_file_mutex[count] fs_reclaim irq_context: 0 kn->active#20 &kernfs_locks->open_file_mutex[count] fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &p->lock &of->mutex kn->active#20 dev_base_lock irq_context: 0 kn->active#21 fs_reclaim irq_context: 0 kn->active#21 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#21 fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 sb_writers#8 batched_entropy_u8.lock irq_context: 0 sb_writers#8 kfence_freelist_lock irq_context: 0 sb_writers#8 &meta->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#5 uevent_sock_mutex remove_cache_srcu &c->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#5 uevent_sock_mutex remove_cache_srcu &n->list_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#5 uevent_sock_mutex remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#8 &of->mutex kn->active#5 uevent_sock_mutex remove_cache_srcu &obj_hash[i].lock irq_context: 0 kn->active#22 fs_reclaim irq_context: 0 kn->active#22 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#21 pool_lock#2 irq_context: 0 kn->active#22 
&kernfs_locks->open_file_mutex[count] irq_context: 0 kn->active#21 &kernfs_locks->open_file_mutex[count] irq_context: 0 kn->active#22 &kernfs_locks->open_file_mutex[count] fs_reclaim irq_context: 0 kn->active#21 &kernfs_locks->open_file_mutex[count] fs_reclaim irq_context: 0 kn->active#22 &kernfs_locks->open_file_mutex[count] fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#21 &kernfs_locks->open_file_mutex[count] fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &of->mutex irq_context: 0 &of->mutex kn->active#21 &dev->power.lock irq_context: 0 &of->mutex kn->active#21 pci_lock irq_context: 0 &of->mutex kn->active#21 pci_lock pci_config_lock irq_context: 0 kn->active#23 fs_reclaim irq_context: 0 kn->active#23 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#23 &c->lock irq_context: 0 kn->active#23 &kernfs_locks->open_file_mutex[count] irq_context: 0 kn->active#23 &kernfs_locks->open_file_mutex[count] fs_reclaim irq_context: 0 kn->active#23 &kernfs_locks->open_file_mutex[count] fs_reclaim mmu_notifier_invalidate_range_start irq_context: softirq lib/debugobjects.c:101 irq_context: softirq lib/debugobjects.c:101 rcu_read_lock &pool->lock irq_context: softirq lib/debugobjects.c:101 rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: softirq lib/debugobjects.c:101 rcu_read_lock &pool->lock &p->pi_lock irq_context: softirq lib/debugobjects.c:101 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: softirq lib/debugobjects.c:101 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (debug_obj_work).work irq_context: 0 kn->active#24 fs_reclaim irq_context: 0 kn->active#24 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#24 &kernfs_locks->open_file_mutex[count] irq_context: 0 kn->active#24 &kernfs_locks->open_file_mutex[count] fs_reclaim irq_context: 0 kn->active#24 &kernfs_locks->open_file_mutex[count] fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#25 fs_reclaim irq_context: 0 kn->active#25 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#25 &kernfs_locks->open_file_mutex[count] irq_context: 0 kn->active#25 &kernfs_locks->open_file_mutex[count] fs_reclaim irq_context: 0 kn->active#25 &kernfs_locks->open_file_mutex[count] fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#26 fs_reclaim irq_context: 0 kn->active#26 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#26 &kernfs_locks->open_file_mutex[count] irq_context: 0 kn->active#26 &kernfs_locks->open_file_mutex[count] fs_reclaim irq_context: 0 kn->active#26 &kernfs_locks->open_file_mutex[count] fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &ep->mtx &pipe->rd_wait irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 tomoyo_ss &n->list_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 tomoyo_ss &n->list_lock &c->lock irq_context: 0 kn->active#27 fs_reclaim irq_context: 0 kn->active#27 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#27 &kernfs_locks->open_file_mutex[count] irq_context: 0 kn->active#27 &kernfs_locks->open_file_mutex[count] fs_reclaim irq_context: 0 kn->active#27 &kernfs_locks->open_file_mutex[count] fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#28 fs_reclaim irq_context: 0 kn->active#28 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#28 &kernfs_locks->open_file_mutex[count] 
irq_context: 0 kn->active#28 &kernfs_locks->open_file_mutex[count] fs_reclaim irq_context: 0 kn->active#28 &kernfs_locks->open_file_mutex[count] fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#29 fs_reclaim irq_context: 0 kn->active#29 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#29 &kernfs_locks->open_file_mutex[count] irq_context: 0 kn->active#29 &kernfs_locks->open_file_mutex[count] fs_reclaim irq_context: 0 kn->active#29 &kernfs_locks->open_file_mutex[count] fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#30 fs_reclaim irq_context: 0 kn->active#30 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#30 &c->lock irq_context: 0 kn->active#30 &kernfs_locks->open_file_mutex[count] irq_context: 0 kn->active#30 &kernfs_locks->open_file_mutex[count] fs_reclaim irq_context: 0 kn->active#30 &kernfs_locks->open_file_mutex[count] fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#30 &kernfs_locks->open_file_mutex[count] &c->lock irq_context: 0 kn->active#30 &kernfs_locks->open_file_mutex[count] &____s->seqcount irq_context: 0 kn->active#31 fs_reclaim irq_context: 0 kn->active#31 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#31 &kernfs_locks->open_file_mutex[count] irq_context: 0 kn->active#31 &kernfs_locks->open_file_mutex[count] fs_reclaim irq_context: 0 kn->active#31 &kernfs_locks->open_file_mutex[count] fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#32 fs_reclaim irq_context: 0 kn->active#32 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &sig->cred_guard_mutex tomoyo_ss remove_cache_srcu irq_context: 0 kn->active#32 remove_cache_srcu irq_context: 0 &sig->cred_guard_mutex tomoyo_ss remove_cache_srcu quarantine_lock irq_context: 0 kn->active#32 remove_cache_srcu quarantine_lock irq_context: 0 &sig->cred_guard_mutex tomoyo_ss remove_cache_srcu &c->lock irq_context: 0 kn->active#32 &kernfs_locks->open_file_mutex[count] irq_context: 0 &sig->cred_guard_mutex tomoyo_ss remove_cache_srcu &n->list_lock irq_context: 0 kn->active#32 &kernfs_locks->open_file_mutex[count] fs_reclaim irq_context: 0 kn->active#32 &kernfs_locks->open_file_mutex[count] fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &sig->cred_guard_mutex tomoyo_ss remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 &sig->cred_guard_mutex tomoyo_ss remove_cache_srcu &obj_hash[i].lock irq_context: 0 kn->active#33 fs_reclaim irq_context: 0 kn->active#33 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#33 &c->lock irq_context: 0 kn->active#33 &kernfs_locks->open_file_mutex[count] irq_context: 0 kn->active#33 &kernfs_locks->open_file_mutex[count] fs_reclaim irq_context: 0 kn->active#33 &kernfs_locks->open_file_mutex[count] fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 tomoyo_ss &n->list_lock irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 tomoyo_ss &n->list_lock &c->lock irq_context: 0 &sig->cred_guard_mutex tomoyo_ss &n->list_lock irq_context: 0 &sig->cred_guard_mutex tomoyo_ss &n->list_lock &c->lock irq_context: 0 &sig->cred_guard_mutex &iint->mutex &n->list_lock irq_context: 0 &sig->cred_guard_mutex &iint->mutex &n->list_lock &c->lock irq_context: 0 &sig->cred_guard_mutex &iint->mutex mapping.invalidate_lock &rq->__lock irq_context: 0 kn->active#29 &c->lock irq_context: 0 kn->active#29 &n->list_lock irq_context: 0 kn->active#29 &n->list_lock &c->lock 
irq_context: 0 kn->active#30 remove_cache_srcu irq_context: 0 kn->active#30 remove_cache_srcu quarantine_lock irq_context: 0 kn->active#30 remove_cache_srcu &c->lock irq_context: 0 kn->active#30 remove_cache_srcu &n->list_lock irq_context: 0 kn->active#30 remove_cache_srcu &obj_hash[i].lock irq_context: 0 kn->active#30 remove_cache_srcu rcu_read_lock rcu_node_0 irq_context: 0 kn->active#30 remove_cache_srcu rcu_read_lock &rq->__lock irq_context: 0 kn->active#27 &c->lock irq_context: 0 kn->active#29 &kernfs_locks->open_file_mutex[count] &c->lock irq_context: 0 kn->active#29 &kernfs_locks->open_file_mutex[count] &____s->seqcount irq_context: 0 kn->active#32 &c->lock irq_context: 0 kn->active#30 remove_cache_srcu rcu_read_lock &rcu_state.gp_wq irq_context: 0 sb_writers#5 &sb->s_type->i_mutex_key#12 &rq->__lock irq_context: 0 kn->active#30 remove_cache_srcu rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 &sig->cred_guard_mutex &iint->mutex mapping.invalidate_lock &pcp->lock &zone->lock irq_context: 0 &sig->cred_guard_mutex &iint->mutex mapping.invalidate_lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &mm->mmap_lock remove_cache_srcu irq_context: 0 &mm->mmap_lock remove_cache_srcu quarantine_lock irq_context: 0 kn->active#27 &kernfs_locks->open_file_mutex[count] &c->lock irq_context: 0 kn->active#27 &kernfs_locks->open_file_mutex[count] &____s->seqcount irq_context: 0 kn->active#31 &c->lock irq_context: 0 &type->i_mutex_dir_key#2 &sem->wait_lock irq_context: 0 &type->i_mutex_dir_key#2 &rq->__lock irq_context: 0 &type->i_mutex_dir_key#2 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers &sem->wait_lock irq_context: 0 sb_writers &p->pi_lock irq_context: 0 sb_writers &p->pi_lock &cfs_rq->removed.lock irq_context: 0 sb_writers &p->pi_lock &rq->__lock irq_context: 0 sb_writers &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers &rq->__lock irq_context: 0 kn->active#31 &n->list_lock irq_context: 0 kn->active#31 &n->list_lock &c->lock irq_context: 0 kn->active#31 &kernfs_locks->open_file_mutex[count] &c->lock irq_context: 0 kn->active#31 &kernfs_locks->open_file_mutex[count] &____s->seqcount irq_context: 0 &mm->mmap_lock remove_cache_srcu &c->lock irq_context: 0 &mm->mmap_lock remove_cache_srcu &n->list_lock irq_context: 0 &mm->mmap_lock remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 &mm->mmap_lock remove_cache_srcu &obj_hash[i].lock irq_context: 0 &p->lock &of->mutex kn->active#5 udc_lock irq_context: 0 &kernfs_locks->open_file_mutex[count] &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &cfs_rq->removed.lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &n->list_lock irq_context: 0 &p->lock &of->mutex kn->active#5 &pcp->lock &zone->lock irq_context: 0 &p->lock &of->mutex kn->active#5 &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &p->lock &of->mutex kn->active#5 &rq->__lock irq_context: 0 kn->active#9 &c->lock irq_context: 0 kn->active#11 &kernfs_locks->open_file_mutex[count] &c->lock irq_context: 0 kn->active#11 &kernfs_locks->open_file_mutex[count] &____s->seqcount irq_context: 0 kn->active#14 &kernfs_locks->open_file_mutex[count] &c->lock irq_context: softirq rcu_callback &base->lock irq_context: 0 kn->active#14 &kernfs_locks->open_file_mutex[count] &____s->seqcount irq_context: softirq rcu_callback &base->lock &obj_hash[i].lock irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 &fsnotify_mark_srcu &rq->__lock 
irq_context: 0 &mousedev->client_lock irq_context: 0 &mousedev->mutex#2 irq_context: 0 &mousedev->mutex#2 &dev->mutex#2 irq_context: 0 &rnp->exp_lock irq_context: 0 rcu_state.exp_mutex irq_context: 0 rcu_state.exp_mutex rcu_state.exp_mutex.wait_lock irq_context: 0 &hctx->lock irq_context: 0 rcu_read_lock &hctx->lock irq_context: 0 rcu_read_lock &virtscsi_vq->vq_lock irq_context: 0 rcu_state.exp_mutex.wait_lock irq_context: 0 kn->active#5 &kernfs_locks->open_file_mutex[count] &rq->__lock irq_context: 0 &evdev->mutex &dev->mutex#2 &rnp->exp_lock irq_context: 0 &evdev->mutex &dev->mutex#2 &rnp->exp_wq[3] irq_context: 0 &evdev->mutex &dev->mutex#2 &rq->__lock irq_context: 0 &evdev->mutex &dev->mutex#2 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers &type->i_mutex_dir_key/1 &fsnotify_mark_srcu irq_context: 0 sb_writers &type->i_mutex_dir_key/1 &fsnotify_mark_srcu fs_reclaim irq_context: 0 sb_writers &type->i_mutex_dir_key/1 &fsnotify_mark_srcu fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers &type->i_mutex_dir_key/1 &fsnotify_mark_srcu pool_lock#2 irq_context: 0 sb_writers &type->i_mutex_dir_key/1 &fsnotify_mark_srcu &group->notification_lock irq_context: 0 sb_writers &type->i_mutex_dir_key/1 &fsnotify_mark_srcu &group->notification_waitq irq_context: 0 kn->active#5 &kernfs_locks->open_file_mutex[count] &lock->wait_lock irq_context: 0 kn->active#5 &kernfs_locks->open_file_mutex[count] &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 tomoyo_ss rcu_read_lock rename_lock irq_context: 0 &mousedev->mutex#2 &dev->mutex#2 &lock->wait_lock irq_context: 0 &mousedev->mutex#2 &dev->mutex#2 rcu_state.exp_mutex rcu_node_0 irq_context: 0 &mousedev->mutex#2 &dev->mutex#2 rcu_state.exp_mutex &obj_hash[i].lock irq_context: 0 &mousedev->mutex#2 &dev->mutex#2 rcu_state.exp_mutex rcu_read_lock &pool->lock irq_context: 0 &mousedev->mutex#2 &dev->mutex#2 rcu_state.exp_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 &mousedev->mutex#2 &dev->mutex#2 rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 &mousedev->mutex#2 &dev->mutex#2 rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 &mousedev->mutex#2 &dev->mutex#2 rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mousedev->mutex#2 &dev->mutex#2 rcu_state.exp_mutex &rnp->exp_wq[0] irq_context: 0 &mousedev->mutex#2 &dev->mutex#2 rcu_state.exp_mutex &rq->__lock irq_context: 0 &mousedev->mutex#2 &dev->mutex#2 rcu_state.exp_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 kn->active#5 &lock->wait_lock irq_context: 0 kn->active#11 &c->lock irq_context: 0 kn->active#5 &p->pi_lock irq_context: 0 kn->active#11 &n->list_lock irq_context: 0 kn->active#5 &p->pi_lock &rq->__lock irq_context: 0 kn->active#11 &n->list_lock &c->lock irq_context: 0 kn->active#5 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 kn->active#12 &kernfs_locks->open_file_mutex[count] &c->lock irq_context: 0 kn->active#12 &kernfs_locks->open_file_mutex[count] &____s->seqcount irq_context: 0 &p->lock remove_cache_srcu rcu_read_lock rcu_node_0 irq_context: 0 &p->lock remove_cache_srcu rcu_read_lock &rq->__lock irq_context: 0 &pipe->rd_wait &ep->lock irq_context: 0 &pipe->rd_wait &ep->lock &ep->wq irq_context: 0 &pipe->rd_wait &ep->lock &ep->wq &p->pi_lock irq_context: 0 &pipe->rd_wait &ep->lock &ep->wq &p->pi_lock &rq->__lock irq_context: 0 &pipe->rd_wait 
&ep->lock &ep->wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &pipe->mutex/1 &pipe->rd_wait &ep->lock irq_context: 0 tasklist_lock &sighand->siglock &sighand->signalfd_wqh irq_context: 0 tasklist_lock &sighand->siglock &sighand->signalfd_wqh &ep->lock irq_context: 0 &pipe->mutex/1 &mm->mmap_lock ptlock_ptr(page)#2 irq_context: 0 &ep->mtx rcu_read_lock &pipe->rd_wait irq_context: 0 &ep->mtx &obj_hash[i].lock irq_context: 0 &sighand->signalfd_wqh irq_context: 0 &mousedev->mutex#2 &dev->mutex#2 &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#14 irq_context: 0 mapping.invalidate_lock#2 irq_context: 0 mapping.invalidate_lock#2 mmu_notifier_invalidate_range_start irq_context: 0 mapping.invalidate_lock#2 &____s->seqcount irq_context: 0 mapping.invalidate_lock#2 pool_lock#2 irq_context: 0 mapping.invalidate_lock#2 &xa->xa_lock#6 irq_context: 0 mapping.invalidate_lock#2 &xa->xa_lock#6 pool_lock#2 irq_context: 0 mapping.invalidate_lock#2 &xa->xa_lock#6 &c->lock irq_context: 0 mapping.invalidate_lock#2 lock#4 irq_context: 0 mapping.invalidate_lock#2 tk_core.seq.seqcount irq_context: 0 mapping.invalidate_lock#2 &dd->lock irq_context: 0 mapping.invalidate_lock#2 rcu_read_lock &dd->lock irq_context: 0 mapping.invalidate_lock#2 rcu_read_lock &obj_hash[i].lock irq_context: 0 mapping.invalidate_lock#2 rcu_read_lock tk_core.seq.seqcount irq_context: 0 mapping.invalidate_lock#2 rcu_read_lock &virtscsi_vq->vq_lock irq_context: 0 &mousedev->mutex#2 &dev->mutex#2 &rnp->exp_lock irq_context: 0 &mousedev->mutex#2 &dev->mutex#2 &rnp->exp_wq[2] irq_context: 0 &mousedev->mutex#2 &dev->mutex#2 &rq->__lock irq_context: 0 &mousedev->mutex#2 &dev->mutex#2 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &p->lock &of->mutex kn->active#5 fw_lock irq_context: 0 &kernfs_locks->open_file_mutex[count] &rcu_state.expedited_wq irq_context: 0 &evdev->mutex &dev->mutex#2 rcu_state.exp_mutex &rnp->exp_wq[0] irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 &fsnotify_mark_srcu &group->notification_waitq &p->pi_lock &cfs_rq->removed.lock irq_context: 0 kn->active#5 &kernfs_locks->open_file_mutex[count] fs_reclaim &rq->__lock irq_context: 0 mapping.invalidate_lock#2 lock#4 &lruvec->lru_lock irq_context: 0 mapping.invalidate_lock#2 rcu_read_lock pool_lock#2 irq_context: 0 mapping.invalidate_lock#2 rcu_read_lock rcu_node_0 irq_context: 0 mapping.invalidate_lock#2 rcu_read_lock &rq->__lock irq_context: 0 sb_writers#8 kn->active#5 fs_reclaim irq_context: 0 sb_writers#8 kn->active#5 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#8 kn->active#5 &kernfs_locks->open_file_mutex[count] irq_context: 0 sb_writers#8 kn->active#5 &kernfs_locks->open_file_mutex[count] fs_reclaim irq_context: 0 sb_writers#8 kn->active#5 &kernfs_locks->open_file_mutex[count] fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#8 tomoyo_ss &c->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#5 uevent_sock_mutex uevent_sock_mutex.wait_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#5 uevent_sock_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 mapping.invalidate_lock#2 &rq->__lock irq_context: 0 sb_writers#8 &of->mutex kn->active#5 uevent_sock_mutex.wait_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#5 &p->pi_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#5 &p->pi_lock &rq->__lock irq_context: 0 sb_writers#8 &of->mutex kn->active#5 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &p->lock 
&of->mutex kn->active#5 remove_cache_srcu irq_context: 0 &p->lock &of->mutex kn->active#5 remove_cache_srcu quarantine_lock irq_context: 0 &p->lock &of->mutex kn->active#5 remove_cache_srcu &c->lock irq_context: 0 &p->lock &of->mutex kn->active#5 remove_cache_srcu &n->list_lock irq_context: 0 &p->lock &of->mutex kn->active#5 remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 &p->lock &of->mutex kn->active#5 remove_cache_srcu &obj_hash[i].lock irq_context: 0 &type->i_mutex_dir_key#2 &n->list_lock irq_context: 0 &type->i_mutex_dir_key#2 &n->list_lock &c->lock irq_context: 0 &mm->mmap_lock &lruvec->lru_lock irq_context: 0 &mm->mmap_lock rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#8 kn->active#5 &kernfs_locks->open_file_mutex[count] &c->lock irq_context: 0 sb_writers#8 kn->active#5 &kernfs_locks->open_file_mutex[count] &____s->seqcount irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 tomoyo_ss remove_cache_srcu irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 tomoyo_ss remove_cache_srcu quarantine_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#5 uevent_sock_mutex rcu_read_lock &rcu_state.gp_wq irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 tomoyo_ss remove_cache_srcu rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#8 &of->mutex kn->active#5 uevent_sock_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 tomoyo_ss remove_cache_srcu rcu_read_lock &rq->__lock irq_context: 0 sb_writers#8 &of->mutex kn->active#5 uevent_sock_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#8 &of->mutex kn->active#5 uevent_sock_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#8 &of->mutex kn->active#5 uevent_sock_mutex rcu_read_lock &rq->__lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &sb->s_type->i_mutex_key#12 &sb->s_type->i_mutex_key#12/4 irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &sb->s_type->i_mutex_key#12 &sb->s_type->i_mutex_key#12/4 &dentry->d_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &sb->s_type->i_mutex_key#12 &sb->s_type->i_mutex_key#12/4 tk_core.seq.seqcount irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &sb->s_type->i_mutex_key#12 &sb->s_type->i_mutex_key#12/4 rcu_read_lock &dentry->d_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &sb->s_type->i_mutex_key#12 &sb->s_type->i_mutex_key#12/4 rename_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &sb->s_type->i_mutex_key#12 &sb->s_type->i_mutex_key#12/4 rename_lock rename_lock.seqcount irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &sb->s_type->i_mutex_key#12 &sb->s_type->i_mutex_key#12/4 rename_lock rename_lock.seqcount &dentry->d_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &sb->s_type->i_mutex_key#12 &sb->s_type->i_mutex_key#12/4 rename_lock rename_lock.seqcount &dentry->d_lock &dentry->d_lock/2 irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &sb->s_type->i_mutex_key#12 &sb->s_type->i_mutex_key#12/4 rename_lock rename_lock.seqcount &dentry->d_lock &dentry->d_lock/2 &dentry->d_lock/3 irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &sb->s_type->i_mutex_key#12 &sb->s_type->i_mutex_key#12/4 rename_lock rename_lock.seqcount &dentry->d_lock &dentry->d_lock/2 &dentry->d_lock/3 &____s->seqcount#4 irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &sb->s_type->i_mutex_key#12 &sb->s_type->i_mutex_key#12/4 rename_lock rename_lock.seqcount 
&dentry->d_lock &dentry->d_lock/2 &dentry->d_lock/3 &____s->seqcount#4 &____s->seqcount#4/1 irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &sb->s_type->i_mutex_key#12 &sb->s_type->i_mutex_key#12/4 rename_lock rename_lock.seqcount &dentry->d_lock/2 irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &sb->s_type->i_mutex_key#12 &sb->s_type->i_mutex_key#12/4 rename_lock rename_lock.seqcount &dentry->d_lock/2 &dentry->d_lock/3 irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &sb->s_type->i_lock_key &xa->xa_lock#6 irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 lock#4 irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 lock#4 &lruvec->lru_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 lock#5 irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &lruvec->lru_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &info->lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &xa->xa_lock#6 irq_context: 0 sb_writers &type->i_mutex_dir_key/1 tomoyo_ss &n->list_lock irq_context: 0 sb_writers &type->i_mutex_dir_key/1 tomoyo_ss &n->list_lock &c->lock irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 tomoyo_ss quarantine_lock irq_context: 0 sb_writers &type->i_mutex_dir_key/1 &sem->wait_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 batched_entropy_u32.lock crngs.lock irq_context: 0 kn->active#34 fs_reclaim irq_context: 0 kn->active#34 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#34 &kernfs_locks->open_file_mutex[count] irq_context: 0 kn->active#34 &kernfs_locks->open_file_mutex[count] fs_reclaim irq_context: 0 kn->active#34 &kernfs_locks->open_file_mutex[count] fs_reclaim mmu_notifier_invalidate_range_start irq_context: softirq &(&wb->dwork)->timer irq_context: softirq &(&wb->dwork)->timer rcu_read_lock &pool->lock/1 irq_context: softirq &(&wb->dwork)->timer rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: softirq &(&wb->dwork)->timer rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: softirq &(&wb->dwork)->timer rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: softirq &(&wb->dwork)->timer rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &p->lock remove_cache_srcu &rq->__lock irq_context: 0 kn->active#29 &rq->__lock irq_context: 0 (wq_completion)writeback irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &wb->work_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &wb->list_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &p->sequence irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) key#10 irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &wb->work_lock &obj_hash[i].lock irq_context: 0 kn->active#29 &____s->seqcount irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &wb->work_lock &base->lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &wb->work_lock &base->lock &obj_hash[i].lock irq_context: 0 mapping.invalidate_lock#2 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 kn->active#28 &c->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#5 rcu_read_lock &rq->__lock irq_context: 0 kn->active#32 &n->list_lock irq_context: 0 kn->active#32 
&n->list_lock &c->lock irq_context: 0 mapping.invalidate_lock#2 &c->lock irq_context: 0 sb_writers &type->i_mutex_dir_key/1 tomoyo_ss quarantine_lock irq_context: 0 sb_writers &type->i_mutex_dir_key/1 remove_cache_srcu irq_context: 0 sb_writers &type->i_mutex_dir_key/1 remove_cache_srcu quarantine_lock irq_context: 0 sb_writers &type->i_mutex_dir_key/1 remove_cache_srcu &c->lock irq_context: 0 sb_writers &type->i_mutex_dir_key/1 remove_cache_srcu &n->list_lock irq_context: 0 sb_writers &type->i_mutex_dir_key/1 remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 sb_writers &type->i_mutex_dir_key/1 remove_cache_srcu &obj_hash[i].lock irq_context: 0 mapping.invalidate_lock#2 batched_entropy_u8.lock irq_context: 0 mapping.invalidate_lock#2 kfence_freelist_lock irq_context: 0 &type->i_mutex_dir_key#4 rcu_read_lock &dentry->d_lock irq_context: 0 &type->i_mutex_dir_key#4 &obj_hash[i].lock irq_context: 0 kn->active#28 &kernfs_locks->open_file_mutex[count] &c->lock irq_context: 0 kn->active#28 &kernfs_locks->open_file_mutex[count] &____s->seqcount irq_context: 0 kn->active#33 &n->list_lock irq_context: 0 kn->active#33 &n->list_lock &c->lock irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 tomoyo_ss remove_cache_srcu &c->lock irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 tomoyo_ss remove_cache_srcu &n->list_lock irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 tomoyo_ss remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 tomoyo_ss remove_cache_srcu &obj_hash[i].lock irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 tomoyo_ss remove_cache_srcu &rq->__lock irq_context: 0 kn->active#35 fs_reclaim irq_context: 0 kn->active#35 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#35 &kernfs_locks->open_file_mutex[count] irq_context: 0 kn->active#35 &kernfs_locks->open_file_mutex[count] fs_reclaim irq_context: 0 kn->active#35 &kernfs_locks->open_file_mutex[count] fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#36 fs_reclaim irq_context: 0 kn->active#36 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#36 &c->lock irq_context: 0 kn->active#36 &kernfs_locks->open_file_mutex[count] irq_context: 0 kn->active#36 &kernfs_locks->open_file_mutex[count] fs_reclaim irq_context: 0 kn->active#36 &kernfs_locks->open_file_mutex[count] fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#32 remove_cache_srcu &c->lock irq_context: 0 kn->active#32 remove_cache_srcu &n->list_lock irq_context: 0 kn->active#32 remove_cache_srcu &obj_hash[i].lock irq_context: 0 kn->active#27 &n->list_lock irq_context: 0 kn->active#27 &n->list_lock &c->lock irq_context: 0 tomoyo_ss rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 &n->list_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 &n->list_lock &c->lock irq_context: 0 sb_writers &type->i_mutex_dir_key/1 &n->list_lock irq_context: 0 sb_writers &type->i_mutex_dir_key/1 &n->list_lock &c->lock irq_context: 0 sb_writers &type->i_mutex_dir_key/1 &rq->__lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &n->list_lock &c->lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 tomoyo_ss quarantine_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 remove_cache_srcu irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 remove_cache_srcu quarantine_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 remove_cache_srcu &c->lock irq_context: 0 
sb_writers#5 &type->i_mutex_dir_key#5 remove_cache_srcu &n->list_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 remove_cache_srcu &obj_hash[i].lock irq_context: 0 &type->i_mutex_dir_key#4 &sb->s_type->i_lock_key#24 &dentry->d_lock &wq &p->pi_lock irq_context: 0 &type->i_mutex_dir_key#4 &sb->s_type->i_lock_key#24 &dentry->d_lock &wq &p->pi_lock &rq->__lock irq_context: 0 &type->i_mutex_dir_key#4 &sb->s_type->i_lock_key#24 &dentry->d_lock &wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#8 &of->mutex kn->active#5 &rfkill->lock irq_context: 0 sb_writers &type->i_mutex_dir_key/1 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 kn->active#30 &n->list_lock irq_context: 0 kn->active#30 &n->list_lock &c->lock irq_context: 0 kn->active#32 &kernfs_locks->open_file_mutex[count] &c->lock irq_context: 0 kn->active#32 &kernfs_locks->open_file_mutex[count] &____s->seqcount irq_context: 0 kn->active#27 remove_cache_srcu irq_context: 0 kn->active#27 remove_cache_srcu quarantine_lock irq_context: 0 kn->active#33 &kernfs_locks->open_file_mutex[count] &c->lock irq_context: 0 kn->active#33 &kernfs_locks->open_file_mutex[count] &____s->seqcount irq_context: 0 sb_writers &type->i_mutex_dir_key/1 tomoyo_ss remove_cache_srcu irq_context: 0 sb_writers &type->i_mutex_dir_key/1 tomoyo_ss remove_cache_srcu quarantine_lock irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 tomoyo_ss &rq->__lock irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 tomoyo_ss &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 tomoyo_ss &rq->__lock irq_context: 0 kn->active#28 &n->list_lock irq_context: 0 kn->active#28 &n->list_lock &c->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#5 &pcp->lock &zone->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#5 &pcp->lock &zone->lock &____s->seqcount irq_context: 0 sb_writers &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 kn->active#28 &____s->seqcount irq_context: 0 kn->active#29 remove_cache_srcu irq_context: 0 kn->active#29 remove_cache_srcu quarantine_lock irq_context: 0 kn->active#29 remove_cache_srcu &c->lock irq_context: 0 kn->active#29 remove_cache_srcu &n->list_lock irq_context: 0 kn->active#29 remove_cache_srcu &obj_hash[i].lock irq_context: 0 sb_writers#5 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 sb_writers &type->i_mutex_dir_key/1 tomoyo_ss remove_cache_srcu &n->list_lock irq_context: 0 sb_writers &type->i_mutex_dir_key/1 tomoyo_ss remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 sb_writers &type->i_mutex_dir_key/1 tomoyo_ss remove_cache_srcu &obj_hash[i].lock irq_context: 0 sb_writers &type->i_mutex_dir_key/1 tomoyo_ss remove_cache_srcu &c->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#5 uevent_sock_mutex fs_reclaim &rq->__lock irq_context: 0 kn->active#37 fs_reclaim irq_context: 0 kn->active#37 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#37 &kernfs_locks->open_file_mutex[count] irq_context: 0 kn->active#37 &kernfs_locks->open_file_mutex[count] fs_reclaim irq_context: 0 kn->active#37 &kernfs_locks->open_file_mutex[count] fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &anon_vma->rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sig->cred_guard_mutex sb_writers#4 batched_entropy_u8.lock irq_context: 0 
&sig->cred_guard_mutex sb_writers#4 kfence_freelist_lock irq_context: 0 &sig->cred_guard_mutex sb_writers#4 jbd2_handle &journal->j_state_lock irq_context: 0 &sig->cred_guard_mutex sb_writers#4 jbd2_handle &journal->j_state_lock &journal->j_wait_commit irq_context: 0 &sig->cred_guard_mutex sb_writers#4 jbd2_handle &journal->j_state_lock &journal->j_wait_commit &p->pi_lock irq_context: 0 &sig->cred_guard_mutex sb_writers#4 jbd2_handle &journal->j_state_lock &journal->j_wait_commit &p->pi_lock &rq->__lock irq_context: 0 &sig->cred_guard_mutex sb_writers#4 jbd2_handle &journal->j_state_lock &journal->j_wait_commit &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sig->cred_guard_mutex sb_writers#4 jbd2_handle &rq->__lock irq_context: 0 (&journal->j_commit_timer) irq_context: 0 &journal->j_checkpoint_mutex irq_context: 0 &journal->j_checkpoint_mutex mmu_notifier_invalidate_range_start irq_context: 0 &journal->j_checkpoint_mutex pool_lock#2 irq_context: 0 &journal->j_checkpoint_mutex tk_core.seq.seqcount irq_context: 0 &journal->j_checkpoint_mutex &dd->lock irq_context: 0 &journal->j_checkpoint_mutex &obj_hash[i].lock irq_context: 0 &journal->j_checkpoint_mutex &base->lock irq_context: 0 &journal->j_checkpoint_mutex &base->lock &obj_hash[i].lock irq_context: 0 &journal->j_checkpoint_mutex rcu_read_lock &pool->lock irq_context: 0 &journal->j_checkpoint_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 &journal->j_checkpoint_mutex rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 &journal->j_checkpoint_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 &journal->j_checkpoint_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 &journal->j_checkpoint_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &journal->j_checkpoint_mutex bit_wait_table + i irq_context: 0 &journal->j_checkpoint_mutex &rq->__lock irq_context: 0 &journal->j_checkpoint_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sig->cred_guard_mutex sb_writers#4 &meta->lock irq_context: 0 &sig->cred_guard_mutex &iint->mutex &lock->wait_lock irq_context: 0 &sig->cred_guard_mutex &stopper->lock irq_context: 0 &sig->cred_guard_mutex &stop_pi_lock irq_context: 0 &sig->cred_guard_mutex &stop_pi_lock &rq->__lock irq_context: 0 &sig->cred_guard_mutex &stop_pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &p->lock &of->mutex kn->active#5 rcu_read_lock rcu_node_0 irq_context: 0 &p->lock &of->mutex kn->active#5 rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)kblockd (work_completion)(&q->timeout_work) &tags->lock irq_context: 0 (wq_completion)kblockd (work_completion)(&q->timeout_work) &obj_hash[i].lock irq_context: 0 (wq_completion)kblockd (work_completion)(&q->timeout_work) &base->lock irq_context: 0 (wq_completion)kblockd (work_completion)(&q->timeout_work) &base->lock &obj_hash[i].lock irq_context: 0 &journal->j_checkpoint_mutex &journal->j_state_lock irq_context: 0 &journal->j_state_lock &journal->j_wait_updates irq_context: 0 &journal->j_list_lock irq_context: 0 &journal->j_state_lock tk_core.seq.seqcount irq_context: 0 &journal->j_state_lock &journal->j_wait_transaction_locked irq_context: 0 &ei->i_es_lock irq_context: 0 lock#4 irq_context: 0 lock#4 &lruvec->lru_lock irq_context: 0 &mapping->private_lock irq_context: 0 &ret->b_state_lock irq_context: 0 &ret->b_state_lock &journal->j_list_lock irq_context: 0 &ei->i_es_lock key#2 irq_context: 0 &dd->lock irq_context: 0 
&journal->j_state_lock irq_context: 0 &journal->j_state_lock &journal->j_list_lock irq_context: 0 rcu_read_lock &dd->lock irq_context: 0 rcu_read_lock &base->lock irq_context: 0 rcu_read_lock &base->lock &obj_hash[i].lock irq_context: 0 kn->active#37 &c->lock irq_context: 0 kn->active#37 &n->list_lock irq_context: 0 kn->active#37 &n->list_lock &c->lock irq_context: 0 kn->active#38 fs_reclaim irq_context: 0 kn->active#38 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#38 &c->lock irq_context: 0 kn->active#38 &n->list_lock irq_context: 0 kn->active#38 &n->list_lock &c->lock irq_context: 0 kn->active#38 &kernfs_locks->open_file_mutex[count] irq_context: 0 kn->active#38 &kernfs_locks->open_file_mutex[count] fs_reclaim irq_context: 0 kn->active#38 &kernfs_locks->open_file_mutex[count] fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &p->lock &of->mutex kn->active#38 i2c_dev_list_lock irq_context: 0 &sig->cred_guard_mutex &lock->wait_lock irq_context: 0 &sig->cred_guard_mutex &p->pi_lock &cfs_rq->removed.lock irq_context: 0 &sig->cred_guard_mutex &p->pi_lock &rq->__lock irq_context: 0 &sig->cred_guard_mutex &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &ret->b_state_lock &journal->j_list_lock rcu_read_lock &memcg->move_lock irq_context: 0 &ret->b_state_lock &journal->j_list_lock rcu_read_lock &xa->xa_lock#6 irq_context: 0 &ret->b_state_lock &journal->j_list_lock &sb->s_type->i_lock_key#3 irq_context: 0 &ret->b_state_lock &journal->j_list_lock &wb->list_lock irq_context: 0 &ret->b_state_lock &journal->j_list_lock &wb->list_lock &sb->s_type->i_lock_key#3 irq_context: 0 &ret->b_state_lock &journal->j_list_lock rcu_read_lock &xa->xa_lock#6 key#10 irq_context: 0 &journal->j_state_lock &journal->j_list_lock irq_context: 0 &sbi->s_md_lock irq_context: 0 &journal->j_fc_wait irq_context: 0 &journal->j_history_lock irq_context: 0 &sig->cred_guard_mutex &x->wait#8 irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock &n->list_lock irq_context: 0 sb_writers#8 remove_cache_srcu rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#8 remove_cache_srcu rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 sb_writers &type->i_mutex_dir_key/1 tomoyo_ss rcu_read_lock rcu_node_0 irq_context: 0 sb_writers &type->i_mutex_dir_key/1 tomoyo_ss rcu_read_lock &rq->__lock irq_context: 0 kn->active#37 &____s->seqcount irq_context: 0 kn->active#37 &kernfs_locks->open_file_mutex[count] &c->lock irq_context: 0 kn->active#37 &kernfs_locks->open_file_mutex[count] &____s->seqcount irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock &rq->__lock irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock &n->list_lock irq_context: 0 &mm->mmap_lock &n->list_lock &c->lock irq_context: 0 &vma->vm_lock->lock fs_reclaim &rq->__lock irq_context: 0 &vma->vm_lock->lock fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock lock#4 &lruvec->lru_lock irq_context: 0 &mm->mmap_lock &anon_vma->rwsem mmu_notifier_invalidate_range_start irq_context: 0 &mm->mmap_lock &anon_vma->rwsem ptlock_ptr(page) irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock &mapping->i_mmap_rwsem &sem->wait_lock irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock &mapping->i_mmap_rwsem &rq->__lock irq_context: 0 
&sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock &mapping->i_mmap_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem &rq->__lock &cfs_rq->removed.lock irq_context: 0 sb_writers#8 &of->mutex kn->active#5 uevent_sock_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock &sem->wait_lock irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock &p->pi_lock irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock &p->pi_lock &rq->__lock irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 tomoyo_ss rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock &anon_vma->rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock &anon_vma->rwsem quarantine_lock irq_context: 0 &mm->mmap_lock rcu_node_0 irq_context: 0 &mm->mmap_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock rcu_read_lock &rcu_state.gp_wq irq_context: 0 &mm->mmap_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 &mm->mmap_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &cfs_rq->removed.lock irq_context: 0 &mm->mmap_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 &mm->mmap_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 &mm->mmap_lock fill_pool_map-wait-type-override pool_lock irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem fill_pool_map-wait-type-override pool_lock irq_context: 0 &mm->mmap_lock fill_pool_map-wait-type-override &c->lock irq_context: 0 &mm->mmap_lock fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 &mm->mmap_lock fs_reclaim &rq->__lock irq_context: 0 &mm->mmap_lock fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 rcu_read_lock rcu_node_0 irq_context: 0 &mm->mmap_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 &mm->mmap_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 videodev_lock irq_context: 0 &dev_instance->mutex irq_context: 0 &dev_instance->mutex fs_reclaim irq_context: 0 &dev_instance->mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev_instance->mutex pool_lock#2 irq_context: 0 &dev_instance->mutex vicodec_core:1844:(hdl)->_lock irq_context: 0 &dev_instance->mutex &c->lock irq_context: 0 &dev_instance->mutex &vdev->fh_lock irq_context: 0 &dev_instance->mutex &rq->__lock irq_context: 0 &dev_instance->mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem &anon_vma->rwsem &rq->__lock irq_context: 0 &mdev->req_queue_mutex irq_context: 0 &mdev->req_queue_mutex &dev_instance->mutex irq_context: 0 &mdev->req_queue_mutex &dev_instance->mutex &m2m_dev->job_spinlock irq_context: 0 &mdev->req_queue_mutex &dev_instance->mutex &q->done_wq irq_context: 
0 &mdev->req_queue_mutex &dev_instance->mutex &q->mmap_lock irq_context: 0 &mdev->req_queue_mutex &dev_instance->mutex &obj_hash[i].lock irq_context: 0 &mdev->req_queue_mutex &dev_instance->mutex pool_lock#2 irq_context: 0 &mdev->req_queue_mutex &vdev->fh_lock irq_context: 0 &mdev->req_queue_mutex &mdev->graph_mutex irq_context: 0 &mdev->req_queue_mutex vicodec_core:1844:(hdl)->_lock irq_context: 0 &mdev->req_queue_mutex vicodec_core:1844:(hdl)->_lock &obj_hash[i].lock irq_context: 0 &mdev->req_queue_mutex vicodec_core:1844:(hdl)->_lock pool_lock#2 irq_context: 0 &mdev->req_queue_mutex &obj_hash[i].lock irq_context: 0 &mdev->req_queue_mutex pool_lock#2 irq_context: 0 sb_writers#8 &of->mutex kn->active#5 uevent_sock_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rcu_read_lock &vma->vm_lock->lock rcu_node_0 irq_context: 0 rcu_read_lock &vma->vm_lock->lock &rq->__lock irq_context: 0 rcu_read_lock &vma->vm_lock->lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &dev->dev_mutex irq_context: 0 &dev->dev_mutex fs_reclaim irq_context: 0 &dev->dev_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->dev_mutex pool_lock#2 irq_context: 0 &dev->dev_mutex vim2m:1183:(hdl)->_lock irq_context: 0 &dev->dev_mutex &obj_hash[i].lock irq_context: 0 &dev->dev_mutex &vdev->fh_lock irq_context: 0 &mdev->req_queue_mutex vim2m:1183:(hdl)->_lock irq_context: 0 &mdev->req_queue_mutex vim2m:1183:(hdl)->_lock &obj_hash[i].lock irq_context: 0 &mdev->req_queue_mutex vim2m:1183:(hdl)->_lock pool_lock#2 irq_context: 0 &mdev->req_queue_mutex &dev->dev_mutex irq_context: 0 &mdev->req_queue_mutex &dev->dev_mutex &m2m_dev->job_spinlock irq_context: 0 &mdev->req_queue_mutex &dev->dev_mutex &q->done_wq irq_context: 0 &mdev->req_queue_mutex &dev->dev_mutex &q->mmap_lock irq_context: 0 &mdev->req_queue_mutex &dev->dev_mutex &obj_hash[i].lock irq_context: 0 &mdev->req_queue_mutex &dev->dev_mutex pool_lock#2 irq_context: 0 &mm->mmap_lock rcu_node_0 irq_context: 0 &pipe->rd_wait &ep->lock &ep->wq &p->pi_lock &cfs_rq->removed.lock irq_context: 0 &pipe->mutex/1 &pipe->rd_wait &ep->lock &ep->wq irq_context: 0 &pipe->mutex/1 &pipe->rd_wait &ep->lock &ep->wq &p->pi_lock irq_context: 0 &pipe->mutex/1 &pipe->rd_wait &ep->lock &ep->wq &p->pi_lock &rq->__lock irq_context: 0 &pipe->mutex/1 &pipe->rd_wait &ep->lock &ep->wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 &mm->mmap_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 tasklist_lock &sighand->siglock &sighand->signalfd_wqh &p->pi_lock irq_context: 0 tasklist_lock &sighand->siglock &sighand->signalfd_wqh &p->pi_lock &rq->__lock irq_context: 0 tasklist_lock &sighand->siglock &sighand->signalfd_wqh &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock rcu_read_lock pgd_lock irq_context: 0 &mm->mmap_lock rcu_read_lock rcu_read_lock pool_lock#2 irq_context: 0 &mm->mmap_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 &mm->mmap_lock rcu_read_lock key irq_context: 0 &mm->mmap_lock rcu_read_lock pcpu_lock irq_context: 0 &mm->mmap_lock rcu_read_lock percpu_counters_lock irq_context: 0 &mm->mmap_lock rcu_read_lock &cfs_rq->removed.lock irq_context: 0 kn->active#39 fs_reclaim irq_context: 0 kn->active#39 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#39 &kernfs_locks->open_file_mutex[count] irq_context: 0 kn->active#39 
&kernfs_locks->open_file_mutex[count] fs_reclaim irq_context: 0 kn->active#39 &kernfs_locks->open_file_mutex[count] fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#39 &kernfs_locks->open_file_mutex[count] fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 kn->active#39 &kernfs_locks->open_file_mutex[count] batched_entropy_u8.lock irq_context: 0 kn->active#39 &kernfs_locks->open_file_mutex[count] kfence_freelist_lock irq_context: 0 kn->active#5 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &sem->wait_lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &p->pi_lock irq_context: 0 &pipe->mutex/1 &cfs_rq->removed.lock irq_context: 0 &pipe->mutex/1 &obj_hash[i].lock irq_context: 0 &pipe->mutex/1 &pipe->rd_wait &ep->lock &ep->wq &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 kn->active#39 &kernfs_locks->open_file_mutex[count] &c->lock irq_context: 0 kn->active#39 &kernfs_locks->open_file_mutex[count] &____s->seqcount irq_context: 0 &mm->mmap_lock rcu_read_lock rcu_read_lock ptlock_ptr(page)#2 &folio_wait_table[i] irq_context: 0 &mm->mmap_lock rcu_read_lock rcu_read_lock ptlock_ptr(page)#2 &folio_wait_table[i] &p->pi_lock irq_context: 0 &mm->mmap_lock rcu_read_lock rcu_read_lock ptlock_ptr(page)#2 &folio_wait_table[i] &p->pi_lock &rq->__lock irq_context: 0 &mm->mmap_lock rcu_read_lock rcu_read_lock ptlock_ptr(page)#2 &folio_wait_table[i] &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 fh->state->lock irq_context: 0 &vdev->fh_lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &cfs_rq->removed.lock irq_context: 0 &sig->cred_guard_mutex &n->list_lock &c->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#5 &cfs_rq->removed.lock irq_context: 0 kn->active#39 &c->lock irq_context: 0 &pipe->mutex/1 &obj_hash[i].lock pool_lock irq_context: 0 &mm->mmap_lock &obj_hash[i].lock pool_lock irq_context: 0 &mm->mmap_lock &anon_vma->rwsem &cfs_rq->removed.lock irq_context: 0 &mm->mmap_lock &anon_vma->rwsem &obj_hash[i].lock irq_context: 0 sb_writers#8 &of->mutex kn->active#5 rcu_read_lock &rcu_state.gp_wq irq_context: 0 sb_writers#8 &of->mutex kn->active#5 rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#5 rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#8 &of->mutex kn->active#5 rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 kn->active#5 pool_lock#2 irq_context: 0 fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock rcu_read_lock rcu_node_0 irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock rcu_read_lock &rq->__lock irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem fill_pool_map-wait-type-override rcu_read_lock pool_lock#2 irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem fill_pool_map-wait-type-override &obj_hash[i].lock irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem &anon_vma->rwsem fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem &anon_vma->rwsem fill_pool_map-wait-type-override pool_lock 
irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock pool_lock irq_context: 0 &mm->mmap_lock &folio_wait_table[i] irq_context: 0 &mm->mmap_lock &folio_wait_table[i] &p->pi_lock irq_context: 0 &mm->mmap_lock &folio_wait_table[i] &p->pi_lock &rq->__lock irq_context: 0 &mm->mmap_lock &folio_wait_table[i] &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock remove_cache_srcu &rq->__lock irq_context: 0 &vcapture->lock irq_context: 0 &mdev->graph_mutex irq_context: 0 &mm->mmap_lock &p->pi_lock &cfs_rq->removed.lock irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem &p->pi_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#5 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &mapping->i_mmap_rwsem &sem->wait_lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &mapping->i_mmap_rwsem &rq->__lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &mapping->i_mmap_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &p->pi_lock &rq->__lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem &anon_vma->rwsem rcu_read_lock rcu_node_0 irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem &anon_vma->rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem &anon_vma->rwsem rcu_read_lock &rq->__lock irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem &anon_vma->rwsem rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem &anon_vma->rwsem rcu_read_lock &rcu_state.gp_wq irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem &anon_vma->rwsem rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem &anon_vma->rwsem rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem &anon_vma->rwsem rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &dev->mutex#3 irq_context: 0 &mdev->req_queue_mutex &dev->mutex#3 irq_context: 0 &mdev->req_queue_mutex &dev->mutex#3 &vdev->fh_lock irq_context: 0 kn->active#39 &____s->seqcount irq_context: 0 &pipe->mutex/1 &pipe->rd_wait &ep->lock &ep->wq &p->pi_lock &cfs_rq->removed.lock irq_context: 0 &p->lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &mm->mmap_lock pgd_lock irq_context: 0 &mm->mmap_lock key irq_context: 0 &mm->mmap_lock pcpu_lock irq_context: 0 &mm->mmap_lock percpu_counters_lock irq_context: 0 &pipe->rd_wait &ep->lock &ep->wq &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 sb_writers#8 &of->mutex kn->active#5 uevent_sock_mutex remove_cache_srcu &rq->__lock irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem rcu_read_lock &rcu_state.gp_wq irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 batched_entropy_u8.lock irq_context: 0 
dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 kfence_freelist_lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 fs_reclaim &rq->__lock irq_context: 0 &vma->vm_lock->lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock &anon_vma->rwsem &sem->wait_lock irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock &anon_vma->rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &mapping->i_mmap_rwsem &cfs_rq->removed.lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &mapping->i_mmap_rwsem &obj_hash[i].lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &mapping->i_mmap_rwsem pool_lock#2 irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &anon_vma->rwsem &sem->wait_lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &anon_vma->rwsem &rq->__lock &cfs_rq->removed.lock irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock &meta->lock irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock kfence_freelist_lock irq_context: 0 &mm->mmap_lock remove_cache_srcu &c->lock irq_context: 0 &mm->mmap_lock remove_cache_srcu &n->list_lock irq_context: 0 &mm->mmap_lock remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 &mm->mmap_lock remove_cache_srcu &obj_hash[i].lock irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem &meta->lock irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem kfence_freelist_lock irq_context: 0 tomoyo_ss rcu_read_lock rcu_read_lock &cfs_rq->removed.lock irq_context: 0 tomoyo_ss rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 tomoyo_ss rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &mm->mmap_lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 tasklist_lock &sighand->siglock batched_entropy_u8.lock irq_context: 0 tasklist_lock &sighand->siglock kfence_freelist_lock irq_context: 0 &sighand->siglock &meta->lock irq_context: 0 &sighand->siglock kfence_freelist_lock irq_context: 0 tasklist_lock &sighand->siglock &sighand->signalfd_wqh &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &mm->mmap_lock fs_reclaim &rq->__lock irq_context: 0 &u->bindlock irq_context: 0 &u->bindlock fs_reclaim irq_context: 0 &u->bindlock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &u->bindlock pool_lock#2 irq_context: 0 &u->bindlock batched_entropy_u32.lock irq_context: 0 &u->bindlock &net->unx.table.locks[i] irq_context: 0 &u->bindlock &net->unx.table.locks[i] &net->unx.table.locks[i]/1 irq_context: 0 &u->bindlock &net->unx.table.locks[i]/1 irq_context: 0 &mm->mmap_lock remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &u->lock &u->lock/1 &sk->sk_peer_lock irq_context: 0 &u->lock &u->lock/1 &dentry->d_lock irq_context: 0 &u->lock &u->lock/1 &sk->sk_peer_lock &sk->sk_peer_lock/1 irq_context: 0 &u->lock &u->lock/1 &sk->sk_peer_lock/1 irq_context: 0 &u->iolock rcu_read_lock &ei->socket.wq.wait irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_read_lock &ei->socket.wq.wait irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_read_lock &ei->socket.wq.wait &p->pi_lock irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_read_lock &ei->socket.wq.wait &p->pi_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_read_lock 
&ei->socket.wq.wait &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &group->mark_mutex &sb->s_type->i_lock_key &dentry->d_lock &dentry->d_lock/1 irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_read_lock &dentry->d_lock irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem fill_pool_map-wait-type-override &c->lock irq_context: 0 &vma->vm_lock->lock rcu_read_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 &mm->mmap_lock lock#4 &lruvec->lru_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem &anon_vma->rwsem &cfs_rq->removed.lock irq_context: 0 &vma->vm_lock->lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock remove_cache_srcu &rq->__lock irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem &cfs_rq->removed.lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &mm->mmap_lock remove_cache_srcu pgd_lock irq_context: 0 &mm->mmap_lock remove_cache_srcu key irq_context: 0 &mm->mmap_lock remove_cache_srcu pcpu_lock irq_context: 0 &mm->mmap_lock remove_cache_srcu percpu_counters_lock irq_context: 0 &mm->mmap_lock remove_cache_srcu pool_lock#2 irq_context: 0 &mm->mmap_lock remove_cache_srcu &cfs_rq->removed.lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 tomoyo_ss &rq->__lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 tomoyo_ss rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 tomoyo_ss rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 tomoyo_ss rcu_read_lock rcu_node_0 irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &anon_vma->rwsem rcu_read_lock rcu_node_0 irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &anon_vma->rwsem rcu_read_lock &rq->__lock irq_context: 0 &mm->mmap_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock &vma->vm_lock->lock &rq->__lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 quarantine_lock irq_context: 0 &iint->mutex &lock->wait_lock irq_context: 0 &mm->mmap_lock mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 &mm->mmap_lock remove_cache_srcu rcu_read_lock rcu_node_0 irq_context: 0 &mm->mmap_lock remove_cache_srcu rcu_read_lock &rq->__lock irq_context: 0 &mm->mmap_lock remove_cache_srcu rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 kn->active#37 &kernfs_locks->open_file_mutex[count] &n->list_lock irq_context: 0 kn->active#37 &kernfs_locks->open_file_mutex[count] &n->list_lock &c->lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 tomoyo_ss mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 remove_cache_srcu &pcp->lock &zone->lock irq_context: 0 remove_cache_srcu &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &mm->mmap_lock remove_cache_srcu &pcp->lock &zone->lock irq_context: 0 &mm->mmap_lock remove_cache_srcu &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &mm->mmap_lock &anon_vma->rwsem &mm->page_table_lock &meta->lock irq_context: 0 &mm->mmap_lock &anon_vma->rwsem &mm->page_table_lock kfence_freelist_lock irq_context: softirq drivers/base/dd.c:321 irq_context: softirq drivers/base/dd.c:321 rcu_read_lock &pool->lock irq_context: softirq 
drivers/base/dd.c:321 rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: softirq drivers/base/dd.c:321 rcu_read_lock &pool->lock &p->pi_lock irq_context: softirq drivers/base/dd.c:321 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: softirq drivers/base/dd.c:321 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (deferred_probe_timeout_work).work irq_context: 0 (wq_completion)events (deferred_probe_timeout_work).work device_links_lock irq_context: 0 (wq_completion)events (deferred_probe_timeout_work).work device_links_lock &k->list_lock irq_context: 0 (wq_completion)events (deferred_probe_timeout_work).work device_links_lock &k->k_lock irq_context: 0 (wq_completion)events (deferred_probe_timeout_work).work deferred_probe_mutex irq_context: 0 (wq_completion)events (deferred_probe_timeout_work).work rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)events (deferred_probe_timeout_work).work rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)events (deferred_probe_timeout_work).work rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)events (deferred_probe_timeout_work).work rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events (deferred_probe_timeout_work).work rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (deferred_probe_timeout_work).work deferred_probe_work irq_context: 0 (wq_completion)events (deferred_probe_timeout_work).work &x->wait#10 irq_context: 0 (wq_completion)events (deferred_probe_timeout_work).work &pool->lock irq_context: 0 (wq_completion)events (deferred_probe_timeout_work).work &rq->__lock irq_context: 0 (wq_completion)events (deferred_probe_timeout_work).work &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_unbound (work_completion)(&barr->work) irq_context: 0 (wq_completion)events_unbound (work_completion)(&barr->work) &x->wait#10 irq_context: 0 (wq_completion)events_unbound (work_completion)(&barr->work) &x->wait#10 &p->pi_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&barr->work) &x->wait#10 &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&barr->work) &x->wait#10 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (deferred_probe_timeout_work).work &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&(&krcp->krw_arr[i].rcu_work)->work) rcu_callback quarantine_lock irq_context: 0 kn->active#39 &n->list_lock irq_context: 0 kn->active#39 &n->list_lock &c->lock irq_context: 0 rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock &cfs_rq->removed.lock irq_context: 0 &ep->mtx key#11 irq_context: 0 &type->i_mutex_dir_key#2 rcu_read_lock rcu_node_0 irq_context: 0 &type->i_mutex_dir_key#2 rcu_read_lock &rq->__lock irq_context: 0 &type->i_mutex_dir_key#2 rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cgroup_threadgroup_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cgroup_threadgroup_rwsem &rq->__lock &cfs_rq->removed.lock irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem &base->lock irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem &base->lock &obj_hash[i].lock irq_context: 0 &pipe->mutex/1 &pcp->lock &zone->lock irq_context: 0 remove_cache_srcu rcu_read_lock &rcu_state.gp_wq irq_context: 0 
remove_cache_srcu rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 remove_cache_srcu rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 remove_cache_srcu rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem &anon_vma->rwsem fill_pool_map-wait-type-override &rq->__lock irq_context: 0 &mm->mmap_lock &base->lock irq_context: 0 &mm->mmap_lock &base->lock &obj_hash[i].lock irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem &anon_vma->rwsem quarantine_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 tomoyo_ss &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &vma->vm_lock->lock rcu_read_lock &cfs_rq->removed.lock irq_context: 0 &vma->vm_lock->lock rcu_read_lock &obj_hash[i].lock irq_context: 0 &vma->vm_lock->lock rcu_read_lock &rcu_state.gp_wq irq_context: 0 &vma->vm_lock->lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 &vma->vm_lock->lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 &vma->vm_lock->lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &type->i_mutex_dir_key#5 &dentry->d_lock &dentry->d_lock/1 irq_context: 0 sb_writers &type->i_mutex_dir_key/1 &sb->s_type->i_mutex_key#4 &sb->s_type->i_mutex_key#4/4 irq_context: 0 sb_writers &type->i_mutex_dir_key/1 &sb->s_type->i_mutex_key#4 &sb->s_type->i_mutex_key#4/4 &dentry->d_lock irq_context: 0 sb_writers &type->i_mutex_dir_key/1 &sb->s_type->i_mutex_key#4 &sb->s_type->i_mutex_key#4/4 tk_core.seq.seqcount irq_context: 0 sb_writers &type->i_mutex_dir_key/1 &sb->s_type->i_mutex_key#4 &sb->s_type->i_mutex_key#4/4 rcu_read_lock &dentry->d_lock irq_context: 0 sb_writers &type->i_mutex_dir_key/1 &sb->s_type->i_mutex_key#4 &sb->s_type->i_mutex_key#4/4 rename_lock irq_context: 0 sb_writers &type->i_mutex_dir_key/1 &sb->s_type->i_mutex_key#4 &sb->s_type->i_mutex_key#4/4 rename_lock rename_lock.seqcount irq_context: 0 sb_writers &type->i_mutex_dir_key/1 &sb->s_type->i_mutex_key#4 &sb->s_type->i_mutex_key#4/4 rename_lock rename_lock.seqcount &dentry->d_lock irq_context: 0 sb_writers &type->i_mutex_dir_key/1 &sb->s_type->i_mutex_key#4 &sb->s_type->i_mutex_key#4/4 rename_lock rename_lock.seqcount &dentry->d_lock &dentry->d_lock/2 irq_context: 0 sb_writers &type->i_mutex_dir_key/1 &sb->s_type->i_mutex_key#4 &sb->s_type->i_mutex_key#4/4 rename_lock rename_lock.seqcount &dentry->d_lock &dentry->d_lock/2 &dentry->d_lock/3 irq_context: 0 sb_writers &type->i_mutex_dir_key/1 &sb->s_type->i_mutex_key#4 &sb->s_type->i_mutex_key#4/4 rename_lock rename_lock.seqcount &dentry->d_lock &dentry->d_lock/2 &dentry->d_lock/3 &____s->seqcount#4 irq_context: 0 sb_writers &type->i_mutex_dir_key/1 &sb->s_type->i_mutex_key#4 &sb->s_type->i_mutex_key#4/4 rename_lock rename_lock.seqcount &dentry->d_lock &dentry->d_lock/2 &dentry->d_lock/3 &____s->seqcount#4 &____s->seqcount#4/1 irq_context: 0 sb_writers &type->i_mutex_dir_key/1 &sb->s_type->i_mutex_key#4 &sb->s_type->i_mutex_key#4/4 rename_lock rename_lock.seqcount &dentry->d_lock/2 irq_context: 0 sb_writers &type->i_mutex_dir_key/1 &sb->s_type->i_mutex_key#4 &sb->s_type->i_mutex_key#4/4 rename_lock rename_lock.seqcount &dentry->d_lock/2 &dentry->d_lock/3 irq_context: 0 sb_writers &type->i_mutex_dir_key/1 &sb->s_type->i_mutex_key#4/4 irq_context: 0 sb_writers &type->i_mutex_dir_key/1 krc.lock irq_context: 0 sb_writers &type->i_mutex_dir_key/1 &xa->xa_lock#6 irq_context: 0 
(wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &wb->list_lock &p->sequence irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &wb->list_lock key#10 irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &wb->list_lock &sb->s_type->i_lock_key#22 irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &wb->list_lock &sb->s_type->i_lock_key#3 irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &wb->list_lock &type->s_umount_key#31 &sb->s_type->i_lock_key#22 irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#31 &sb->s_type->i_lock_key#22 irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#31 &wb->list_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#31 &wb->list_lock &sb->s_type->i_lock_key#22 irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#31 &sbi->s_writepages_rwsem irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &wb->list_lock &type->s_umount_key#40 &sb->s_type->i_lock_key#3 irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#40 &sb->s_type->i_lock_key#3 irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#40 rcu_read_lock &memcg->move_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#40 rcu_read_lock &xa->xa_lock#6 irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#40 rcu_read_lock &xa->xa_lock#6 &s->s_inode_wblist_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#40 mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#40 pool_lock#2 irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#40 tk_core.seq.seqcount irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#40 &dd->lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#40 &obj_hash[i].lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#40 &base->lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#40 &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#40 &c->lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#40 &____s->seqcount irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#40 lock#4 irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#40 lock#4 &lruvec->lru_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#40 lock#5 irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#40 rcu_read_lock key#10 irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#40 rcu_read_lock &xa->xa_lock#6 key#10 irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) 
&type->s_umount_key#40 &wb->list_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#40 &wb->list_lock &sb->s_type->i_lock_key#3 irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &dd->lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) rcu_read_lock &dd->lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) rcu_read_lock &virtscsi_vq->vq_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) rcu_read_lock &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) rcu_read_lock &c->lock irq_context: softirq rcu_read_lock &memcg->move_lock irq_context: softirq rcu_read_lock &xa->xa_lock#6 irq_context: softirq rcu_read_lock &xa->xa_lock#6 &obj_hash[i].lock irq_context: softirq rcu_read_lock &xa->xa_lock#6 &base->lock irq_context: softirq rcu_read_lock &xa->xa_lock#6 &base->lock &obj_hash[i].lock irq_context: softirq rcu_read_lock &xa->xa_lock#6 key#10 irq_context: softirq rcu_read_lock &xa->xa_lock#6 key#12 irq_context: softirq rcu_read_lock &xa->xa_lock#6 &wb->work_lock irq_context: softirq rcu_read_lock &xa->xa_lock#6 &wb->work_lock &obj_hash[i].lock irq_context: softirq rcu_read_lock &xa->xa_lock#6 &wb->work_lock &base->lock irq_context: softirq rcu_read_lock &xa->xa_lock#6 &wb->work_lock &base->lock &obj_hash[i].lock irq_context: softirq rcu_read_lock &xa->xa_lock#6 &s->s_inode_wblist_lock irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock ptlock_ptr(page)#2 &pcp->lock &zone->lock irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 tomoyo_ss &base->lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 tomoyo_ss &base->lock &obj_hash[i].lock irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock &p->pi_lock &cfs_rq->removed.lock irq_context: softirq &(&wb->bw_dwork)->timer irq_context: softirq &(&wb->bw_dwork)->timer rcu_read_lock &pool->lock/1 irq_context: softirq &(&wb->bw_dwork)->timer rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: softirq &(&wb->bw_dwork)->timer rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: softirq &(&wb->bw_dwork)->timer rcu_read_lock &pool->lock/1 &p->pi_lock &cfs_rq->removed.lock irq_context: softirq &(&wb->bw_dwork)->timer rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: softirq &(&wb->bw_dwork)->timer rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->bw_dwork)->work) irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->bw_dwork)->work) &wb->list_lock irq_context: 0 &mm->mmap_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &mm->mmap_lock ptlock_ptr(page)#2 &pcp->lock &zone->lock irq_context: 0 sb_writers#5 &sb->s_type->i_mutex_key#12 rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &base->lock irq_context: 0 dup_mmap_sem 
&mm->mmap_lock &mm->mmap_lock/1 &base->lock &obj_hash[i].lock irq_context: 0 &mm->mmap_lock &anon_vma->rwsem pool_lock#2 irq_context: 0 kn->active#39 remove_cache_srcu irq_context: 0 kn->active#39 remove_cache_srcu quarantine_lock irq_context: 0 sb_writers &type->i_mutex_dir_key/1 &obj_hash[i].lock pool_lock irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem rcu_read_lock &rcu_state.gp_wq &p->pi_lock &cfs_rq->removed.lock irq_context: 0 remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 remove_cache_srcu &cfs_rq->removed.lock irq_context: 0 &mm->mmap_lock &anon_vma->rwsem &base->lock irq_context: 0 &mm->mmap_lock &anon_vma->rwsem &base->lock &obj_hash[i].lock irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock quarantine_lock irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock &mapping->i_mmap_rwsem &rq->__lock &cfs_rq->removed.lock irq_context: 0 tomoyo_ss &obj_hash[i].lock pool_lock irq_context: 0 &type->i_mutex_dir_key#5 &mm->mmap_lock ptlock_ptr(page)#2 irq_context: 0 &mm->mmap_lock fs_reclaim &cfs_rq->removed.lock irq_context: 0 &mm->mmap_lock fs_reclaim &obj_hash[i].lock irq_context: 0 kn->active#5 fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 kn->active#5 fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers &type->i_mutex_dir_key/1 rcu_read_lock rcu_node_0 irq_context: 0 sb_writers &type->i_mutex_dir_key/1 rcu_read_lock &rq->__lock irq_context: 0 sb_writers &type->i_mutex_dir_key/1 rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers &type->i_mutex_dir_key/1 rcu_read_lock &rcu_state.gp_wq irq_context: 0 sb_writers &type->i_mutex_dir_key/1 rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 (wq_completion)events (debug_obj_work).work pool_lock#2 irq_context: 0 &mm->mmap_lock rcu_read_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 remove_cache_srcu rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 tasklist_lock &sighand->siglock &sighand->signalfd_wqh &p->pi_lock &cfs_rq->removed.lock irq_context: 0 rcu_read_lock &obj_hash[i].lock pool_lock irq_context: 0 remove_cache_srcu rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 remove_cache_srcu rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 &sig->cred_guard_mutex remove_cache_srcu irq_context: 0 &sig->cred_guard_mutex remove_cache_srcu quarantine_lock irq_context: 0 &sig->cred_guard_mutex remove_cache_srcu &c->lock irq_context: 0 &sig->cred_guard_mutex remove_cache_srcu &n->list_lock irq_context: 0 &sig->cred_guard_mutex remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 &sig->cred_guard_mutex remove_cache_srcu &obj_hash[i].lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &mapping->i_mmap_rwsem &rq->__lock &cfs_rq->removed.lock irq_context: 0 &p->lock &mm->mmap_lock ptlock_ptr(page)#2 irq_context: 0 &mm->mmap_lock fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers &type->i_mutex_dir_key/1 tomoyo_ss &rq->__lock irq_context: 0 sb_writers &type->i_mutex_dir_key/1 tomoyo_ss &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 tomoyo_ss rcu_read_lock &cfs_rq->removed.lock irq_context: 0 tomoyo_ss rcu_read_lock &obj_hash[i].lock irq_context: 0 sb_writers &type->i_mutex_dir_key/1 &rq->__lock &cfs_rq->removed.lock irq_context: softirq (&dom->period_timer) irq_context: softirq (&dom->period_timer) key#13 irq_context: softirq 
(&dom->period_timer) &p->sequence irq_context: softirq (&dom->period_timer) &obj_hash[i].lock irq_context: softirq (&dom->period_timer) &base->lock irq_context: softirq (&dom->period_timer) &base->lock &obj_hash[i].lock irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock rcu_read_lock &rcu_state.gp_wq irq_context: 0 &p->lock &of->mutex kn->active#5 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 &p->lock batched_entropy_u8.lock irq_context: 0 &p->lock kfence_freelist_lock irq_context: 0 &mm->mmap_lock &anon_vma->rwsem &obj_hash[i].lock pool_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 tomoyo_ss rcu_read_lock &rq->__lock irq_context: 0 &type->i_mutex_dir_key#4 &root->kernfs_rwsem &n->list_lock &c->lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 tomoyo_ss rcu_read_lock &rcu_state.gp_wq irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 tomoyo_ss rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 tomoyo_ss rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 tomoyo_ss rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq drivers/char/random.c:251 rcu_read_lock &pool->lock/1 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 sb_writers &type->i_mutex_dir_key/1 rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers &type->i_mutex_dir_key/1 rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers &type->i_mutex_dir_key/1 rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 sb_writers &type->i_mutex_dir_key/1 batched_entropy_u8.lock crngs.lock irq_context: 0 sb_writers &type->i_mutex_dir_key/1 batched_entropy_u8.lock crngs.lock base_crng.lock irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock &anon_vma->rwsem &rq->__lock &cfs_rq->removed.lock irq_context: 0 &mm->mmap_lock remove_cache_srcu rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 &mm->mmap_lock remove_cache_srcu rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 &mm->mmap_lock remove_cache_srcu rcu_read_lock &rcu_state.gp_wq irq_context: 0 &mm->mmap_lock remove_cache_srcu rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 &mm->mmap_lock remove_cache_srcu rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 &mm->mmap_lock remove_cache_srcu rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &anon_vma->rwsem rcu_read_lock &cfs_rq->removed.lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &anon_vma->rwsem rcu_read_lock &obj_hash[i].lock irq_context: 0 sb_writers &type->i_mutex_dir_key/1 tomoyo_ss rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers &type->i_mutex_dir_key/1 krc.lock &obj_hash[i].lock irq_context: 0 sb_writers &type->i_mutex_dir_key/1 krc.lock &base->lock irq_context: 0 sb_writers &type->i_mutex_dir_key/1 krc.lock &base->lock &obj_hash[i].lock irq_context: 0 &mm->mmap_lock lock#4 &lruvec->lru_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 &mm->mmap_lock fs_reclaim mmu_notifier_invalidate_range_start &cfs_rq->removed.lock irq_context: 0 
&mm->mmap_lock fs_reclaim mmu_notifier_invalidate_range_start &obj_hash[i].lock irq_context: 0 kn->active#39 remove_cache_srcu &c->lock irq_context: 0 kn->active#39 remove_cache_srcu &n->list_lock irq_context: 0 kn->active#39 remove_cache_srcu &obj_hash[i].lock irq_context: 0 &mm->mmap_lock rcu_read_lock &obj_hash[i].lock pool_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 &pcp->lock &zone->lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 &pcp->lock &zone->lock &____s->seqcount irq_context: 0 kn->active#40 fs_reclaim irq_context: 0 kn->active#40 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#40 &c->lock irq_context: 0 kn->active#40 &kernfs_locks->open_file_mutex[count] irq_context: 0 kn->active#40 &kernfs_locks->open_file_mutex[count] fs_reclaim irq_context: 0 kn->active#40 &kernfs_locks->open_file_mutex[count] fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &mm->mmap_lock remove_cache_srcu rcu_node_0 irq_context: 0 kn->active#5 rcu_read_lock rcu_node_0 irq_context: 0 kn->active#5 rcu_read_lock &rq->__lock irq_context: 0 tomoyo_ss &base->lock irq_context: 0 tomoyo_ss &base->lock &obj_hash[i].lock irq_context: 0 kn->active#41 fs_reclaim irq_context: 0 kn->active#41 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#41 &kernfs_locks->open_file_mutex[count] irq_context: 0 kn->active#41 &kernfs_locks->open_file_mutex[count] fs_reclaim irq_context: 0 kn->active#41 &kernfs_locks->open_file_mutex[count] fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#42 fs_reclaim irq_context: 0 kn->active#42 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#42 &kernfs_locks->open_file_mutex[count] irq_context: 0 kn->active#42 &kernfs_locks->open_file_mutex[count] fs_reclaim irq_context: 0 kn->active#42 &kernfs_locks->open_file_mutex[count] fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#42 &kernfs_locks->open_file_mutex[count] &c->lock irq_context: 0 kn->active#43 fs_reclaim irq_context: 0 kn->active#43 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#43 remove_cache_srcu irq_context: 0 kn->active#43 remove_cache_srcu quarantine_lock irq_context: 0 kn->active#43 &c->lock irq_context: 0 kn->active#43 &kernfs_locks->open_file_mutex[count] irq_context: 0 kn->active#43 &kernfs_locks->open_file_mutex[count] fs_reclaim irq_context: 0 kn->active#43 &kernfs_locks->open_file_mutex[count] fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &p->lock &of->mutex kn->active#5 &base->lock irq_context: 0 &p->lock &of->mutex kn->active#5 &base->lock &obj_hash[i].lock irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 tomoyo_ss batched_entropy_u8.lock irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 tomoyo_ss kfence_freelist_lock irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 tomoyo_ss &meta->lock irq_context: 0 &p->lock &of->mutex kn->active#5 remove_cache_srcu &rq->__lock irq_context: 0 kn->active#5 &kernfs_locks->open_file_mutex[count] remove_cache_srcu irq_context: 0 kn->active#5 &kernfs_locks->open_file_mutex[count] remove_cache_srcu quarantine_lock irq_context: 0 kn->active#5 &kernfs_locks->open_file_mutex[count] remove_cache_srcu &c->lock irq_context: 0 kn->active#5 &kernfs_locks->open_file_mutex[count] remove_cache_srcu &n->list_lock irq_context: 0 kn->active#5 &kernfs_locks->open_file_mutex[count] remove_cache_srcu &obj_hash[i].lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &dentry->d_lock &dentry->d_lock/1 
irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &dentry->d_lock/1 irq_context: 0 &u->iolock &rq->__lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 rcu_read_lock &rq->__lock irq_context: 0 &lo->lo_mutex irq_context: 0 &disk->open_mutex &lo->lo_mutex irq_context: 0 &disk->open_mutex nbd_index_mutex irq_context: 0 &disk->open_mutex nbd_index_mutex &nbd->config_lock irq_context: 0 &disk->open_mutex nbd_index_mutex &nbd->config_lock mmu_notifier_invalidate_range_start irq_context: 0 &disk->open_mutex nbd_index_mutex &nbd->config_lock pool_lock#2 irq_context: 0 &disk->open_mutex &nbd->config_lock irq_context: 0 &disk->open_mutex &nbd->config_lock &bdev->bd_size_lock irq_context: 0 &disk->open_mutex &nbd->config_lock &q->queue_lock irq_context: 0 &disk->open_mutex &nbd->config_lock &ACCESS_PRIVATE(sdp, lock) irq_context: 0 &disk->open_mutex &nbd->config_lock set->srcu irq_context: 0 &disk->open_mutex &nbd->config_lock &obj_hash[i].lock irq_context: 0 &disk->open_mutex &nbd->config_lock &ACCESS_PRIVATE(ssp->srcu_sup, lock) &ACCESS_PRIVATE(sdp, lock) irq_context: 0 &disk->open_mutex &nbd->config_lock &ACCESS_PRIVATE(ssp->srcu_sup, lock) rcu_read_lock &pool->lock irq_context: 0 &disk->open_mutex &nbd->config_lock &ACCESS_PRIVATE(ssp->srcu_sup, lock) rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 &disk->open_mutex &nbd->config_lock &ACCESS_PRIVATE(ssp->srcu_sup, lock) rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 &disk->open_mutex &nbd->config_lock &ACCESS_PRIVATE(ssp->srcu_sup, lock) rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 &disk->open_mutex &nbd->config_lock &ACCESS_PRIVATE(ssp->srcu_sup, lock) rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 &disk->open_mutex &nbd->config_lock &ACCESS_PRIVATE(ssp->srcu_sup, lock) rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &disk->open_mutex &nbd->config_lock &rq->__lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&(&ssp->srcu_sup->work)->work) &ssp->srcu_sup->srcu_gp_mutex &ACCESS_PRIVATE(ssp->srcu_sup, lock) irq_context: 0 (wq_completion)rcu_gp (work_completion)(&(&ssp->srcu_sup->work)->work) &ACCESS_PRIVATE(ssp->srcu_sup, lock) irq_context: 0 (wq_completion)rcu_gp (work_completion)(&(&ssp->srcu_sup->work)->work) rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 &disk->open_mutex &nbd->config_lock &x->wait#3 irq_context: 0 &disk->open_mutex &nbd->config_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)rcu_gp (work_completion)(&(&ssp->srcu_sup->work)->work) &rq->__lock irq_context: 0 &ep->mtx &rq->__lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&(&ssp->srcu_sup->work)->work) &ssp->srcu_sup->srcu_gp_mutex &ssp->srcu_sup->srcu_cb_mutex &ACCESS_PRIVATE(ssp->srcu_sup, lock) irq_context: 0 &disk->open_mutex &nbd->config_lock set->srcu irq_context: 0 &disk->open_mutex &nbd->config_lock pool_lock#2 irq_context: 0 &disk->open_mutex nbd_index_mutex &nbd->config_lock &c->lock irq_context: 0 &disk->open_mutex &nbd->config_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 tomoyo_ss rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 tomoyo_ss rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 tomoyo_ss batched_entropy_u8.lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 tomoyo_ss kfence_freelist_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 tomoyo_ss 
&meta->lock irq_context: 0 remove_cache_srcu rcu_read_lock &____s->seqcount irq_context: 0 &mousedev->mutex/1 irq_context: 0 &mousedev->mutex/1 &mousedev->mutex#2 irq_context: 0 &mousedev->mutex/1 &mousedev->mutex#2 &dev->mutex#2 irq_context: 0 &mousedev->mutex/1 &mousedev->mutex#2 &dev->mutex#2 rcu_state.exp_mutex rcu_node_0 irq_context: 0 &mousedev->mutex/1 &mousedev->mutex#2 &dev->mutex#2 rcu_state.exp_mutex &obj_hash[i].lock irq_context: 0 &mousedev->mutex/1 &mousedev->mutex#2 &dev->mutex#2 rcu_state.exp_mutex rcu_read_lock &pool->lock irq_context: 0 &mousedev->mutex/1 &mousedev->mutex#2 &dev->mutex#2 rcu_state.exp_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 &mousedev->mutex/1 &mousedev->mutex#2 &dev->mutex#2 rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 &mousedev->mutex/1 &mousedev->mutex#2 &dev->mutex#2 rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 &mousedev->mutex/1 &mousedev->mutex#2 &dev->mutex#2 rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mousedev->mutex/1 &mousedev->mutex#2 &dev->mutex#2 rcu_state.exp_mutex &rnp->exp_wq[2] irq_context: 0 &mousedev->mutex/1 &mousedev->mutex#2 &dev->mutex#2 rcu_state.exp_mutex &rq->__lock irq_context: 0 &mousedev->mutex/1 &mousedev->mutex#2 &dev->mutex#2 rcu_state.exp_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mousedev->mutex/1 &mousedev->mutex#2 &dev->mutex#2 &obj_hash[i].lock irq_context: 0 &p->lock &of->mutex kn->active#5 &rfkill->lock irq_context: 0 kn->active#15 &kernfs_locks->open_file_mutex[count] &c->lock irq_context: 0 kn->active#15 &kernfs_locks->open_file_mutex[count] &____s->seqcount irq_context: 0 kn->active#16 &c->lock irq_context: 0 sb_writers &type->i_mutex_dir_key/1 tomoyo_ss remove_cache_srcu &rq->__lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 tomoyo_ss &pcp->lock &zone->lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 tomoyo_ss &pcp->lock &zone->lock &____s->seqcount irq_context: 0 kn->active#44 fs_reclaim irq_context: 0 kn->active#44 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#44 &kernfs_locks->open_file_mutex[count] irq_context: 0 kn->active#44 &kernfs_locks->open_file_mutex[count] fs_reclaim irq_context: 0 kn->active#44 &kernfs_locks->open_file_mutex[count] fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#17 &c->lock irq_context: 0 kn->active#17 &____s->seqcount irq_context: 0 kn->active#17 &kernfs_locks->open_file_mutex[count] &c->lock irq_context: 0 kn->active#17 &kernfs_locks->open_file_mutex[count] &____s->seqcount irq_context: 0 kn->active#16 &kernfs_locks->open_file_mutex[count] &c->lock irq_context: 0 kn->active#16 &kernfs_locks->open_file_mutex[count] &____s->seqcount irq_context: 0 kn->active#18 &c->lock irq_context: 0 &sig->cred_guard_mutex mapping.invalidate_lock rcu_read_lock &base->lock irq_context: 0 &sig->cred_guard_mutex mapping.invalidate_lock rcu_read_lock &base->lock &obj_hash[i].lock irq_context: 0 &disk->open_mutex &new->lock irq_context: 0 &disk->open_mutex &new->lock &mtdblk->cache_mutex irq_context: 0 kn->active#18 &____s->seqcount irq_context: 0 kn->active#15 &n->list_lock irq_context: 0 kn->active#15 &n->list_lock &c->lock irq_context: 0 &sig->cred_guard_mutex sb_writers#4 &journal->j_state_lock irq_context: 0 &sig->cred_guard_mutex sb_writers#4 &journal->j_state_lock tk_core.seq.seqcount irq_context: 0 &sig->cred_guard_mutex 
sb_writers#4 &journal->j_state_lock &obj_hash[i].lock irq_context: 0 &sig->cred_guard_mutex sb_writers#4 &journal->j_state_lock &base->lock irq_context: 0 &sig->cred_guard_mutex sb_writers#4 &journal->j_state_lock &base->lock &obj_hash[i].lock irq_context: 0 &sig->cred_guard_mutex sb_writers#4 &wb->work_lock irq_context: 0 &sig->cred_guard_mutex sb_writers#4 &wb->work_lock &obj_hash[i].lock irq_context: 0 &sig->cred_guard_mutex sb_writers#4 &wb->work_lock &base->lock irq_context: 0 &sig->cred_guard_mutex sb_writers#4 &wb->work_lock &base->lock &obj_hash[i].lock irq_context: 0 &sig->cred_guard_mutex &iint->mutex &rq->__lock &cfs_rq->removed.lock irq_context: 0 &p->lock &of->mutex kn->active#15 &rq->__lock irq_context: 0 kn->active#15 &____s->seqcount irq_context: 0 kn->active#5 batched_entropy_u8.lock irq_context: 0 kn->active#5 kfence_freelist_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 tomoyo_ss remove_cache_srcu irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 tomoyo_ss remove_cache_srcu quarantine_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 tomoyo_ss remove_cache_srcu &c->lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 tomoyo_ss remove_cache_srcu &n->list_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 tomoyo_ss remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 tomoyo_ss remove_cache_srcu &obj_hash[i].lock irq_context: 0 kn->active#45 fs_reclaim irq_context: 0 kn->active#45 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#45 &c->lock irq_context: 0 kn->active#45 &kernfs_locks->open_file_mutex[count] irq_context: 0 kn->active#45 &kernfs_locks->open_file_mutex[count] fs_reclaim irq_context: 0 kn->active#45 &kernfs_locks->open_file_mutex[count] fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &mtd->master.chrdev_lock irq_context: 0 &mtd->master.chrdev_lock &mm->mmap_lock irq_context: 0 kn->active#5 remove_cache_srcu &rq->__lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 &rq->__lock &cfs_rq->removed.lock irq_context: 0 sb_writers &type->i_mutex_dir_key/1 tomoyo_ss &pcp->lock &zone->lock irq_context: 0 sb_writers &type->i_mutex_dir_key/1 tomoyo_ss &pcp->lock &zone->lock &____s->seqcount irq_context: 0 remove_cache_srcu rcu_read_lock &rcu_state.gp_wq &p->pi_lock &cfs_rq->removed.lock irq_context: 0 kn->active#5 remove_cache_srcu rcu_read_lock rcu_node_0 irq_context: 0 kn->active#5 remove_cache_srcu rcu_read_lock &rq->__lock irq_context: 0 kn->active#5 &kernfs_locks->open_file_mutex[count] &pcp->lock &zone->lock irq_context: 0 tomoyo_ss remove_cache_srcu &pcp->lock &zone->lock irq_context: 0 tomoyo_ss remove_cache_srcu &pcp->lock &zone->lock &____s->seqcount irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 tomoyo_ss remove_cache_srcu &rq->__lock irq_context: 0 &type->i_mutex_dir_key#5 &rq->__lock &cfs_rq->removed.lock irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 &rq->__lock irq_context: 0 &p->lock remove_cache_srcu rcu_read_lock &rcu_state.gp_wq irq_context: 0 &p->lock remove_cache_srcu rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 &p->lock remove_cache_srcu rcu_read_lock &rcu_state.gp_wq &p->pi_lock &cfs_rq->removed.lock irq_context: 0 &p->lock remove_cache_srcu rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 &p->lock remove_cache_srcu rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &p->lock &of->mutex kn->active#5 batched_entropy_u8.lock 
irq_context: 0 &p->lock &of->mutex kn->active#5 kfence_freelist_lock irq_context: 0 &p->lock &of->mutex kn->active#5 &meta->lock irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 tomoyo_ss remove_cache_srcu rcu_read_lock rcu_node_0 irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 tomoyo_ss remove_cache_srcu rcu_read_lock &rq->__lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &sb->s_type->i_mutex_key#12/4 &rq->__lock irq_context: 0 sb_writers &type->i_mutex_dir_key/1 remove_cache_srcu &rq->__lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 remove_cache_srcu &rq->__lock irq_context: 0 sb_writers &type->i_mutex_dir_key/1 tomoyo_ss remove_cache_srcu rcu_read_lock rcu_node_0 irq_context: 0 sb_writers &type->i_mutex_dir_key/1 tomoyo_ss remove_cache_srcu rcu_read_lock &rq->__lock irq_context: 0 tomoyo_ss rcu_node_0 irq_context: 0 tomoyo_ss mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 remove_cache_srcu irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 remove_cache_srcu quarantine_lock irq_context: 0 &p->lock fs_reclaim &rq->__lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 tomoyo_ss mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 sb_writers#5 &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &p->lock &of->mutex &rq->__lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &fsnotify_mark_srcu fs_reclaim irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &fsnotify_mark_srcu fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &fsnotify_mark_srcu pool_lock#2 irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &fsnotify_mark_srcu &group->notification_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &fsnotify_mark_srcu &group->notification_waitq irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &fsnotify_mark_srcu &group->notification_waitq &p->pi_lock irq_context: 0 &mark->lock irq_context: 0 &group->inotify_data.idr_lock irq_context: 0 &group->inotify_data.idr_lock &obj_hash[i].lock irq_context: 0 &group->inotify_data.idr_lock pool_lock#2 irq_context: 0 destroy_lock irq_context: 0 fs/notify/mark.c:89 irq_context: 0 (wq_completion)events_unbound connector_reaper_work irq_context: 0 (wq_completion)events_unbound connector_reaper_work destroy_lock irq_context: 0 (wq_completion)events_unbound connector_reaper_work &ACCESS_PRIVATE(sdp, lock) irq_context: 0 (wq_completion)events_unbound connector_reaper_work &fsnotify_mark_srcu irq_context: 0 (reaper_work).work irq_context: 0 (wq_completion)events_unbound connector_reaper_work &obj_hash[i].lock irq_context: 0 &x->wait#10 irq_context: 0 (wq_completion)events_unbound connector_reaper_work &ACCESS_PRIVATE(ssp->srcu_sup, lock) &ACCESS_PRIVATE(sdp, lock) irq_context: 0 (wq_completion)events_unbound connector_reaper_work &ACCESS_PRIVATE(ssp->srcu_sup, lock) rcu_read_lock &pool->lock irq_context: 0 (wq_completion)events_unbound (reaper_work).work irq_context: 0 (wq_completion)events_unbound connector_reaper_work &ACCESS_PRIVATE(ssp->srcu_sup, lock) rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound (reaper_work).work destroy_lock irq_context: 0 (wq_completion)events_unbound connector_reaper_work &ACCESS_PRIVATE(ssp->srcu_sup, lock) rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 (wq_completion)events_unbound (reaper_work).work &ACCESS_PRIVATE(sdp, lock) irq_context: 0 (wq_completion)events_unbound connector_reaper_work 
&ACCESS_PRIVATE(ssp->srcu_sup, lock) rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 (wq_completion)events_unbound (reaper_work).work &fsnotify_mark_srcu irq_context: 0 (wq_completion)events_unbound connector_reaper_work &ACCESS_PRIVATE(ssp->srcu_sup, lock) rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events_unbound (reaper_work).work &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound connector_reaper_work &ACCESS_PRIVATE(ssp->srcu_sup, lock) rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_unbound (reaper_work).work &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)events_unbound connector_reaper_work &ACCESS_PRIVATE(ssp->srcu_sup, lock) rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)events_unbound (reaper_work).work &ACCESS_PRIVATE(ssp->srcu_sup, lock) irq_context: 0 (wq_completion)events_unbound connector_reaper_work &x->wait#3 irq_context: 0 (wq_completion)events_unbound (reaper_work).work &x->wait#3 irq_context: 0 (wq_completion)events_unbound connector_reaper_work &rq->__lock irq_context: 0 (wq_completion)events_unbound (reaper_work).work &rq->__lock irq_context: 0 (wq_completion)events_unbound connector_reaper_work &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_unbound (reaper_work).work &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)rcu_gp (work_completion)(&(&ssp->srcu_sup->work)->work) &ACCESS_PRIVATE(ssp->srcu_sup, lock) &ACCESS_PRIVATE(sdp, lock) irq_context: 0 (wq_completion)events_unbound connector_reaper_work pool_lock#2 irq_context: 0 (wq_completion)events_unbound (reaper_work).work pool_lock#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&barr->work) &x->wait#10 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 tomoyo_ss tomoyo_policy_lock &n->list_lock irq_context: 0 tomoyo_ss tomoyo_policy_lock &n->list_lock &c->lock irq_context: softirq lib/debugobjects.c:101 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 tasklist_lock &sighand->siglock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &sig->cred_guard_mutex tomoyo_ss tomoyo_policy_lock &n->list_lock irq_context: 0 &sig->cred_guard_mutex tomoyo_ss tomoyo_policy_lock &n->list_lock &c->lock irq_context: 0 &iint->mutex sb_writers#4 remove_cache_srcu irq_context: 0 &iint->mutex sb_writers#4 remove_cache_srcu quarantine_lock irq_context: 0 &iint->mutex sb_writers#4 remove_cache_srcu &c->lock irq_context: 0 &iint->mutex sb_writers#4 remove_cache_srcu &n->list_lock irq_context: 0 &iint->mutex sb_writers#4 remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 &iint->mutex sb_writers#4 remove_cache_srcu &obj_hash[i].lock irq_context: 0 sb_writers#5 &sb->s_type->i_mutex_key#12 tomoyo_ss irq_context: 0 sb_writers#5 &sb->s_type->i_mutex_key#12 tomoyo_ss mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#5 &sb->s_type->i_mutex_key#12 tomoyo_ss &c->lock irq_context: 0 sb_writers#5 &sb->s_type->i_mutex_key#12 tomoyo_ss rcu_read_lock mount_lock.seqcount irq_context: 0 sb_writers#5 &sb->s_type->i_mutex_key#12 tomoyo_ss rcu_read_lock rcu_read_lock rename_lock.seqcount irq_context: 0 sb_writers#5 &sb->s_type->i_mutex_key#12 tomoyo_ss &obj_hash[i].lock irq_context: 0 sb_writers#5 &sb->s_type->i_mutex_key#12 tomoyo_ss rcu_read_lock &dentry->d_lock irq_context: 0 sb_writers#5 &sb->s_type->i_mutex_key#12 
tomoyo_ss tomoyo_policy_lock irq_context: 0 &sig->cred_guard_mutex &iint->mutex mapping.invalidate_lock rcu_read_lock &c->lock irq_context: 0 &iint->mutex &n->list_lock irq_context: 0 &iint->mutex &n->list_lock &c->lock irq_context: 0 sb_writers#5 &sb->s_type->i_mutex_key#12 tomoyo_ss tomoyo_policy_lock mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 tomoyo_ss tomoyo_policy_lock &c->lock irq_context: 0 userns_state_mutex irq_context: 0 &ei->xattr_sem &mapping->private_lock irq_context: 0 &type->i_mutex_dir_key#3 &obj_hash[i].lock pool_lock irq_context: 0 &sig->cred_guard_mutex mapping.invalidate_lock lock#4 rcu_read_lock pool_lock#2 irq_context: 0 &sig->cred_guard_mutex mapping.invalidate_lock lock#4 &obj_hash[i].lock irq_context: 0 &sig->cred_guard_mutex mapping.invalidate_lock &ei->i_data_sem &c->lock irq_context: 0 sb_writers#4 jbd2_handle &pcp->lock &zone->lock irq_context: 0 sb_writers#4 jbd2_handle &pcp->lock &zone->lock &____s->seqcount irq_context: 0 (wq_completion)kblockd (work_completion)(&(&hctx->run_work)->work) rcu_read_lock &obj_hash[i].lock pool_lock irq_context: 0 &sig->cred_guard_mutex mapping.invalidate_lock &ei->i_data_sem &ei->i_es_lock key#7 irq_context: 0 &sig->cred_guard_mutex mapping.invalidate_lock &ei->i_data_sem &ei->i_es_lock key#8 irq_context: 0 rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 rtnl_mutex rlock-AF_NETLINK irq_context: 0 rtnl_mutex (inetaddr_validator_chain).rwsem irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem fs_reclaim irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem pool_lock#2 irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem pcpu_alloc_mutex irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem pcpu_alloc_mutex pcpu_lock irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem fib_info_lock irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem &dir->lock#2 irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem &____s->seqcount irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem &c->lock irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem nl_table_lock irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem &obj_hash[i].lock irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem nl_table_wait.lock irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem &net->sctp.local_addr_lock irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem &net->sctp.local_addr_lock &net->sctp.addr_wq_lock irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem &net->sctp.local_addr_lock &net->sctp.addr_wq_lock pool_lock#2 irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem &net->sctp.local_addr_lock &net->sctp.addr_wq_lock &obj_hash[i].lock irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem &net->sctp.local_addr_lock &net->sctp.addr_wq_lock &base->lock irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem &net->sctp.local_addr_lock &net->sctp.addr_wq_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&ht->run_work) &ht->mutex &ht->lock &obj_hash[i].lock pool_lock irq_context: 0 rtnl_mutex _xmit_LOOPBACK irq_context: 0 rtnl_mutex netpoll_srcu irq_context: 0 rtnl_mutex &in_dev->mc_tomb_lock irq_context: 0 rtnl_mutex &im->lock irq_context: 0 rtnl_mutex fib_info_lock irq_context: 0 rtnl_mutex rcu_read_lock &dir->lock#2 irq_context: 0 rtnl_mutex cbs_list_lock irq_context: 0 rtnl_mutex &ndev->lock irq_context: 0 rtnl_mutex &idev->mc_lock irq_context: 0 rtnl_mutex (inet6addr_validator_chain).rwsem irq_context: 0 
rtnl_mutex rcu_read_lock &net->ipv6.addrconf_hash_lock irq_context: 0 rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 rtnl_mutex rcu_read_lock &net->sctp.local_addr_lock irq_context: 0 rtnl_mutex rcu_read_lock &net->sctp.local_addr_lock &net->sctp.addr_wq_lock irq_context: 0 rtnl_mutex rcu_read_lock &net->sctp.local_addr_lock &net->sctp.addr_wq_lock pool_lock#2 irq_context: 0 rtnl_mutex &ifa->lock irq_context: 0 rtnl_mutex rcu_read_lock &tb->tb6_lock irq_context: 0 rtnl_mutex rcu_read_lock &tb->tb6_lock &net->ipv6.fib6_walker_lock irq_context: 0 rtnl_mutex &tb->tb6_lock irq_context: 0 rtnl_mutex &tb->tb6_lock &____s->seqcount irq_context: 0 rtnl_mutex &tb->tb6_lock pool_lock#2 irq_context: 0 rtnl_mutex &tb->tb6_lock &c->lock irq_context: 0 rtnl_mutex &tb->tb6_lock nl_table_lock irq_context: 0 rtnl_mutex &tb->tb6_lock &obj_hash[i].lock irq_context: 0 rtnl_mutex &tb->tb6_lock nl_table_wait.lock irq_context: 0 rtnl_mutex &ndev->lock irq_context: 0 rtnl_mutex &ndev->lock &ifa->lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET slock-AF_INET irq_context: 0 &sb->s_type->i_mutex_key#10 slock-AF_INET irq_context: 0 &sb->s_type->i_mutex_key#10 clock-AF_INET irq_context: softirq &(&tbl->gc_work)->timer rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: softirq rcu_callback &dir->lock#2 irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 rcu_read_lock &rcu_state.gp_wq irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock lock#4 rcu_read_lock pool_lock#2 irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock lock#4 &obj_hash[i].lock irq_context: 0 &sig->cred_guard_mutex &type->i_mutex_dir_key#3 &ei->i_es_lock key#2 irq_context: 0 &sig->cred_guard_mutex &iint->mutex mapping.invalidate_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 &sig->cred_guard_mutex &iint->mutex mapping.invalidate_lock &xa->xa_lock#6 batched_entropy_u8.lock irq_context: 0 &sig->cred_guard_mutex &iint->mutex mapping.invalidate_lock &xa->xa_lock#6 kfence_freelist_lock irq_context: 0 &sig->cred_guard_mutex &iint->mutex mapping.invalidate_lock &xa->xa_lock#6 &c->lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 slock-AF_INET6 irq_context: 0 &sb->s_type->i_mutex_key#10 slock-AF_INET6 irq_context: 0 &sb->s_type->i_mutex_key#10 clock-AF_INET6 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 fs_reclaim irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &dentry->d_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &ei->i_es_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &ei->i_data_sem irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &ei->i_data_sem mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &ei->i_data_sem pool_lock#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &ei->i_data_sem &ei->i_es_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &ei->i_data_sem &ei->i_es_lock pool_lock#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &ei->i_data_sem &ei->i_es_lock &sbi->s_es_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &ei->i_data_sem &obj_hash[i].lock irq_context: 0 
sb_writers#4 &type->i_mutex_dir_key#3/1 &ei->i_data_sem &____s->seqcount irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &ei->i_data_sem rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &ei->i_data_sem &ei->i_es_lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &____s->seqcount irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 pool_lock#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &xa->xa_lock#6 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 lock#4 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &mapping->private_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tk_core.seq.seqcount irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &dd->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 rcu_read_lock &pool->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 bit_wait_table + i irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 inode_hash_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 inode_hash_lock &sb->s_type->i_lock_key#22 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 inode_hash_lock &s->s_inode_list_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &journal->j_state_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_lock_key#22 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_lock_key#22 &dentry->d_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &dentry->d_lock &lru->node[i].lock irq_context: 0 sb_writers#5 tomoyo_ss irq_context: 0 sb_writers#5 tomoyo_ss mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#5 tomoyo_ss pool_lock#2 irq_context: 0 sb_writers#5 tomoyo_ss rcu_read_lock mount_lock.seqcount irq_context: 0 sb_writers#5 tomoyo_ss rcu_read_lock rcu_read_lock rename_lock.seqcount irq_context: 0 sb_writers#5 tomoyo_ss &obj_hash[i].lock irq_context: 0 sb_writers#5 tomoyo_ss &c->lock irq_context: 0 sb_writers#5 tomoyo_ss rcu_read_lock &dentry->d_lock irq_context: 0 sb_writers#5 tomoyo_ss tomoyo_policy_lock irq_context: 0 sb_writers#5 &xattrs->lock irq_context: 0 &u->lock/1 irq_context: 0 sb_writers#5 &sb->s_type->i_mutex_key#12 &mapping->i_mmap_rwsem irq_context: 0 sb_writers#5 &sb->s_type->i_mutex_key#12 &sb->s_type->i_lock_key &xa->xa_lock#6 irq_context: 0 sb_writers#5 &sb->s_type->i_mutex_key#12 lock#5 irq_context: 0 sb_writers#5 &sb->s_type->i_mutex_key#12 &lruvec->lru_lock irq_context: 0 sb_writers#5 &sb->s_type->i_mutex_key#12 rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#5 &sb->s_type->i_mutex_key#12 &obj_hash[i].lock irq_context: 0 &f->f_pos_lock irq_context: 0 &f->f_pos_lock sb_writers#5 
irq_context: 0 &f->f_pos_lock sb_writers#5 &sb->s_type->i_mutex_key#12 irq_context: 0 &f->f_pos_lock sb_writers#5 &sb->s_type->i_mutex_key#12 tk_core.seq.seqcount irq_context: 0 &f->f_pos_lock sb_writers#5 &sb->s_type->i_mutex_key#12 fs_reclaim irq_context: 0 &f->f_pos_lock sb_writers#5 &sb->s_type->i_mutex_key#12 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &f->f_pos_lock sb_writers#5 &sb->s_type->i_mutex_key#12 &____s->seqcount irq_context: 0 &f->f_pos_lock sb_writers#5 &sb->s_type->i_mutex_key#12 &xa->xa_lock#6 irq_context: 0 &f->f_pos_lock sb_writers#5 &sb->s_type->i_mutex_key#12 lock#4 irq_context: 0 &f->f_pos_lock sb_writers#5 &sb->s_type->i_mutex_key#12 &info->lock irq_context: 0 &sb->s_type->i_lock_key#4 irq_context: 0 &sb->s_type->i_lock_key#4 &dentry->d_lock irq_context: 0 sk_lock-AF_INET irq_context: 0 sk_lock-AF_INET slock-AF_INET irq_context: 0 slock-AF_INET irq_context: 0 sk_lock-AF_INET6 irq_context: 0 sk_lock-AF_INET6 slock-AF_INET6 irq_context: 0 slock-AF_INET6 irq_context: 0 &type->i_mutex_dir_key#3 &ei->i_es_lock key#6 irq_context: 0 &type->i_mutex_dir_key#3 &ei->i_data_sem &ei->i_es_lock key#7 irq_context: 0 &type->i_mutex_dir_key#3 &ei->i_data_sem &ei->i_es_lock key#8 irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &u->bindlock &net->unx.table.locks[i]/1 irq_context: 0 sb_writers#5 &sb->s_type->i_mutex_key#12 tomoyo_ss &____s->seqcount irq_context: 0 sb_writers#5 &sb->s_type->i_mutex_key#12 tomoyo_ss pool_lock#2 irq_context: 0 sk_lock-AF_INET &table->hash[i].lock irq_context: 0 sk_lock-AF_INET &table->hash[i].lock clock-AF_INET irq_context: 0 sk_lock-AF_INET &table->hash[i].lock &table->hash2[i].lock irq_context: 0 sk_lock-AF_INET6 &table->hash[i].lock irq_context: 0 sk_lock-AF_INET6 &table->hash[i].lock clock-AF_INET6 irq_context: 0 sk_lock-AF_INET6 &table->hash[i].lock &table->hash2[i].lock irq_context: 0 &iint->mutex ima_extend_list_mutex &c->lock irq_context: 0 sk_lock-AF_NETLINK &mm->mmap_lock irq_context: 0 sk_lock-AF_NETLINK fs_reclaim irq_context: 0 sk_lock-AF_NETLINK fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sk_lock-AF_NETLINK pool_lock#2 irq_context: 0 sk_lock-AF_NETLINK free_vmap_area_lock irq_context: 0 sk_lock-AF_NETLINK free_vmap_area_lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_NETLINK free_vmap_area_lock pool_lock#2 irq_context: 0 sk_lock-AF_NETLINK vmap_area_lock irq_context: 0 sk_lock-AF_NETLINK &____s->seqcount irq_context: 0 sk_lock-AF_NETLINK pcpu_alloc_mutex irq_context: 0 sk_lock-AF_NETLINK pcpu_alloc_mutex pcpu_lock irq_context: 0 sk_lock-AF_NETLINK &obj_hash[i].lock irq_context: 0 sk_lock-AF_NETLINK pack_mutex irq_context: 0 sk_lock-AF_NETLINK batched_entropy_u32.lock irq_context: 0 sk_lock-AF_NETLINK text_mutex irq_context: 0 sk_lock-AF_NETLINK text_mutex ptlock_ptr(page)#2 irq_context: 0 sk_lock-AF_NETLINK &fp->aux->used_maps_mutex irq_context: 0 &mm->mmap_lock &anon_vma->rwsem &mm->page_table_lock &obj_hash[i].lock irq_context: 0 &mm->mmap_lock &anon_vma->rwsem &mm->page_table_lock pool_lock#2 irq_context: 0 rcu_read_lock &sb->s_type->i_lock_key#22 irq_context: 0 &u->iolock &____s->seqcount irq_context: 0 &u->iolock rcu_read_lock pool_lock#2 irq_context: 0 kn->active#46 fs_reclaim irq_context: 0 kn->active#46 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#46 &c->lock irq_context: 0 kn->active#46 &kernfs_locks->open_file_mutex[count] irq_context: 0 kn->active#46 &kernfs_locks->open_file_mutex[count] fs_reclaim irq_context: 0 kn->active#46 
&kernfs_locks->open_file_mutex[count] fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#46 &kernfs_locks->open_file_mutex[count] &c->lock irq_context: 0 &p->lock cpufreq_driver_lock irq_context: 0 sk_lock-AF_NETLINK rcu_read_lock rcu_read_lock &pool->lock irq_context: 0 sk_lock-AF_NETLINK rcu_read_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_NETLINK rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 sk_lock-AF_NETLINK rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 sk_lock-AF_NETLINK rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_NETLINK rcu_read_lock rcu_node_0 irq_context: 0 sk_lock-AF_NETLINK rcu_read_lock &rq->__lock irq_context: 0 rtnl_mutex &n->list_lock irq_context: 0 rtnl_mutex &n->list_lock &c->lock irq_context: 0 rtnl_mutex rcu_read_lock &ndev->lock &ifa->lock irq_context: 0 &type->i_mutex_dir_key#4 &dentry->d_lock &wq#2 irq_context: 0 vlan_ioctl_mutex &mm->mmap_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex irq_context: 0 cb_lock &c->lock irq_context: 0 cb_lock rtnl_mutex irq_context: 0 cb_lock &obj_hash[i].lock irq_context: 0 cb_lock genl_mutex &c->lock irq_context: 0 cb_lock &n->list_lock irq_context: 0 cb_lock &n->list_lock &c->lock irq_context: 0 dev_addr_sem irq_context: 0 sb_writers#3 tomoyo_ss &n->list_lock irq_context: 0 sb_writers#3 tomoyo_ss &n->list_lock &c->lock irq_context: 0 cb_lock &____s->seqcount irq_context: 0 cb_lock genl_mutex &n->list_lock irq_context: 0 cb_lock genl_mutex &n->list_lock &c->lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx irq_context: 0 cb_lock &rdev->wiphy.mtx irq_context: 0 cb_lock &rdev->wiphy.mtx fs_reclaim irq_context: 0 cb_lock &rdev->wiphy.mtx fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 cb_lock &rdev->wiphy.mtx pool_lock#2 irq_context: 0 cb_lock &rdev->wiphy.mtx &obj_hash[i].lock irq_context: 0 cb_lock &rdev->wiphy.mtx rlock-AF_NETLINK irq_context: 0 cb_lock nlk_cb_mutex-GENERIC irq_context: 0 cb_lock nlk_cb_mutex-GENERIC fs_reclaim irq_context: 0 cb_lock nlk_cb_mutex-GENERIC fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 cb_lock nlk_cb_mutex-GENERIC pool_lock#2 irq_context: 0 cb_lock nlk_cb_mutex-GENERIC &____s->seqcount irq_context: 0 cb_lock nlk_cb_mutex-GENERIC rtnl_mutex irq_context: 0 cb_lock nlk_cb_mutex-GENERIC rtnl_mutex &obj_hash[i].lock irq_context: 0 cb_lock nlk_cb_mutex-GENERIC rtnl_mutex pool_lock#2 irq_context: 0 cb_lock nlk_cb_mutex-GENERIC rtnl_mutex &rdev->wiphy.mtx irq_context: 0 cb_lock nlk_cb_mutex-GENERIC &rdev->wiphy.mtx irq_context: 0 cb_lock nlk_cb_mutex-GENERIC &rdev->wiphy.mtx &wdev->mtx irq_context: 0 cb_lock nlk_cb_mutex-GENERIC &rdev->wiphy.mtx &wdev->mtx &rdev->bss_lock irq_context: 0 cb_lock nlk_cb_mutex-GENERIC rlock-AF_NETLINK irq_context: 0 cb_lock nlk_cb_mutex-GENERIC &obj_hash[i].lock irq_context: 0 cb_lock genl_mutex &____s->seqcount irq_context: 0 cb_lock &rdev->wiphy.mtx &c->lock irq_context: 0 cb_lock &rdev->wiphy.mtx &____s->seqcount irq_context: softirq (&net->sctp.addr_wq_timer) irq_context: softirq (&net->sctp.addr_wq_timer) &net->sctp.addr_wq_lock irq_context: softirq (&net->sctp.addr_wq_timer) &net->sctp.addr_wq_lock &obj_hash[i].lock irq_context: softirq (&net->sctp.addr_wq_timer) &net->sctp.addr_wq_lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#9 &n->list_lock irq_context: 0 &sb->s_type->i_mutex_key#9 &n->list_lock &c->lock irq_context: 0 
&sb->s_type->i_mutex_key#9 &pcp->lock &zone->lock irq_context: 0 &sb->s_type->i_mutex_key#9 &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &sb->s_type->i_mutex_key#9 &rq->__lock irq_context: 0 &sig->cred_guard_mutex mapping.invalidate_lock &ei->i_es_lock key#6 irq_context: 0 &pipe->rd_wait &p->pi_lock &cfs_rq->removed.lock irq_context: 0 sb_writers#5 fs_reclaim irq_context: 0 sb_writers#5 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#5 &c->lock irq_context: 0 sb_writers#5 sb_writers#5 mount_lock irq_context: 0 sb_writers#5 sb_writers#5 tk_core.seq.seqcount irq_context: 0 sb_writers#5 sb_writers#5 &sb->s_type->i_lock_key irq_context: 0 sb_writers#5 sb_writers#5 &wb->list_lock irq_context: 0 sb_writers#5 sb_writers#5 &wb->list_lock &sb->s_type->i_lock_key irq_context: 0 sb_writers#5 &sb->s_type->i_lock_key &xa->xa_lock#6 irq_context: 0 sb_writers#5 lock#4 irq_context: 0 sb_writers#5 lock#4 &lruvec->lru_lock irq_context: 0 sb_writers#5 lock#5 irq_context: 0 sb_writers#5 &lruvec->lru_lock irq_context: 0 sb_writers#5 rcu_read_lock pool_lock#2 irq_context: 0 &pipe->rd_wait &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &sig->cred_guard_mutex remove_cache_srcu rcu_read_lock &____s->seqcount irq_context: 0 tasklist_lock &sighand->siglock &(&sig->stats_lock)->lock &____s->seqcount#5 &obj_hash[i].lock pool_lock irq_context: 0 sb_writers#5 &obj_hash[i].lock pool_lock irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock &obj_hash[i].lock pool_lock irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock &sighand->siglock irq_context: 0 rcu_read_lock rcu_read_lock &sighand->siglock irq_context: 0 rcu_read_lock rcu_read_lock &sighand->siglock pool_lock#2 irq_context: 0 rcu_read_lock rcu_read_lock &sighand->siglock &sighand->signalfd_wqh irq_context: 0 rcu_read_lock rcu_read_lock &sighand->siglock &sighand->signalfd_wqh &ep->lock irq_context: 0 rcu_read_lock rcu_read_lock &sighand->siglock &sighand->signalfd_wqh &ep->lock &ep->wq irq_context: 0 rcu_read_lock rcu_read_lock &sighand->siglock &sighand->signalfd_wqh &ep->lock &ep->wq &p->pi_lock irq_context: 0 rcu_read_lock rcu_read_lock &sighand->siglock &sighand->signalfd_wqh &ep->lock &ep->wq &p->pi_lock &rq->__lock irq_context: 0 rcu_read_lock rcu_read_lock &sighand->siglock &sighand->signalfd_wqh &ep->lock &ep->wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rcu_read_lock rcu_read_lock &sighand->siglock &sighand->signalfd_wqh &ep->lock &ep->wq &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 rcu_read_lock rcu_read_lock &sighand->siglock &c->lock irq_context: 0 &ep->mtx rcu_read_lock &sighand->signalfd_wqh irq_context: 0 &ep->mtx rcu_read_lock &ei->socket.wq.wait irq_context: 0 tasklist_lock &sighand->siglock &sighand->signalfd_wqh &ep->lock &ep->wq irq_context: 0 tasklist_lock &sighand->siglock &sighand->signalfd_wqh &ep->lock &ep->wq &p->pi_lock irq_context: 0 tasklist_lock &sighand->siglock &sighand->signalfd_wqh &ep->lock &ep->wq &p->pi_lock &rq->__lock irq_context: 0 tasklist_lock &sighand->siglock &sighand->signalfd_wqh &ep->lock &ep->wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem &cfs_rq->removed.lock irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_read_lock &rcu_state.gp_wq irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock 
irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_read_lock &cfs_rq->removed.lock irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_read_lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_read_lock pool_lock#2 irq_context: 0 tasklist_lock &sighand->siglock &sighand->signalfd_wqh &ep->lock &ep->wq &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &mm->mmap_lock &anon_vma->rwsem &sem->wait_lock irq_context: 0 &mm->mmap_lock &anon_vma->rwsem &rq->__lock irq_context: 0 &mm->mmap_lock &anon_vma->rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock &anon_vma->rwsem &sem->wait_lock irq_context: 0 &mm->mmap_lock &sem->wait_lock irq_context: 0 &mm->mmap_lock &p->pi_lock irq_context: 0 &mm->mmap_lock &p->pi_lock &rq->__lock irq_context: 0 &mm->mmap_lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 pgd_lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 rcu_read_lock pool_lock#2 irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 key irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 pcpu_lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 percpu_counters_lock irq_context: 0 tasklist_lock &sighand->siglock &sig->wait_chldexit &p->pi_lock &cfs_rq->removed.lock irq_context: softirq &(&wb->dwork)->timer rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &pl->lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &pl->lock key#12 irq_context: 0 rtnl_mutex &dev_addr_list_lock_key irq_context: 0 rtnl_mutex &dev_addr_list_lock_key pool_lock#2 irq_context: 0 rtnl_mutex uevent_sock_mutex rlock-AF_NETLINK irq_context: 0 rtnl_mutex uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait irq_context: 0 rtnl_mutex uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock irq_context: 0 rtnl_mutex uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq irq_context: 0 rtnl_mutex uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock irq_context: 0 rtnl_mutex uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock &rq->__lock irq_context: 0 rtnl_mutex uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &pnettable->lock irq_context: 0 rtnl_mutex smc_ib_devices.mutex irq_context: 0 rtnl_mutex napi_hash_lock irq_context: 0 rtnl_mutex lapb_list_lock irq_context: 0 rtnl_mutex remove_cache_srcu irq_context: 0 rtnl_mutex remove_cache_srcu quarantine_lock irq_context: 0 rtnl_mutex x25_neigh_list_lock irq_context: 0 rtnl_mutex console_lock console_srcu console_owner_lock irq_context: 0 rtnl_mutex console_lock console_srcu console_owner irq_context: 0 rtnl_mutex console_lock console_srcu console_owner &port_lock_key irq_context: 0 lock pidmap_lock &c->lock irq_context: 0 rtnl_mutex console_lock console_srcu console_owner console_owner_lock irq_context: 0 &u->lock &ei->socket.wq.wait irq_context: 0 rtnl_mutex _xmit_ETHER irq_context: 0 rtnl_mutex &tb->tb6_lock rlock-AF_NETLINK irq_context: 0 
&dentry->d_lock &dentry->d_lock &lru->node[i].lock irq_context: 0 rtnl_mutex _xmit_SLIP irq_context: 0 sb_writers#5 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq (&eql->timer) irq_context: softirq (&eql->timer) &eql->queue.lock irq_context: softirq (&eql->timer) &obj_hash[i].lock irq_context: softirq (&eql->timer) &base->lock irq_context: softirq (&eql->timer) &base->lock &obj_hash[i].lock irq_context: 0 &ret->b_state_lock &journal->j_list_lock key#14 irq_context: 0 fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 &c->lock irq_context: 0 &root->kernfs_rwsem &sem->wait_lock irq_context: 0 rtnl_mutex &root->kernfs_rwsem &sem->wait_lock irq_context: 0 rtnl_mutex batched_entropy_u8.lock irq_context: 0 rtnl_mutex kfence_freelist_lock irq_context: 0 rtnl_mutex &vi->refill_lock irq_context: softirq _xmit_ETHER#2 irq_context: 0 rtnl_mutex noop_qdisc.q.lock irq_context: 0 &type->i_mutex_dir_key#4 &root->kernfs_rwsem &sem->wait_lock irq_context: 0 rtnl_mutex &root->kernfs_rwsem &rq->__lock irq_context: 0 &type->i_mutex_dir_key#4 &sem->wait_lock irq_context: 0 rtnl_mutex &root->kernfs_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &type->i_mutex_dir_key#4 &p->pi_lock irq_context: 0 rtnl_mutex &root->kernfs_rwsem rcu_read_lock &rq->__lock irq_context: 0 rtnl_mutex &root->kernfs_rwsem rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &sem->wait_lock irq_context: 0 &root->kernfs_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sig->cred_guard_mutex mapping.invalidate_lock &ei->i_data_sem &n->list_lock irq_context: 0 &sig->cred_guard_mutex mapping.invalidate_lock &ei->i_data_sem &n->list_lock &c->lock irq_context: 0 &sig->cred_guard_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &rfkill->lock irq_context: 0 rtnl_mutex &local->chanctx_mtx irq_context: 0 rtnl_mutex &rdev->wiphy.mtx lweventlist_lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx lweventlist_lock pool_lock#2 irq_context: 0 rtnl_mutex &rdev->wiphy.mtx lweventlist_lock &dir->lock#2 irq_context: 0 rtnl_mutex &rdev->wiphy.mtx rcu_read_lock &pool->lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &rdev->wiphy.mtx rcu_node_0 irq_context: 0 rtnl_mutex &rdev->wiphy.mtx rcu_read_lock rcu_node_0 irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &data->mutex irq_context: 0 rtnl_mutex _xmit_ETHER &local->filter_lock irq_context: 0 rtnl_mutex _xmit_ETHER &local->filter_lock &c->lock irq_context: 0 rtnl_mutex _xmit_ETHER &local->filter_lock pool_lock#2 irq_context: 0 rtnl_mutex _xmit_ETHER rcu_read_lock &pool->lock/1 irq_context: 0 rtnl_mutex _xmit_ETHER rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 rtnl_mutex _xmit_ETHER rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 rtnl_mutex _xmit_ETHER rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 rtnl_mutex _xmit_ETHER rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)phy0 irq_context: 0 
(wq_completion)phy0 (work_completion)(&local->reconfig_filter) irq_context: 0 (wq_completion)phy0 (work_completion)(&local->reconfig_filter) &local->filter_lock irq_context: 0 rtnl_mutex _xmit_ETHER &c->lock irq_context: 0 rtnl_mutex _xmit_ETHER &n->list_lock irq_context: 0 rtnl_mutex _xmit_ETHER &n->list_lock &c->lock irq_context: 0 &type->i_mutex_dir_key#4 &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &dev->tx_global_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &dev->tx_global_lock _xmit_ETHER#2 irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &dev->tx_global_lock &obj_hash[i].lock irq_context: softirq rcu_read_lock noop_qdisc.q.lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_state.exp_mutex rcu_node_0 irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_state.exp_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_state.exp_mutex &rnp->exp_wq[3] irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_state.exp_mutex &pool->lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_state.exp_mutex &pool->lock &p->pi_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_state.exp_mutex &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_state.exp_mutex &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_state.exp_mutex &rq->__lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_state.exp_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex rtnl_mutex.wait_lock irq_context: 0 rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 rtnl_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &sch->q.lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex net_rwsem irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &tbl->lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex class irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex (&tbl->proxy_timer) irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &base->lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex cbs_list_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &tn->lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_read_lock &tb->tb6_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_read_lock &tb->tb6_lock &net->ipv6.fib6_walker_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events 
(linkwatch_work).work rtnl_mutex &c->lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex pool_lock#2 irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rlock-AF_NETLINK irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex.wait_lock irq_context: 0 (wq_completion)events (linkwatch_work).work &p->pi_lock irq_context: 0 (wq_completion)events (linkwatch_work).work &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events (linkwatch_work).work &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &base->lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)phy1 irq_context: 0 (wq_completion)phy1 (work_completion)(&local->reconfig_filter) irq_context: 0 (wq_completion)phy1 (work_completion)(&local->reconfig_filter) &local->filter_lock irq_context: 0 rtnl_mutex _xmit_VOID irq_context: 0 &sig->cred_guard_mutex &iint->mutex mapping.invalidate_lock rcu_read_lock &____s->seqcount irq_context: 0 &sig->cred_guard_mutex &iint->mutex mapping.invalidate_lock lock#4 rcu_read_lock pool_lock#2 irq_context: 0 &sig->cred_guard_mutex &iint->mutex mapping.invalidate_lock lock#4 &obj_hash[i].lock irq_context: 0 &u->iolock &u->lock irq_context: 0 &u->iolock &mm->mmap_lock ptlock_ptr(page)#2 irq_context: 0 &u->iolock &mm->mmap_lock fs_reclaim irq_context: 0 &u->iolock &mm->mmap_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &u->iolock &mm->mmap_lock &____s->seqcount irq_context: 0 &u->iolock &mm->mmap_lock mmu_notifier_invalidate_range_start irq_context: 0 &u->iolock &mm->mmap_lock ptlock_ptr(page)#2 lock#4 irq_context: 0 sb_writers#3 tomoyo_ss quarantine_lock irq_context: 0 sb_writers#3 tomoyo_ss remove_cache_srcu irq_context: 0 sb_writers#3 tomoyo_ss remove_cache_srcu quarantine_lock irq_context: 0 sb_writers#3 tomoyo_ss remove_cache_srcu &c->lock irq_context: 0 sb_writers#3 tomoyo_ss remove_cache_srcu &n->list_lock irq_context: 0 sb_writers#3 tomoyo_ss remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#3 tomoyo_ss remove_cache_srcu &obj_hash[i].lock irq_context: 0 &iint->mutex mapping.invalidate_lock &rq->__lock irq_context: 0 &sig->cred_guard_mutex &sb->s_type->i_mutex_key#8 remove_cache_srcu irq_context: 0 &sig->cred_guard_mutex &sb->s_type->i_mutex_key#8 remove_cache_srcu quarantine_lock irq_context: 0 &sig->cred_guard_mutex &sb->s_type->i_mutex_key#8 remove_cache_srcu &c->lock irq_context: 0 &sig->cred_guard_mutex &sb->s_type->i_mutex_key#8 remove_cache_srcu &n->list_lock irq_context: 0 &sig->cred_guard_mutex &sb->s_type->i_mutex_key#8 remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 &sig->cred_guard_mutex &sb->s_type->i_mutex_key#8 remove_cache_srcu &obj_hash[i].lock irq_context: 0 &sig->cred_guard_mutex &iint->mutex mapping.invalidate_lock &ei->i_es_lock key#2 irq_context: 0 &sig->cred_guard_mutex &iint->mutex pgd_lock irq_context: 0 &sig->cred_guard_mutex &iint->mutex rcu_read_lock pool_lock#2 irq_context: 0 &sig->cred_guard_mutex &iint->mutex key irq_context: 0 &sig->cred_guard_mutex &iint->mutex pcpu_lock irq_context: 0 &sig->cred_guard_mutex &iint->mutex percpu_counters_lock irq_context: 0 &sig->cred_guard_mutex &iint->mutex 
&cfs_rq->removed.lock irq_context: softirq &(&ipvs->defense_work)->timer rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &iint->mutex ima_extend_list_mutex &n->list_lock irq_context: 0 &iint->mutex ima_extend_list_mutex &n->list_lock &c->lock irq_context: 0 sk_lock-AF_INET6 &____s->seqcount#8 irq_context: 0 sk_lock-AF_INET6 batched_entropy_u32.lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock &____s->seqcount irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock pool_lock#2 irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock &c->lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 sk_lock-AF_INET6 rcu_read_lock &____s->seqcount irq_context: 0 sk_lock-AF_INET6 rcu_read_lock pool_lock#2 irq_context: 0 sk_lock-AF_INET6 rcu_read_lock &c->lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock &dir->lock#2 irq_context: 0 sk_lock-AF_INET6 rcu_read_lock &ul->lock irq_context: 0 sk_lock-AF_INET6 &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET6 pool_lock#2 irq_context: 0 sk_lock-AF_INET6 batched_entropy_u16.lock irq_context: 0 &sb->s_type->i_mutex_key#10 &table->hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 &table->hash[i].lock &table->hash2[i].lock irq_context: 0 rtnl_mutex _xmit_X25 irq_context: 0 rtnl_mutex lapb_list_lock irq_context: 0 rtnl_mutex lapb_list_lock pool_lock#2 irq_context: 0 rtnl_mutex lapb_list_lock &obj_hash[i].lock irq_context: 0 rtnl_mutex lapb_list_lock &base->lock irq_context: 0 rtnl_mutex lapb_list_lock &base->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex &lapbeth->up_lock irq_context: 0 rtnl_mutex &lapb->lock irq_context: 0 rtnl_mutex &lapb->lock pool_lock#2 irq_context: 0 rtnl_mutex &lapb->lock rcu_read_lock_bh noop_qdisc.q.lock irq_context: 0 rtnl_mutex &lapb->lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 rtnl_mutex &lapb->lock rcu_read_lock_bh pool_lock#2 irq_context: 0 rtnl_mutex &lapb->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex &lapb->lock &base->lock irq_context: 0 rtnl_mutex &lapb->lock &base->lock &obj_hash[i].lock irq_context: softirq rcu_callback &ul->lock irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 &p->alloc_lock irq_context: 0 sk_lock-AF_INET &tcp_hashinfo.bhash[i].lock irq_context: 0 sk_lock-AF_INET &tcp_hashinfo.bhash[i].lock pool_lock#2 irq_context: 0 sk_lock-AF_INET &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock irq_context: 0 sk_lock-AF_INET &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock pool_lock#2 irq_context: 0 sk_lock-AF_INET &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock clock-AF_INET irq_context: 0 sk_lock-AF_INET &tcp_hashinfo.bhash[i].lock clock-AF_INET irq_context: 0 sk_lock-AF_INET &h->lhash2[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 &rq->__lock irq_context: 0 sk_lock-AF_INET6 &tcp_hashinfo.bhash[i].lock irq_context: 0 sk_lock-AF_INET6 &tcp_hashinfo.bhash[i].lock clock-AF_INET6 irq_context: 0 sk_lock-AF_INET6 &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock irq_context: 0 sk_lock-AF_INET6 &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock pool_lock#2 irq_context: 0 sk_lock-AF_INET6 &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock clock-AF_INET6 irq_context: 0 sk_lock-AF_INET6 &h->lhash2[i].lock irq_context: 0 &sig->cred_guard_mutex tomoyo_ss rcu_read_lock rcu_node_0 irq_context: 0 &sig->cred_guard_mutex tomoyo_ss rcu_read_lock &rq->__lock irq_context: 0 &sig->cred_guard_mutex tomoyo_ss 
rcu_read_lock &cfs_rq->removed.lock irq_context: 0 &sig->cred_guard_mutex tomoyo_ss rcu_read_lock &obj_hash[i].lock irq_context: 0 &sig->cred_guard_mutex tomoyo_ss rcu_read_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 tomoyo_ss &c->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 &ei->xattr_sem &mapping->private_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 tk_core.seq.seqcount irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &ei->xattr_sem irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 fs_reclaim irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &____s->seqcount irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 pool_lock#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &xa->xa_lock#6 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 lock#4 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &mapping->private_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &ei->i_es_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &ei->i_data_sem irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &ei->i_data_sem mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &ei->i_data_sem pool_lock#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &ei->i_data_sem &ei->i_es_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &ei->i_data_sem &ei->i_es_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &ei->i_data_sem &ei->i_es_lock &c->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &ei->i_data_sem &ei->i_es_lock pool_lock#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &ei->i_data_sem &ei->i_es_lock &sbi->s_es_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &ei->i_data_sem &obj_hash[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &ei->i_data_sem &sb->s_type->i_lock_key#22 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &ei->i_data_sem &(ei->i_block_reservation_lock) irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock &memcg->move_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock &xa->xa_lock#6 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_lock_key#22 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &wb->list_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &wb->list_lock &sb->s_type->i_lock_key#22 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &journal->j_state_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_raw_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &journal->j_wait_updates irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &obj_hash[i].lock irq_context: 0 rtnl_mutex rcu_read_lock &obj_hash[i].lock irq_context: 0 rtnl_mutex rcu_read_lock rcu_read_lock &pool->lock/1 irq_context: 0 rtnl_mutex rcu_read_lock rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 rtnl_mutex rcu_read_lock rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 rtnl_mutex rcu_read_lock rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 rtnl_mutex rcu_read_lock &c->lock irq_context: 0 rtnl_mutex rcu_read_lock 
&net->sctp.local_addr_lock &net->sctp.addr_wq_lock &obj_hash[i].lock irq_context: 0 rtnl_mutex rcu_read_lock &net->sctp.local_addr_lock &net->sctp.addr_wq_lock &base->lock irq_context: 0 rtnl_mutex rcu_read_lock &net->sctp.local_addr_lock &net->sctp.addr_wq_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)gid-cache-wq (work_completion)(&work->work) irq_context: 0 (wq_completion)gid-cache-wq (work_completion)(&work->work) devices_rwsem irq_context: 0 (wq_completion)gid-cache-wq (work_completion)(&work->work) &obj_hash[i].lock irq_context: 0 rtnl_mutex rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 rtnl_mutex rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 rtnl_mutex rcu_read_lock_bh &base->lock irq_context: 0 rtnl_mutex rcu_read_lock_bh &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rtnl_mutex.wait_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &pool->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &ifa->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &ndev->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &ndev->lock &ifa->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex pool_lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rlock-AF_NETLINK irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &tb->tb6_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &tb->tb6_lock pool_lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &tb->tb6_lock nl_table_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &tb->tb6_lock rlock-AF_NETLINK irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &tb->tb6_lock nl_table_wait.lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock batched_entropy_u32.lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &base->lock irq_context: 0 (wq_completion)ipv6_addrconf 
(work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock &tb->tb6_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock &tb->tb6_lock &net->ipv6.fib6_walker_lock irq_context: softirq rcu_callback rcu_read_lock rt6_exception_lock irq_context: 0 &tty->legacy_mutex &tty->ldisc_sem irq_context: 0 &tty->legacy_mutex tasklist_lock irq_context: 0 &tty->legacy_mutex tasklist_lock &sighand->siglock irq_context: 0 &tty->legacy_mutex tasklist_lock &sighand->siglock &tty->ctrl.lock irq_context: 0 &tty->ldisc_sem rcu_read_lock &tty->ctrl.lock irq_context: 0 &mm->mmap_lock pgd_lock irq_context: 0 &mm->mmap_lock key irq_context: 0 &mm->mmap_lock pcpu_lock irq_context: 0 &mm->mmap_lock percpu_counters_lock irq_context: 0 &tty->ctrl.lock irq_context: 0 tasklist_lock rcu_read_lock &sighand->siglock irq_context: 0 tasklist_lock &sighand->siglock irq_context: 0 &tty->legacy_mutex &tty->ctrl.lock irq_context: 0 &tty->legacy_mutex &f->f_lock irq_context: 0 &tty->legacy_mutex &f->f_lock fasync_lock irq_context: 0 &tty->legacy_mutex &obj_hash[i].lock irq_context: 0 &tty->legacy_mutex pool_lock#2 irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 tomoyo_ss tomoyo_policy_lock &c->lock irq_context: 0 rcu_read_lock &tty->ctrl.lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 rename_lock.seqcount irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 fs_reclaim irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 &dentry->d_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 rcu_read_lock rename_lock.seqcount irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 &dentry->d_lock &wq#2 irq_context: 0 &port_lock_key irq_context: 0 &buf->lock irq_context: 0 &tty->ldisc_sem &port_lock_key irq_context: 0 &tty->ldisc_sem &port->lock irq_context: 0 &tty->ldisc_sem &tty->termios_rwsem &tty->ldisc_sem &tty->flow.lock irq_context: softirq &(&idev->mc_dad_work)->timer irq_context: softirq &(&idev->mc_dad_work)->timer rcu_read_lock &pool->lock irq_context: softirq &(&idev->mc_dad_work)->timer rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: softirq &(&idev->mc_dad_work)->timer rcu_read_lock &pool->lock &p->pi_lock irq_context: softirq &(&idev->mc_dad_work)->timer rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: softirq &(&idev->mc_dad_work)->timer rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)mld irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock irq_context: 0 &tty->ldisc_sem &ldata->atomic_read_lock irq_context: 0 &tty->ldisc_sem &ldata->atomic_read_lock &tty->termios_rwsem irq_context: 0 &tty->ldisc_sem &ldata->atomic_read_lock &tty->termios_rwsem &tty->read_wait irq_context: 0 &tty->ldisc_sem &ldata->atomic_read_lock (work_completion)(&buf->work) irq_context: 0 &tty->ldisc_sem &ldata->atomic_read_lock &rq->__lock irq_context: 0 &tty->ldisc_sem &ldata->atomic_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &lapb->lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 
irq_context: 0 rtnl_mutex &lapb->lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 &obj_hash[i].lock irq_context: 0 rtnl_mutex &lapb->lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 pool_lock#2 irq_context: 0 &mm->mmap_lock remove_cache_srcu &pcp->lock &zone->lock irq_context: 0 &mm->mmap_lock remove_cache_srcu &pcp->lock &zone->lock &____s->seqcount irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_state.exp_mutex &rnp->exp_wq[0] irq_context: 0 rtnl_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: softirq _xmit_ETHER#2 &obj_hash[i].lock irq_context: softirq _xmit_ETHER#2 pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 &meta->lock irq_context: 0 &sb->s_type->i_mutex_key#10 kfence_freelist_lock irq_context: 0 &net->packet.sklist_lock irq_context: 0 sk_lock-AF_PACKET irq_context: 0 sk_lock-AF_PACKET slock-AF_PACKET irq_context: 0 sk_lock-AF_PACKET &po->bind_lock irq_context: 0 sk_lock-AF_PACKET &po->bind_lock ptype_lock irq_context: 0 sk_lock-AF_PACKET rcu_state.exp_mutex rcu_node_0 irq_context: 0 sk_lock-AF_PACKET rcu_state.exp_mutex &obj_hash[i].lock irq_context: 0 sk_lock-AF_PACKET rcu_state.exp_mutex rcu_read_lock &pool->lock irq_context: 0 sk_lock-AF_PACKET rcu_state.exp_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_PACKET rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 sk_lock-AF_PACKET rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 sk_lock-AF_PACKET rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_PACKET rcu_state.exp_mutex &rnp->exp_wq[1] irq_context: 0 sk_lock-AF_PACKET rcu_state.exp_mutex &rq->__lock irq_context: 0 sk_lock-AF_PACKET &obj_hash[i].lock irq_context: 0 sk_lock-AF_PACKET &po->bind_lock rcu_read_lock pool_lock#2 irq_context: 0 sk_lock-AF_PACKET &po->bind_lock rcu_read_lock &dir->lock#2 irq_context: 0 sk_lock-AF_PACKET &po->bind_lock rcu_read_lock ptype_lock irq_context: 0 slock-AF_PACKET irq_context: 0 sk_lock-AF_PACKET &mm->mmap_lock irq_context: 0 sk_lock-AF_PACKET fs_reclaim irq_context: 0 sk_lock-AF_PACKET fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sk_lock-AF_PACKET pool_lock#2 irq_context: 0 sk_lock-AF_PACKET free_vmap_area_lock irq_context: 0 sk_lock-AF_PACKET free_vmap_area_lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_PACKET vmap_area_lock irq_context: 0 sk_lock-AF_PACKET &____s->seqcount irq_context: 0 sk_lock-AF_PACKET pcpu_alloc_mutex irq_context: 0 sk_lock-AF_PACKET pcpu_alloc_mutex pcpu_lock irq_context: 0 sk_lock-AF_PACKET pack_mutex irq_context: 0 sk_lock-AF_PACKET batched_entropy_u32.lock irq_context: 0 sk_lock-AF_PACKET text_mutex irq_context: 0 sk_lock-AF_PACKET text_mutex ptlock_ptr(page)#2 irq_context: 0 sk_lock-AF_PACKET &fp->aux->used_maps_mutex irq_context: 0 rlock-AF_PACKET irq_context: 0 wlock-AF_PACKET irq_context: 0 rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 irq_context: 0 rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 &obj_hash[i].lock irq_context: 0 rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 pool_lock#2 irq_context: softirq &(&dm_bufio_cleanup_old_work)->timer irq_context: softirq &(&dm_bufio_cleanup_old_work)->timer rcu_read_lock &pool->lock irq_context: softirq &(&dm_bufio_cleanup_old_work)->timer rcu_read_lock 
&pool->lock &obj_hash[i].lock irq_context: softirq &(&dm_bufio_cleanup_old_work)->timer rcu_read_lock &pool->lock &p->pi_lock irq_context: softirq &(&dm_bufio_cleanup_old_work)->timer rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: softirq &(&dm_bufio_cleanup_old_work)->timer rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq &(&dm_bufio_cleanup_old_work)->timer rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)dm_bufio_cache irq_context: 0 (wq_completion)dm_bufio_cache (work_completion)(&(&dm_bufio_cleanup_old_work)->work) irq_context: 0 (wq_completion)dm_bufio_cache (work_completion)(&(&dm_bufio_cleanup_old_work)->work) dm_bufio_clients_lock irq_context: 0 (wq_completion)dm_bufio_cache (work_completion)(&(&dm_bufio_cleanup_old_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)dm_bufio_cache (work_completion)(&(&dm_bufio_cleanup_old_work)->work) &base->lock irq_context: 0 (wq_completion)dm_bufio_cache (work_completion)(&(&dm_bufio_cleanup_old_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 &mm->mmap_lock quarantine_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock pool_lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock _xmit_ETHER irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock _xmit_ETHER pool_lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &ndev->lock &ifa->lock batched_entropy_u32.lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &ndev->lock &ifa->lock crngs.lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &ndev->lock &ifa->lock &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &ndev->lock &ifa->lock &base->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &ndev->lock &ifa->lock &base->lock &obj_hash[i].lock irq_context: 0 cb_lock &rdev->wiphy.mtx &n->list_lock irq_context: 0 cb_lock &rdev->wiphy.mtx &n->list_lock &c->lock irq_context: 0 rtnl_mutex class irq_context: 0 rtnl_mutex (&tbl->proxy_timer) irq_context: softirq &(&idev->mc_ifc_work)->timer irq_context: softirq &(&idev->mc_ifc_work)->timer rcu_read_lock &pool->lock irq_context: softirq &(&idev->mc_ifc_work)->timer rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: softirq &(&idev->mc_ifc_work)->timer rcu_read_lock &pool->lock &p->pi_lock irq_context: softirq &(&idev->mc_ifc_work)->timer rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: softirq &(&idev->mc_ifc_work)->timer rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) 
&idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock pool_lock#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh pool_lock#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock batched_entropy_u32.lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock &obj_hash[i].lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock &base->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock &base->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex &tb->tb6_lock &n->list_lock irq_context: 0 rtnl_mutex &tb->tb6_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock _xmit_ETHER &local->filter_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock _xmit_ETHER &local->filter_lock pool_lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock _xmit_ETHER rcu_read_lock &pool->lock/1 irq_context: 
0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock _xmit_ETHER rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock _xmit_ETHER rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock _xmit_ETHER rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock _xmit_ETHER rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &ifa->lock &obj_hash[i].lock irq_context: 0 lock pidmap_lock &n->list_lock irq_context: 0 lock pidmap_lock &n->list_lock &c->lock irq_context: softirq rcu_callback &ul->lock#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh noop_qdisc.q.lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock &c->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 rtnl_mutex &net->ipv6.addrconf_hash_lock irq_context: 0 rtnl_mutex &idev->mc_lock _xmit_ETHER irq_context: 0 rtnl_mutex &idev->mc_lock _xmit_ETHER &local->filter_lock irq_context: 0 rtnl_mutex &idev->mc_lock _xmit_ETHER &local->filter_lock &obj_hash[i].lock irq_context: 0 rtnl_mutex &idev->mc_lock _xmit_ETHER &local->filter_lock pool_lock#2 irq_context: 0 rtnl_mutex &idev->mc_lock _xmit_ETHER &local->filter_lock krc.lock irq_context: 0 rtnl_mutex &idev->mc_lock _xmit_ETHER rcu_read_lock &pool->lock/1 irq_context: 0 rtnl_mutex &idev->mc_lock _xmit_ETHER rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 rtnl_mutex &idev->mc_lock _xmit_ETHER rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 rtnl_mutex &idev->mc_lock _xmit_ETHER rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 rtnl_mutex &idev->mc_lock _xmit_ETHER rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 rtnl_mutex &idev->mc_lock &____s->seqcount irq_context: 0 rtnl_mutex &idev->mc_lock pool_lock#2 irq_context: 0 rtnl_mutex &idev->mc_lock rcu_read_lock pool_lock#2 irq_context: 0 rtnl_mutex &idev->mc_lock &obj_hash[i].lock irq_context: 0 rtnl_mutex &idev->mc_lock &base->lock irq_context: 0 rtnl_mutex &idev->mc_lock &base->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex rcu_read_lock &dir->lock irq_context: 0 rtnl_mutex rcu_read_lock rcu_read_lock &pool->lock irq_context: 0 rtnl_mutex rcu_read_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex rcu_read_lock rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 rtnl_mutex rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 rtnl_mutex rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 rtnl_mutex rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex rcu_read_lock krc.lock irq_context: 0 rtnl_mutex &tb->tb6_lock &net->ipv6.fib6_walker_lock 
irq_context: 0 rtnl_mutex &tb->tb6_lock rt6_exception_lock irq_context: 0 (wq_completion)events (work_completion)(&w->work)#2 irq_context: 0 (wq_completion)events (work_completion)(&w->work)#2 pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&w->work)#2 &dir->lock irq_context: 0 (wq_completion)events (work_completion)(&w->work)#2 &obj_hash[i].lock irq_context: 0 cb_lock nlk_cb_mutex-GENERIC &c->lock irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock pool_lock#2 irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 sk_lock-AF_INET6 fs_reclaim irq_context: 0 sk_lock-AF_INET6 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sk_lock-AF_INET6 &c->lock irq_context: 0 sk_lock-AF_INET6 &mm->mmap_lock irq_context: 0 sk_lock-AF_INET6 once_lock irq_context: 0 sk_lock-AF_INET6 once_lock crngs.lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock &pool->lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 sk_lock-AF_INET6 rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_INET6 rcu_node_0 irq_context: 0 sk_lock-AF_INET6 &rq->__lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_node_0 irq_context: 0 sk_lock-AF_INET6 rcu_read_lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock &tbl->lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock &n->lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock &____s->seqcount#9 irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock_bh pool_lock#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock krc.lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &base->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &base->lock &obj_hash[i].lock irq_context: 0 &u->iolock &base->lock irq_context: 0 &u->iolock &base->lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 &obj_hash[i].lock pool_lock irq_context: 0 &p->lock &mm->mmap_lock fs_reclaim irq_context: 0 &p->lock &mm->mmap_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &p->lock &mm->mmap_lock &____s->seqcount irq_context: 0 &p->lock &mm->mmap_lock mmu_notifier_invalidate_range_start irq_context: 0 &p->lock &mm->mmap_lock ptlock_ptr(page)#2 lock#4 irq_context: 0 sk_lock-AF_PACKET rcu_state.exp_mutex &rnp->exp_wq[2] irq_context: 0 tomoyo_ss &rcu_state.expedited_wq irq_context: 0 sk_lock-AF_INET6 &n->list_lock irq_context: 0 sk_lock-AF_INET6 &n->list_lock &c->lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock pool_lock#2 irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock 
tk_core.seq.seqcount irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#9 &obj_hash[i].lock pool_lock irq_context: softirq &(&ifa->dad_work)->timer irq_context: softirq &(&ifa->dad_work)->timer rcu_read_lock &pool->lock irq_context: softirq &(&ifa->dad_work)->timer rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: softirq &(&ifa->dad_work)->timer rcu_read_lock &pool->lock &p->pi_lock irq_context: softirq &(&ifa->dad_work)->timer rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: softirq &(&ifa->dad_work)->timer rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &ndev->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &ndev->lock &ifa->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &ndev->lock &ifa->lock &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &ndev->lock &ifa->lock &base->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &ndev->lock &ifa->lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex batched_entropy_u8.lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex kfence_freelist_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &dir->lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &ul->lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh pool_lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 irq_context: 0 (wq_completion)mld 
(work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock &____s->seqcount irq_context: 0 sk_lock-AF_PACKET rcu_state.exp_mutex &rnp->exp_wq[3] irq_context: 0 sk_lock-AF_PACKET &c->lock irq_context: 0 sk_lock-AF_PACKET rcu_state.exp_mutex &rnp->exp_wq[0] irq_context: softirq _xmit_ETHER#2 rcu_read_lock &ei->socket.wq.wait irq_context: softirq rcu_read_lock rlock-AF_PACKET irq_context: softirq rcu_read_lock rcu_read_lock &ei->socket.wq.wait irq_context: softirq rcu_read_lock rcu_read_lock &ei->socket.wq.wait &p->pi_lock irq_context: softirq rcu_read_lock rcu_read_lock &ei->socket.wq.wait &p->pi_lock &cfs_rq->removed.lock irq_context: softirq rcu_read_lock rcu_read_lock &ei->socket.wq.wait &p->pi_lock &rq->__lock irq_context: softirq rcu_read_lock rcu_read_lock &ei->socket.wq.wait &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq rcu_read_lock rcu_read_lock once_lock irq_context: softirq rcu_read_lock rcu_read_lock once_lock crngs.lock irq_context: softirq rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock &pool->lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock &pool->lock pool_lock#2 irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq rcu_read_lock rcu_read_lock &____s->seqcount#7 irq_context: softirq rcu_read_lock rcu_read_lock &____s->seqcount irq_context: softirq rcu_read_lock rcu_read_lock &c->lock irq_context: softirq rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock pool_lock#2 irq_context: 0 rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock tk_core.seq.seqcount irq_context: 0 rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock &obj_hash[i].lock irq_context: 0 rcu_read_lock rcu_read_lock &sighand->siglock batched_entropy_u8.lock irq_context: 0 rcu_read_lock rcu_read_lock &sighand->siglock kfence_freelist_lock irq_context: 0 tasklist_lock &sighand->siglock &sighand->signalfd_wqh &ep->lock &ep->wq &p->pi_lock &cfs_rq->removed.lock irq_context: softirq rcu_read_lock rcu_read_lock batched_entropy_u8.lock irq_context: softirq rcu_read_lock rcu_read_lock kfence_freelist_lock irq_context: softirq rcu_read_lock rcu_read_lock &n->list_lock irq_context: softirq rcu_read_lock rcu_read_lock &n->list_lock &c->lock irq_context: softirq rcu_read_lock rcu_read_lock &ct->lock irq_context: softirq rcu_read_lock once_lock irq_context: softirq rcu_read_lock once_lock crngs.lock irq_context: softirq rcu_read_lock rcu_read_lock &pool->lock irq_context: softirq rcu_read_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: softirq rcu_read_lock rcu_read_lock &pool->lock pool_lock#2 irq_context: softirq rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: softirq rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: softirq rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)ipv6_addrconf 
(work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock &ei->socket.wq.wait irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock &ei->socket.wq.wait &p->pi_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock &ei->socket.wq.wait &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock &ei->socket.wq.wait &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &tb->tb6_lock &c->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &tb->tb6_lock &____s->seqcount irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &tb->tb6_lock rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &tb->tb6_lock &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &tb->tb6_lock rcu_read_lock &ei->socket.wq.wait irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &tb->tb6_lock rcu_read_lock &ei->socket.wq.wait &p->pi_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh pool_lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock &obj_hash[i].lock irq_context: 0 &p->lock &mm->mmap_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 rcu_read_lock rcu_read_lock 
rcu_read_lock &____s->seqcount irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock &c->lock irq_context: 0 &sb->s_type->i_mutex_key#10 &net->packet.sklist_lock irq_context: 0 &sb->s_type->i_mutex_key#10 &po->bind_lock irq_context: 0 &sb->s_type->i_mutex_key#10 &po->bind_lock ptype_lock irq_context: 0 &sb->s_type->i_mutex_key#10 &po->bind_lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 &po->bind_lock &dir->lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_PACKET irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_PACKET slock-AF_PACKET irq_context: 0 &sb->s_type->i_mutex_key#10 slock-AF_PACKET irq_context: 0 &sb->s_type->i_mutex_key#10 fanout_mutex irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_state.exp_mutex rcu_node_0 irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_state.exp_mutex &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_state.exp_mutex rcu_read_lock &pool->lock irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_state.exp_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_state.exp_mutex &rnp->exp_wq[3] irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_state.exp_mutex &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 clock-AF_PACKET irq_context: 0 &sb->s_type->i_mutex_key#10 rlock-AF_PACKET irq_context: 0 &sb->s_type->i_mutex_key#10 pcpu_lock irq_context: 0 &sb->s_type->i_mutex_key#10 elock-AF_PACKET irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_state.exp_mutex &rnp->exp_wq[1] irq_context: 0 &mm->mmap_lock &vma->vm_lock->lock &rq->__lock irq_context: 0 sk_lock-AF_PACKET rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#31 &sbi->s_writepages_rwsem mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#31 &sbi->s_writepages_rwsem &pcp->lock &zone->lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#31 &sbi->s_writepages_rwsem &pcp->lock &zone->lock &____s->seqcount irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#31 &sbi->s_writepages_rwsem &____s->seqcount irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#31 &sbi->s_writepages_rwsem pool_lock#2 irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#31 &sbi->s_writepages_rwsem &c->lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#31 &sbi->s_writepages_rwsem lock#4 irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#31 &sbi->s_writepages_rwsem lock#5 irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#31 &sbi->s_writepages_rwsem &obj_hash[i].lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#31 &sbi->s_writepages_rwsem &journal->j_state_lock irq_context: 0 (wq_completion)writeback 
(work_completion)(&(&wb->dwork)->work) &type->s_umount_key#31 &sbi->s_writepages_rwsem jbd2_handle irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#31 &sbi->s_writepages_rwsem jbd2_handle lock#4 irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#31 &sbi->s_writepages_rwsem jbd2_handle lock#5 irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#31 &sbi->s_writepages_rwsem jbd2_handle mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#31 &sbi->s_writepages_rwsem jbd2_handle &____s->seqcount irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#31 &sbi->s_writepages_rwsem jbd2_handle pool_lock#2 irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#31 &sbi->s_writepages_rwsem jbd2_handle &c->lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#31 &sbi->s_writepages_rwsem jbd2_handle &ei->i_es_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#31 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#31 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#31 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem pool_lock#2 irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#31 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &____s->seqcount irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#31 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &c->lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#31 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lg->lg_mutex irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#31 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lg->lg_mutex &ei->i_prealloc_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#31 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lg->lg_mutex mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#31 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lg->lg_mutex &____s->seqcount irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#31 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lg->lg_mutex pool_lock#2 irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#31 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lg->lg_mutex &c->lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#31 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lg->lg_mutex &xa->xa_lock#6 irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#31 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lg->lg_mutex lock#4 irq_context: 0 (wq_completion)writeback 
(work_completion)(&(&wb->dwork)->work) &type->s_umount_key#31 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lg->lg_mutex &mapping->private_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#31 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lg->lg_mutex &ret->b_state_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#31 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lg->lg_mutex &ret->b_state_lock &journal->j_list_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#31 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lg->lg_mutex &journal->j_revoke_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#31 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lg->lg_mutex &pa->pa_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#31 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lg->lg_mutex &lg->lg_prealloc_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#31 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &obj_hash[i].lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#31 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ei->i_raw_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#31 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &(ei->i_block_reservation_lock) irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#31 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &sb->s_type->i_lock_key#22 irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#31 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ei->i_prealloc_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#31 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ei->i_es_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#31 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ei->i_es_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#31 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ei->i_es_lock &obj_hash[i].lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#31 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ei->i_es_lock pool_lock#2 irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#31 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &memcg->move_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#31 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &xa->xa_lock#6 irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#31 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &xa->xa_lock#6 &s->s_inode_wblist_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#31 &sbi->s_writepages_rwsem jbd2_handle lock#4 &lruvec->lru_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#31 &sbi->s_writepages_rwsem jbd2_handle 
&ei->i_raw_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#31 &sbi->s_writepages_rwsem jbd2_handle &journal->j_wait_updates irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#31 &sbi->s_writepages_rwsem tk_core.seq.seqcount irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#31 &sbi->s_writepages_rwsem &dd->lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#31 &sbi->s_writepages_rwsem &rq_wait->wait irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#31 &sbi->s_writepages_rwsem rcu_read_lock &pool->lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#31 &sbi->s_writepages_rwsem rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#31 &sbi->s_writepages_rwsem rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#31 &sbi->s_writepages_rwsem rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#31 &sbi->s_writepages_rwsem rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#31 &sbi->s_writepages_rwsem rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#31 &sbi->s_writepages_rwsem rcu_node_0 irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#31 &sbi->s_writepages_rwsem &rq->__lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#31 &sbi->s_writepages_rwsem rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#31 &sbi->s_writepages_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq &rq_wait->wait irq_context: softirq &rq_wait->wait &p->pi_lock irq_context: softirq &rq_wait->wait &p->pi_lock &rq->__lock irq_context: softirq &rq_wait->wait &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq &rq_wait->wait &p->pi_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock batched_entropy_u8.lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock kfence_freelist_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock pool_lock#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 
(wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh pool_lock#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock &obj_hash[i].lock irq_context: softirq &ei->i_completed_io_lock irq_context: softirq &ei->i_completed_io_lock rcu_read_lock &pool->lock/1 irq_context: softirq &ei->i_completed_io_lock rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: softirq &ei->i_completed_io_lock rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: softirq &ei->i_completed_io_lock rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: softirq &ei->i_completed_io_lock rcu_read_lock &pool->lock/1 &p->pi_lock &cfs_rq->removed.lock irq_context: softirq &ei->i_completed_io_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: softirq &ei->i_completed_io_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)ext4-rsv-conversion irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) &ei->i_completed_io_lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) &journal->j_state_lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) &journal->j_state_lock &journal->j_wait_reserved irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle &ei->i_es_lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle &ei->i_data_sem irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle &ei->i_data_sem mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle &ei->i_data_sem pool_lock#2 irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle &ei->i_data_sem &ei->i_es_lock irq_context: 0 (wq_completion)ext4-rsv-conversion 
(work_completion)(&ei->i_rsv_conversion_work) jbd2_handle &ei->i_data_sem &mapping->private_lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle &ei->i_data_sem &ei->i_raw_lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle &ei->i_data_sem &obj_hash[i].lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle &ei->i_data_sem &ei->i_es_lock &obj_hash[i].lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle &ei->i_data_sem &ei->i_es_lock pool_lock#2 irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle &ei->i_raw_lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle &journal->j_wait_updates irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) &obj_hash[i].lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) pool_lock#2 irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) &ext4__ioend_wq[i] irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) &ret->b_uptodate_lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) rcu_read_lock &memcg->move_lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) rcu_read_lock &xa->xa_lock#6 irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) rcu_read_lock &xa->xa_lock#6 key#10 irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) rcu_read_lock &xa->xa_lock#6 key#12 irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) rcu_read_lock &xa->xa_lock#6 &wb->work_lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) rcu_read_lock &xa->xa_lock#6 &s->s_inode_wblist_lock irq_context: softirq (&journal->j_commit_timer) &p->pi_lock irq_context: softirq (&journal->j_commit_timer) &p->pi_lock &cfs_rq->removed.lock irq_context: softirq (&journal->j_commit_timer) &p->pi_lock &rq->__lock irq_context: softirq (&journal->j_commit_timer) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq (&dev->watchdog_timer) irq_context: softirq (&dev->watchdog_timer) &dev->tx_global_lock irq_context: softirq (&dev->watchdog_timer) &dev->tx_global_lock &obj_hash[i].lock irq_context: softirq (&dev->watchdog_timer) &dev->tx_global_lock &base->lock irq_context: softirq (&dev->watchdog_timer) &dev->tx_global_lock &base->lock &obj_hash[i].lock irq_context: 0 &xa->xa_lock#6 pool_lock#2 irq_context: 0 &ret->b_state_lock &journal->j_list_lock &wb->work_lock irq_context: 0 &ret->b_state_lock &journal->j_list_lock &wb->work_lock &obj_hash[i].lock irq_context: 0 &ret->b_state_lock &journal->j_list_lock &wb->work_lock &base->lock irq_context: 0 &ret->b_state_lock &journal->j_list_lock &wb->work_lock &base->lock &obj_hash[i].lock irq_context: softirq (&lapb->t1timer) irq_context: softirq (&lapb->t1timer) &lapb->lock irq_context: softirq (&lapb->t1timer) &lapb->lock &c->lock irq_context: softirq (&lapb->t1timer) &lapb->lock pool_lock#2 irq_context: softirq (&lapb->t1timer) &lapb->lock rcu_read_lock_bh 
noop_qdisc.q.lock irq_context: softirq (&lapb->t1timer) &lapb->lock rcu_read_lock_bh &obj_hash[i].lock irq_context: softirq (&lapb->t1timer) &lapb->lock rcu_read_lock_bh pool_lock#2 irq_context: softirq (&lapb->t1timer) &lapb->lock &obj_hash[i].lock irq_context: softirq (&lapb->t1timer) &lapb->lock &base->lock irq_context: softirq (&lapb->t1timer) &lapb->lock &base->lock &obj_hash[i].lock irq_context: softirq mm/memcontrol.c:589 rcu_read_lock &pool->lock/1 &p->pi_lock &cfs_rq->removed.lock irq_context: softirq (&lapb->t1timer) &lapb->lock batched_entropy_u8.lock irq_context: softirq (&lapb->t1timer) &lapb->lock kfence_freelist_lock irq_context: softirq (&lapb->t1timer) &lapb->lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 irq_context: softirq (&lapb->t1timer) &lapb->lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock pool_lock#2 irq_context: softirq (&lapb->t1timer) &lapb->lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock tk_core.seq.seqcount irq_context: softirq (&lapb->t1timer) &lapb->lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock &obj_hash[i].lock irq_context: softirq (&lapb->t1timer) &lapb->lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 &obj_hash[i].lock irq_context: softirq (&lapb->t1timer) &lapb->lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 pool_lock#2 irq_context: softirq (&lapb->t1timer) &lapb->lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 &meta->lock irq_context: softirq (&lapb->t1timer) &lapb->lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 kfence_freelist_lock irq_context: 0 (wq_completion)events (work_completion)(&(&krcp->monitor_work)->work) rcu_callback &base->lock irq_context: 0 (wq_completion)events (work_completion)(&(&krcp->monitor_work)->work) rcu_callback &base->lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock &____s->seqcount#10 irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock &c->lock irq_context: 0 rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock &____s->seqcount irq_context: softirq (&lapb->t1timer) &lapb->lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock &c->lock irq_context: 0 sk_lock-AF_INET6 &____s->seqcount irq_context: softirq (&lapb->t1timer) &lapb->lock rcu_read_lock_bh &meta->lock irq_context: softirq (&lapb->t1timer) &lapb->lock rcu_read_lock_bh kfence_freelist_lock irq_context: 0 rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: 0 rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: softirq drivers/regulator/core.c:6266 irq_context: softirq drivers/regulator/core.c:6266 rcu_read_lock &pool->lock irq_context: softirq drivers/regulator/core.c:6266 rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: softirq drivers/regulator/core.c:6266 rcu_read_lock &pool->lock &p->pi_lock irq_context: softirq drivers/regulator/core.c:6266 
rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: softirq drivers/regulator/core.c:6266 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (regulator_init_complete_work).work irq_context: 0 (wq_completion)events (regulator_init_complete_work).work &k->list_lock irq_context: 0 (wq_completion)events (regulator_init_complete_work).work &k->k_lock irq_context: 0 sk_lock-AF_PACKET &n->list_lock irq_context: 0 sk_lock-AF_PACKET &n->list_lock &c->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 &journal->j_state_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 &journal->j_state_lock tk_core.seq.seqcount irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 &journal->j_state_lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 &journal->j_state_lock &base->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 &journal->j_state_lock &base->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem rcu_read_lock &pool->lock/1 irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem &rq->__lock irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem rlock-AF_NETLINK irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem rcu_read_lock pool_lock#2 irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem rcu_read_lock &dir->lock#2 irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem &net->sctp.local_addr_lock &net->sctp.addr_wq_lock &c->lock irq_context: 0 &pipe->mutex/1 &mm->mmap_lock fs_reclaim irq_context: 0 &pipe->mutex/1 &mm->mmap_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &pipe->mutex/1 &mm->mmap_lock &____s->seqcount irq_context: 0 &pipe->mutex/1 &mm->mmap_lock mmu_notifier_invalidate_range_start irq_context: 0 &pipe->mutex/1 &mm->mmap_lock ptlock_ptr(page)#2 lock#4 irq_context: 0 sb_writers#5 &dentry->d_lock irq_context: 0 hostname_poll.wait.lock irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_state.exp_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 &mm->mmap_lock &rcu_state.expedited_wq irq_context: 0 sk_lock-AF_PACKET &po->bind_lock rcu_read_lock &c->lock irq_context: 0 &mm->mmap_lock rcu_read_lock &rcu_state.expedited_wq irq_context: 0 &mm->mmap_lock rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 &vma->vm_lock->lock pgd_lock irq_context: 0 &vma->vm_lock->lock key irq_context: 0 &vma->vm_lock->lock pcpu_lock irq_context: 0 &vma->vm_lock->lock percpu_counters_lock irq_context: 0 &vma->vm_lock->lock &cfs_rq->removed.lock irq_context: 0 rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock &n->list_lock irq_context: 0 rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock &n->list_lock &c->lock irq_context: softirq rcu_read_lock rcu_read_lock &nf_conntrack_locks[i] irq_context: softirq rcu_read_lock rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 irq_context: softirq rcu_read_lock 
rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 batched_entropy_u8.lock irq_context: softirq rcu_read_lock rcu_read_lock &nf_conntrack_locks[i]/1 irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock pool_lock#2 irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock once_lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock once_lock crngs.lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &pool->lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &pool->lock pool_lock#2 irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock tk_core.seq.seqcount irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock batched_entropy_u32.lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock &base->lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock &base->lock &obj_hash[i].lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock &hashinfo->ehash_locks[i] irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &____s->seqcount#7 irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &ct->lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &c->lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &n->lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &n->lock &obj_hash[i].lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &n->lock &base->lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &n->lock &base->lock &obj_hash[i].lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &n->lock pool_lock#2 irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &n->list_lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &n->list_lock &c->lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock pool_lock#2 irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock 
tk_core.seq.seqcount irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock &obj_hash[i].lock irq_context: softirq rcu_read_lock &n->lock irq_context: softirq rcu_read_lock &n->lock &obj_hash[i].lock irq_context: softirq rcu_read_lock &n->lock &base->lock irq_context: softirq rcu_read_lock &n->lock &base->lock &obj_hash[i].lock irq_context: softirq rcu_read_lock &n->lock &(&n->ha_lock)->lock irq_context: softirq rcu_read_lock &n->lock &(&n->ha_lock)->lock &____s->seqcount#9 irq_context: softirq rcu_read_lock rcu_read_lock &n->lock irq_context: softirq rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock pool_lock#2 irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock tk_core.seq.seqcount irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock &obj_hash[i].lock irq_context: softirq rcu_read_lock rcu_read_lock lock#8 irq_context: softirq rcu_read_lock rcu_read_lock id_table_lock irq_context: softirq rcu_read_lock &n->lock irq_context: softirq rcu_read_lock &n->lock &____s->seqcount#9 irq_context: softirq rcu_read_lock nl_table_lock irq_context: softirq rcu_read_lock rlock-AF_NETLINK irq_context: softirq rcu_read_lock rcu_read_lock &dir->lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET &obj_hash[i].lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET batched_entropy_u16.lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET &tcp_hashinfo.bhash[i].lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock &____s->seqcount irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock pool_lock#2 irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock &c->lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET &hashinfo->ehash_locks[i] irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET tk_core.seq.seqcount irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET (&req->rsk_timer) irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET &base->lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET &base->lock &obj_hash[i].lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET &queue->rskq_lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET rcu_read_lock tcp_metrics_lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET rcu_read_lock tcp_metrics_lock &____s->seqcount irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET rcu_read_lock tcp_metrics_lock pool_lock#2 irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET rcu_read_lock tcp_metrics_lock rcu_read_lock pool_lock#2 irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET 
rcu_read_lock tcp_metrics_lock &obj_hash[i].lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET pool_lock#2 irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET rcu_read_lock &ei->socket.wq.wait irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET rcu_read_lock &ei->socket.wq.wait &p->pi_lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET rcu_read_lock &ei->socket.wq.wait &p->pi_lock &cfs_rq->removed.lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET rcu_read_lock &ei->socket.wq.wait &p->pi_lock &rq->__lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET rcu_read_lock &ei->socket.wq.wait &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 tk_core.seq.seqcount irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 &c->lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 pool_lock#2 irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 rcu_read_lock rcu_read_lock &____s->seqcount#7 irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 rcu_read_lock rcu_read_lock &ct->lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock pool_lock#2 irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock tk_core.seq.seqcount irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET &queue->rskq_lock irq_context: 0 sk_lock-AF_INET clock-AF_INET irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_INET &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET &base->lock irq_context: 0 sk_lock-AF_INET &base->lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET fs_reclaim irq_context: 0 sk_lock-AF_INET fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sk_lock-AF_INET &____s->seqcount irq_context: 0 sk_lock-AF_INET pool_lock#2 irq_context: 0 sk_lock-AF_INET &c->lock irq_context: 0 sk_lock-AF_INET &mm->mmap_lock irq_context: 0 sk_lock-AF_INET tk_core.seq.seqcount irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock &____s->seqcount#7 irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock &ct->lock irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock pool_lock#2 irq_context: 0 sk_lock-AF_INET rcu_read_lock 
rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock tk_core.seq.seqcount irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock &obj_hash[i].lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 &obj_hash[i].lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 &meta->lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 kfence_freelist_lock irq_context: 0 sk_lock-AF_INET &sd->defer_lock irq_context: softirq &sd->defer_lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 rcu_read_lock &ei->socket.wq.wait irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 rcu_read_lock &ei->socket.wq.wait &p->pi_lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 rcu_read_lock &ei->socket.wq.wait &p->pi_lock &cfs_rq->removed.lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 rcu_read_lock &ei->socket.wq.wait &p->pi_lock &rq->__lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 rcu_read_lock &ei->socket.wq.wait &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 &base->lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 &base->lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET rcu_read_lock pool_lock#2 irq_context: softirq (&icsk->icsk_delack_timer) irq_context: softirq (&icsk->icsk_delack_timer) slock-AF_INET irq_context: softirq (&icsk->icsk_delack_timer) slock-AF_INET tk_core.seq.seqcount irq_context: softirq (&icsk->icsk_delack_timer) slock-AF_INET &c->lock irq_context: softirq (&icsk->icsk_delack_timer) slock-AF_INET pool_lock#2 irq_context: softirq (&icsk->icsk_delack_timer) slock-AF_INET rcu_read_lock rcu_read_lock &____s->seqcount#7 irq_context: softirq (&icsk->icsk_delack_timer) slock-AF_INET rcu_read_lock rcu_read_lock &ct->lock irq_context: softirq (&icsk->icsk_delack_timer) slock-AF_INET rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: softirq (&icsk->icsk_delack_timer) slock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 irq_context: softirq (&icsk->icsk_delack_timer) slock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock pool_lock#2 irq_context: softirq (&icsk->icsk_delack_timer) slock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock tk_core.seq.seqcount irq_context: softirq (&icsk->icsk_delack_timer) slock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 pool_lock#2 irq_context: 0 &iint->mutex sb_writers#4 &rq->__lock irq_context: 0 &u->iolock rcu_read_lock &ei->socket.wq.wait &p->pi_lock irq_context: 0 &u->iolock rcu_read_lock &ei->socket.wq.wait &p->pi_lock &rq->__lock irq_context: 0 &u->iolock rcu_read_lock &ei->socket.wq.wait &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &u->iolock 
&dir->lock irq_context: 0 (wq_completion)events (work_completion)(&aux->work) quarantine_lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 rcu_read_lock pool_lock#2 irq_context: 0 sk_lock-AF_INET &mm->mmap_lock ptlock_ptr(page)#2 irq_context: 0 sk_lock-AF_INET &mm->mmap_lock fs_reclaim irq_context: 0 sk_lock-AF_INET &mm->mmap_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sk_lock-AF_INET &mm->mmap_lock &____s->seqcount irq_context: 0 sk_lock-AF_INET &mm->mmap_lock mmu_notifier_invalidate_range_start irq_context: 0 sk_lock-AF_INET &mm->mmap_lock ptlock_ptr(page)#2 lock#4 irq_context: softirq rcu_callback uidhash_lock irq_context: softirq rcu_callback ucounts_lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 rcu_read_lock &ei->socket.wq.wait &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock &c->lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 &obj_hash[i].lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 pool_lock#2 irq_context: 0 rcu_read_lock_bh rcu_read_lock &n->list_lock irq_context: 0 rcu_read_lock_bh rcu_read_lock &n->list_lock &c->lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &____s->seqcount irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock &c->lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET fs_reclaim irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET tk_core.seq.seqcount irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET rcu_read_lock rcu_read_lock &____s->seqcount#7 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET rcu_read_lock rcu_read_lock &ct->lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock tk_core.seq.seqcount irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET &base->lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET &base->lock 
&obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET clock-AF_INET irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET &hashinfo->ehash_locks[i] irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET &tcp_hashinfo.bhash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET elock-AF_INET irq_context: 0 &sb->s_type->i_mutex_key#10 quarantine_lock irq_context: softirq (&icsk->icsk_retransmit_timer) irq_context: softirq (&icsk->icsk_retransmit_timer) slock-AF_INET irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock &c->lock irq_context: 0 &pipe->wr_wait irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 tomoyo_ss quarantine_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 tomoyo_ss remove_cache_srcu irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 tomoyo_ss remove_cache_srcu quarantine_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 tomoyo_ss remove_cache_srcu &c->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 tomoyo_ss remove_cache_srcu &n->list_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 tomoyo_ss remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 tomoyo_ss remove_cache_srcu &obj_hash[i].lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 &____s->seqcount irq_context: 0 &pipe->mutex/1 rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &xa->xa_lock#6 pool_lock#2 irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 &zone->lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 &n->list_lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 &n->list_lock &c->lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 &zone->lock &____s->seqcount irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 lock#4 &lruvec->lru_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &c->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &ei->i_es_lock key#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock &xa->xa_lock#6 key#10 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &ei->i_data_sem &(ei->i_block_reservation_lock) key#15 irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock &____s->seqcount irq_context: 0 (wq_completion)events_unbound (work_completion)(&(&kfence_timer)->work) rcu_read_lock &stopper->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&(&kfence_timer)->work) rcu_read_lock &stop_pi_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&(&kfence_timer)->work) rcu_read_lock &stop_pi_lock &rq->__lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&(&kfence_timer)->work) rcu_read_lock &stop_pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock &meta->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &pcp->lock &zone->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &mm->mmap_lock ptlock_ptr(page)#2 rcu_read_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 &vma->vm_lock->lock &c->lock irq_context: 0 
&sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock &anon_vma->rwsem rcu_read_lock rcu_node_0 irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock &anon_vma->rwsem rcu_read_lock &rq->__lock irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock &anon_vma->rwsem rcu_read_lock &rcu_state.gp_wq irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock &anon_vma->rwsem rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock &anon_vma->rwsem rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock &anon_vma->rwsem rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &vma->vm_lock->lock rcu_node_0 irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock pool_lock#2 irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock &ul->lock irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: softirq rcu_read_lock rcu_read_lock &ul->lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock &n->list_lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock &n->list_lock &c->lock irq_context: 0 &vma->vm_lock->lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &journal->j_state_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &journal->j_state_lock &journal->j_wait_commit irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &journal->j_state_lock &journal->j_wait_commit &p->pi_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &journal->j_state_lock &journal->j_wait_commit &p->pi_lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &journal->j_state_lock &journal->j_wait_commit &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &journal->j_state_lock &journal->j_wait_commit &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &journal->j_list_lock &obj_hash[i].lock irq_context: 0 &journal->j_list_lock &c->lock irq_context: 0 &journal->j_list_lock pool_lock#2 irq_context: 0 crngs.lock base_crng.lock irq_context: 0 tasklist_lock &sighand->siglock batched_entropy_u8.lock crngs.lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &journal->j_state_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &journal->j_state_lock tk_core.seq.seqcount irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &journal->j_state_lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &journal->j_state_lock &base->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &journal->j_state_lock &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ret->b_state_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ret->b_state_lock &journal->j_list_lock irq_context: 0 &mm->mmap_lock batched_entropy_u8.lock crngs.lock irq_context: 0 &mm->mmap_lock batched_entropy_u8.lock 
crngs.lock base_crng.lock irq_context: 0 &mm->mmap_lock ptlock_ptr(page)#2 &pcp->lock &zone->lock &____s->seqcount irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 batched_entropy_u8.lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 kfence_freelist_lock irq_context: 0 &vma->vm_lock->lock batched_entropy_u8.lock irq_context: 0 &vma->vm_lock->lock kfence_freelist_lock irq_context: 0 sk_lock-AF_INET &rq->__lock irq_context: softirq &(&wb->dwork)->timer rcu_read_lock &pool->lock/1 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem rcu_read_lock &c->lock irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock &c->lock irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock &____s->seqcount irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 quarantine_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 remove_cache_srcu irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 remove_cache_srcu quarantine_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 remove_cache_srcu &c->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 remove_cache_srcu &n->list_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 remove_cache_srcu &obj_hash[i].lock irq_context: 0 &pipe->wr_wait &p->pi_lock irq_context: 0 &pipe->wr_wait &p->pi_lock &rq->__lock irq_context: 0 &pipe->wr_wait &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_INET slock-AF_INET tk_core.seq.seqcount irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &rq->__lock irq_context: 0 sk_lock-AF_INET &n->list_lock irq_context: 0 sk_lock-AF_INET &n->list_lock &c->lock irq_context: softirq slock-AF_INET tk_core.seq.seqcount irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &ei->i_data_sem &rq->__lock irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock &n->list_lock irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock &n->list_lock &c->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock &rq->__lock irq_context: 0 &pipe->mutex/1 &pcp->lock &zone->lock &____s->seqcount irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &xa->xa_lock#6 &c->lock irq_context: 0 &mm->mmap_lock rcu_read_lock pgd_lock irq_context: 0 &mm->mmap_lock rcu_read_lock key irq_context: 0 &mm->mmap_lock rcu_read_lock pcpu_lock irq_context: 0 &mm->mmap_lock rcu_read_lock percpu_counters_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &meta->lock irq_context: 0 sk_lock-AF_INET rcu_read_lock &rq->__lock irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock &____s->seqcount irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_node_0 irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock &c->lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock_bh 
dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock &____s->seqcount irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &anon_vma->rwsem rcu_read_lock pool_lock#2 irq_context: 0 sk_lock-AF_PACKET &rnp->exp_lock irq_context: 0 sk_lock-AF_PACKET rcu_state.exp_mutex irq_context: 0 sk_lock-AF_PACKET rcu_state.exp_mutex rcu_state.exp_mutex.wait_lock irq_context: 0 sk_lock-AF_PACKET rcu_state.exp_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_PACKET rcu_state.exp_mutex.wait_lock irq_context: 0 sk_lock-AF_PACKET &p->pi_lock irq_context: 0 sk_lock-AF_PACKET &p->pi_lock &rq->__lock irq_context: 0 sk_lock-AF_PACKET &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_node_0 irq_context: 0 &sb->s_type->i_mutex_key#10 &rcu_state.expedited_wq irq_context: 0 &sb->s_type->i_mutex_key#10 &rcu_state.expedited_wq &p->pi_lock irq_context: 0 &sb->s_type->i_mutex_key#10 &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_state.exp_mutex &rnp->exp_wq[0] irq_context: 0 &mm->mmap_lock &rcu_state.expedited_wq irq_context: 0 &mm->mmap_lock rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 &mm->mmap_lock rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 &mm->mmap_lock rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_state.exp_mutex.wait_lock irq_context: 0 &sb->s_type->i_mutex_key#10 &p->pi_lock irq_context: 0 &sb->s_type->i_mutex_key#10 &p->pi_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &vma->vm_lock->lock &rcu_state.expedited_wq irq_context: 0 &vma->vm_lock->lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 &pipe->mutex/1 rcu_read_lock &rq->__lock irq_context: 0 &pipe->mutex/1 rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &pipe->mutex/1 rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 &pipe->mutex/1 rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 &pipe->mutex/1 rcu_read_lock rcu_node_0 irq_context: 0 &pipe->wr_wait &p->pi_lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &xa->xa_lock#6 &n->list_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &xa->xa_lock#6 &n->list_lock &c->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 remove_cache_srcu rcu_read_lock &____s->seqcount irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock &pcp->lock &zone->lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 quarantine_lock irq_context: 0 &vma->vm_lock->lock rcu_read_lock &rcu_state.expedited_wq irq_context: 0 &vma->vm_lock->lock rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock &rq->__lock &cfs_rq->removed.lock 
irq_context: 0 &pipe->mutex/1 &rq->__lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 lock#4 rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 lock#4 &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET &pcp->lock &zone->lock irq_context: 0 sk_lock-AF_INET &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &pipe->wr_wait &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 sk_lock-AF_INET batched_entropy_u8.lock irq_context: 0 sk_lock-AF_INET kfence_freelist_lock irq_context: 0 sk_lock-AF_INET &meta->lock irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock &pcp->lock &zone->lock irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 remove_cache_srcu &pcp->lock &zone->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 remove_cache_srcu &pcp->lock &zone->lock &____s->seqcount irq_context: softirq slock-AF_INET rcu_read_lock rcu_read_lock &____s->seqcount#7 irq_context: softirq slock-AF_INET rcu_read_lock rcu_read_lock &ct->lock irq_context: softirq slock-AF_INET rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: softirq slock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 irq_context: softirq slock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock &c->lock irq_context: softirq slock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock &____s->seqcount irq_context: softirq slock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock pool_lock#2 irq_context: softirq slock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock tk_core.seq.seqcount irq_context: softirq slock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock &obj_hash[i].lock irq_context: softirq slock-AF_INET &obj_hash[i].lock irq_context: softirq slock-AF_INET &base->lock irq_context: softirq slock-AF_INET &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#4 tomoyo_ss irq_context: 0 sb_writers#4 tomoyo_ss mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#4 tomoyo_ss rcu_read_lock mount_lock.seqcount irq_context: 0 sb_writers#4 tomoyo_ss rcu_read_lock rcu_read_lock rename_lock.seqcount irq_context: 0 sb_writers#4 tomoyo_ss &obj_hash[i].lock irq_context: 0 sb_writers#4 tomoyo_ss &c->lock irq_context: 0 sb_writers#4 tomoyo_ss rcu_read_lock &dentry->d_lock irq_context: 0 sb_writers#4 tomoyo_ss &rq->__lock irq_context: 0 sb_writers#4 tomoyo_ss tomoyo_policy_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &mapping->i_mmap_rwsem irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock 
mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock pool_lock#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &journal->j_state_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle pool_lock#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ret->b_state_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ret->b_state_lock &journal->j_list_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &sbi->s_orphan_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_raw_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem &ei->i_prealloc_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem &ei->i_raw_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem &ei->i_es_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem &ei->i_es_lock &sbi->s_es_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem &ei->i_es_lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem &ei->i_es_lock pool_lock#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem pool_lock#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem &obj_hash[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle tk_core.seq.seqcount irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &journal->j_wait_updates irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &obj_hash[i].lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 &hashinfo->ehash_locks[i] irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 &tcp_hashinfo.bhash[i].lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock &obj_hash[i].lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock pool_lock#2 irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 elock-AF_INET irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &ul->lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 
rcu_read_lock batched_entropy_u8.lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock kfence_freelist_lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock &meta->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 tomoyo_ss &____s->seqcount irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 &c->lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 batched_entropy_u8.lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 &meta->lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 kfence_freelist_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &____s->seqcount irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle pool_lock#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle bit_wait_table + i irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &obj_hash[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &journal->j_list_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem &c->lock irq_context: 0 sk_lock-AF_INET remove_cache_srcu irq_context: 0 sk_lock-AF_INET remove_cache_srcu quarantine_lock irq_context: 0 sk_lock-AF_INET remove_cache_srcu &c->lock irq_context: 0 sk_lock-AF_INET remove_cache_srcu &n->list_lock irq_context: 0 sk_lock-AF_INET remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 sk_lock-AF_INET remove_cache_srcu &obj_hash[i].lock irq_context: 0 rcu_read_lock rcu_read_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 lock#5 irq_context: 0 tomoyo_ss &rq->__lock &cfs_rq->removed.lock irq_context: softirq (&icsk->icsk_delack_timer) slock-AF_INET batched_entropy_u8.lock irq_context: softirq (&icsk->icsk_delack_timer) slock-AF_INET kfence_freelist_lock irq_context: 0 &u->iolock rcu_read_lock rcu_node_0 irq_context: 0 &u->iolock rcu_read_lock &rq->__lock irq_context: 0 &sig->cred_guard_mutex &iint->mutex batched_entropy_u8.lock irq_context: 0 &sig->cred_guard_mutex &iint->mutex kfence_freelist_lock irq_context: 0 kn->active#47 fs_reclaim irq_context: 0 kn->active#47 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#47 &c->lock irq_context: 0 kn->active#47 &kernfs_locks->open_file_mutex[count] irq_context: 0 kn->active#47 &kernfs_locks->open_file_mutex[count] fs_reclaim irq_context: 0 kn->active#47 &kernfs_locks->open_file_mutex[count] fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &mm->mmap_lock khugepaged_mm_lock irq_context: 0 &mm->mmap_lock khugepaged_wait.lock irq_context: 0 &mm->mmap_lock khugepaged_wait.lock &p->pi_lock irq_context: 0 lock#3 &obj_hash[i].lock irq_context: 0 lock#3 rcu_read_lock &pool->lock irq_context: 0 
lock#3 rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 lock#3 rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 lock#3 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 lock#3 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 lock#3 &rq->__lock irq_context: 0 (wq_completion)mm_percpu_wq (work_completion)(work) irq_context: 0 (wq_completion)mm_percpu_wq (work_completion)(work) lock#4 irq_context: 0 (wq_completion)mm_percpu_wq (work_completion)(work) lock#4 &lruvec->lru_lock irq_context: 0 (wq_completion)mm_percpu_wq (work_completion)(work) lock#5 irq_context: 0 lock#3 (work_completion)(work) irq_context: 0 rcu_read_lock &sighand->siglock irq_context: 0 rcu_read_lock &sighand->siglock pool_lock#2 irq_context: 0 rcu_read_lock &sighand->siglock &p->pi_lock irq_context: 0 &futex_queues[i].lock irq_context: 0 rcu_read_lock &sighand->siglock &c->lock irq_context: 0 rcu_read_lock &sighand->siglock batched_entropy_u8.lock irq_context: 0 rcu_read_lock &sighand->siglock kfence_freelist_lock irq_context: 0 rcu_read_lock &sighand->siglock &____s->seqcount irq_context: 0 &ep->mtx &ep->lock &ep->wq irq_context: 0 &ep->mtx &ep->lock &ep->wq &p->pi_lock irq_context: 0 &ep->mtx &ep->lock &ep->wq &p->pi_lock &rq->__lock irq_context: 0 &ep->mtx &ep->lock &ep->wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &ep->mtx &lock->wait_lock irq_context: 0 &ep->mtx &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock &p->lock irq_context: 0 &f->f_pos_lock &p->lock fs_reclaim irq_context: 0 &f->f_pos_lock &p->lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &f->f_pos_lock &p->lock &c->lock irq_context: 0 &f->f_pos_lock &p->lock &rq->__lock irq_context: 0 &f->f_pos_lock &p->lock cpufreq_driver_lock irq_context: 0 &f->f_pos_lock &p->lock &mm->mmap_lock irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#4 irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#4 &dentry->d_lock irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#4 &root->kernfs_rwsem irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#4 tk_core.seq.seqcount irq_context: 0 &f->f_pos_lock &mm->mmap_lock irq_context: 0 kn->active#4 &c->lock irq_context: 0 &ep->mtx kn->active#4 fs_reclaim irq_context: 0 &ep->mtx kn->active#4 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &ep->mtx kn->active#4 pool_lock#2 irq_context: 0 &ep->mtx kn->active#4 &on->poll irq_context: 0 &f->f_pos_lock &p->lock &of->mutex irq_context: 0 &f->f_pos_lock &p->lock &of->mutex kn->active#4 param_lock irq_context: 0 &ep->mtx rcu_read_lock &on->poll irq_context: 0 &f->f_pos_lock &p->lock batched_entropy_u8.lock irq_context: 0 &f->f_pos_lock &p->lock kfence_freelist_lock irq_context: 0 &f->f_pos_lock &p->lock &n->list_lock irq_context: 0 &f->f_pos_lock &p->lock &n->list_lock &c->lock irq_context: 0 kn->active#4 &n->list_lock irq_context: 0 kn->active#4 &n->list_lock &c->lock irq_context: 0 kn->active#4 &kernfs_locks->open_file_mutex[count] &c->lock irq_context: 0 &ep->mtx &obj_hash[i].lock pool_lock irq_context: 0 &ep->mtx kn->active#4 &c->lock irq_context: 0 &ep->mtx kn->active#4 &rq->__lock irq_context: 0 kn->active#4 &kernfs_locks->open_file_mutex[count] &rq->__lock irq_context: 0 &f->f_pos_lock &p->lock &of->mutex &rq->__lock irq_context: 0 &f->f_pos_lock &p->lock remove_cache_srcu irq_context: 0 &f->f_pos_lock &p->lock remove_cache_srcu quarantine_lock irq_context: 0 &f->f_pos_lock &p->lock remove_cache_srcu &c->lock 
irq_context: 0 &f->f_pos_lock &p->lock remove_cache_srcu &n->list_lock irq_context: 0 &f->f_pos_lock &p->lock remove_cache_srcu &obj_hash[i].lock irq_context: 0 kn->active#4 &kernfs_locks->open_file_mutex[count] &n->list_lock irq_context: 0 kn->active#4 &kernfs_locks->open_file_mutex[count] &n->list_lock &c->lock irq_context: 0 kn->active#4 remove_cache_srcu irq_context: 0 kn->active#4 remove_cache_srcu quarantine_lock irq_context: 0 kn->active#4 remove_cache_srcu &c->lock irq_context: 0 kn->active#4 remove_cache_srcu &n->list_lock irq_context: 0 kn->active#4 remove_cache_srcu &obj_hash[i].lock irq_context: 0 &f->f_pos_lock &p->lock &____s->seqcount irq_context: 0 &f->f_pos_lock &p->lock pool_lock#2 irq_context: 0 &f->f_pos_lock &p->lock module_mutex irq_context: 0 sk_lock-AF_INET &____s->seqcount#8 irq_context: 0 sk_lock-AF_INET once_mutex irq_context: 0 sk_lock-AF_INET once_mutex crngs.lock irq_context: 0 sk_lock-AF_INET rcu_read_lock &pool->lock irq_context: 0 sk_lock-AF_INET rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 sk_lock-AF_INET rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 sk_lock-AF_INET rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 sk_lock-AF_INET rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_INET &tcp_hashinfo.bhash[i].lock &____s->seqcount irq_context: 0 sk_lock-AF_INET &tcp_hashinfo.bhash[i].lock &c->lock irq_context: 0 sk_lock-AF_INET &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock batched_entropy_u8.lock irq_context: 0 sk_lock-AF_INET &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock &hashinfo->ehash_locks[i] irq_context: 0 sk_lock-AF_INET batched_entropy_u32.lock irq_context: 0 sk_lock-AF_INET batched_entropy_u16.lock irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock &nf_conntrack_locks[i] irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 batched_entropy_u8.lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 rcu_read_lock &ei->socket.wq.wait &ep->lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock &cfs_rq->removed.lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock &rq->__lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &ep->mtx &pipe->wr_wait irq_context: 0 rcu_read_lock tasklist_lock irq_context: 0 &ep->mtx rcu_read_lock &pipe->wr_wait irq_context: 0 &ep->mtx rcu_read_lock rcu_node_0 irq_context: 0 &ep->mtx rcu_read_lock &rq->__lock irq_context: 0 &sb->s_type->i_lock_key#7 irq_context: 0 sb_writers#9 irq_context: 0 sb_writers#9 &attr->mutex irq_context: 0 sb_writers#9 &attr->mutex &mm->mmap_lock irq_context: 0 sb_writers#3 &p->pi_lock irq_context: 0 sb_writers#3 &p->pi_lock &rq->__lock irq_context: 0 sb_writers#3 &p->pi_lock &rq->__lock 
&per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#3 &rq->__lock irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 &c->lock irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 &n->list_lock irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 &n->list_lock &c->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss pool_lock#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss rcu_read_lock mount_lock.seqcount irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss rcu_read_lock rcu_read_lock rename_lock.seqcount irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss &c->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss rcu_read_lock &dentry->d_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss tomoyo_policy_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss tomoyo_policy_lock mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss tomoyo_policy_lock pool_lock#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &s->s_inode_list_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &ei->xattr_sem irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &ei->xattr_sem &mapping->private_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle pool_lock#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ret->b_state_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ret->b_state_lock &journal->j_list_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &journal->j_revoke_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &meta_group_info[i]->alloc_sem irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle tk_core.seq.seqcount irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle inode_hash_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle inode_hash_lock &sb->s_type->i_lock_key#22 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle batched_entropy_u32.lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &____s->seqcount irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &xa->xa_lock#6 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &xa->xa_lock#6 pool_lock#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle lock#4 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &c->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &mapping->private_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_raw_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_es_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem pool_lock#2 irq_context: 0 sb_writers#4 
&type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem &ei->i_es_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem &ei->i_es_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem &ei->i_es_lock pool_lock#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem &ei->i_es_lock &sbi->s_es_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem pool_lock#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem &sb->s_type->i_lock_key#22 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem &ei->i_raw_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem &wb->list_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem &wb->list_lock &sb->s_type->i_lock_key#22 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem &____s->seqcount irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem &xa->xa_lock#6 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem lock#4 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem &mapping->private_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem &ret->b_state_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem &ret->b_state_lock &journal->j_list_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem &journal->j_revoke_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem &ei->i_es_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem &ei->i_es_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem &ei->i_es_lock pool_lock#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &sb->s_type->i_lock_key#22 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &sb->s_type->i_lock_key#22 &dentry->d_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &journal->j_wait_updates irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss &n->list_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss &n->list_lock &c->lock irq_context: 0 &type->s_umount_key#41/1 irq_context: 0 &type->s_umount_key#41/1 fs_reclaim irq_context: 0 &type->s_umount_key#41/1 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#41/1 pool_lock#2 irq_context: 0 &type->s_umount_key#41/1 pcpu_alloc_mutex irq_context: 0 &type->s_umount_key#41/1 pcpu_alloc_mutex pcpu_lock irq_context: 0 &type->s_umount_key#41/1 shrinker_rwsem irq_context: 0 &type->s_umount_key#41/1 list_lrus_mutex irq_context: 0 &type->s_umount_key#41/1 sb_lock irq_context: 0 &type->s_umount_key#41/1 
sb_lock unnamed_dev_ida.xa_lock irq_context: 0 &type->s_umount_key#41/1 &root->kernfs_rwsem irq_context: 0 &type->s_umount_key#41/1 &root->kernfs_rwsem inode_hash_lock irq_context: 0 &type->s_umount_key#41/1 &root->kernfs_rwsem fs_reclaim irq_context: 0 &type->s_umount_key#41/1 &root->kernfs_rwsem fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#41/1 &root->kernfs_rwsem pool_lock#2 irq_context: 0 &type->s_umount_key#41/1 &root->kernfs_rwsem mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#41/1 &root->kernfs_rwsem inode_hash_lock &sb->s_type->i_lock_key#30 irq_context: 0 &type->s_umount_key#41/1 &root->kernfs_rwsem inode_hash_lock &s->s_inode_list_lock irq_context: 0 &type->s_umount_key#41/1 &root->kernfs_rwsem tk_core.seq.seqcount irq_context: 0 &type->s_umount_key#41/1 &root->kernfs_rwsem &sb->s_type->i_lock_key#30 irq_context: 0 &type->s_umount_key#41/1 &sb->s_type->i_lock_key#30 irq_context: 0 &type->s_umount_key#41/1 &sb->s_type->i_lock_key#30 &dentry->d_lock irq_context: 0 &type->s_umount_key#41/1 &root->kernfs_supers_rwsem irq_context: 0 &type->s_umount_key#41/1 &dentry->d_lock irq_context: 0 sb_writers#10 irq_context: 0 sb_writers#10 mount_lock irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6 irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6 tomoyo_ss irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6 tomoyo_ss mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6 tomoyo_ss rcu_read_lock mount_lock.seqcount irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6 tomoyo_ss rcu_read_lock rcu_read_lock rename_lock.seqcount irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6 tomoyo_ss &obj_hash[i].lock irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6 tomoyo_ss &c->lock irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6 tomoyo_ss rcu_read_lock &dentry->d_lock irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6 tomoyo_ss tomoyo_policy_lock irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6 tk_core.seq.seqcount irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6 &root->kernfs_iattr_rwsem irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6 &root->kernfs_iattr_rwsem iattr_mutex irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6 &root->kernfs_iattr_rwsem iattr_mutex fs_reclaim irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6 &root->kernfs_iattr_rwsem iattr_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6 &root->kernfs_iattr_rwsem iattr_mutex pool_lock#2 irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6 &root->kernfs_iattr_rwsem iattr_mutex tk_core.seq.seqcount irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6 irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6 rename_lock.seqcount irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6 fs_reclaim irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6 &c->lock irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6 &dentry->d_lock irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6 rcu_read_lock rename_lock.seqcount irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6 &root->kernfs_rwsem irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6 &root->kernfs_rwsem inode_hash_lock irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6 &root->kernfs_rwsem fs_reclaim irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6 &root->kernfs_rwsem fs_reclaim 
mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6 &root->kernfs_rwsem mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6 &root->kernfs_rwsem inode_hash_lock &sb->s_type->i_lock_key#30 irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6 &root->kernfs_rwsem inode_hash_lock &s->s_inode_list_lock irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6 &root->kernfs_rwsem tk_core.seq.seqcount irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6 &root->kernfs_rwsem &sb->s_type->i_lock_key#30 irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6 &sb->s_type->i_lock_key#30 irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6 &sb->s_type->i_lock_key#30 &dentry->d_lock irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6 &sb->s_type->i_lock_key#30 &dentry->d_lock &wq#2 irq_context: 0 kn->active#48 fs_reclaim irq_context: 0 kn->active#48 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#48 &c->lock irq_context: 0 kn->active#48 &n->list_lock irq_context: 0 kn->active#48 &n->list_lock &c->lock irq_context: 0 kn->active#48 &kernfs_locks->open_file_mutex[count] irq_context: 0 kn->active#48 &kernfs_locks->open_file_mutex[count] fs_reclaim irq_context: 0 kn->active#48 &kernfs_locks->open_file_mutex[count] fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#48 pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#15 irq_context: 0 sb_writers#10 fs_reclaim irq_context: 0 sb_writers#10 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#10 &mm->mmap_lock irq_context: 0 sb_writers#10 &of->mutex irq_context: 0 sb_writers#10 &of->mutex cgroup_mutex irq_context: 0 sb_writers#10 &of->mutex cgroup_mutex css_set_lock irq_context: 0 sb_writers#10 &of->mutex cgroup_mutex cpu_hotplug_lock irq_context: 0 sb_writers#10 &of->mutex cgroup_mutex cpu_hotplug_lock css_set_lock irq_context: 0 sb_writers#10 &of->mutex cgroup_mutex &root->kernfs_rwsem irq_context: 0 sb_writers#10 &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &c->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &n->list_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &n->list_lock &c->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem &ei->i_es_lock &c->lock irq_context: 0 cgroup_mutex fs_reclaim irq_context: 0 cgroup_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 cgroup_mutex lock kernfs_idr_lock &c->lock irq_context: 0 cgroup_mutex &n->list_lock &c->lock irq_context: 0 cgroup_mutex cpu_hotplug_lock css_set_lock irq_context: 0 cgroup_mutex cpu_hotplug_lock jump_label_mutex irq_context: 0 cgroup_mutex css_set_lock cgroup_file_kn_lock irq_context: 0 &type->s_umount_key#42/1 irq_context: 0 &type->s_umount_key#42/1 fs_reclaim irq_context: 0 &type->s_umount_key#42/1 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#42/1 pool_lock#2 irq_context: 0 &type->s_umount_key#42/1 pcpu_alloc_mutex irq_context: 0 &type->s_umount_key#42/1 pcpu_alloc_mutex pcpu_lock irq_context: 0 &type->s_umount_key#42/1 shrinker_rwsem irq_context: 0 &type->s_umount_key#42/1 list_lrus_mutex irq_context: 0 &type->s_umount_key#42/1 sb_lock irq_context: 0 &type->s_umount_key#42/1 sb_lock unnamed_dev_ida.xa_lock irq_context: 0 &type->s_umount_key#42/1 &root->kernfs_rwsem irq_context: 0 &type->s_umount_key#42/1 &root->kernfs_rwsem inode_hash_lock irq_context: 0 &type->s_umount_key#42/1 &root->kernfs_rwsem fs_reclaim 
irq_context: 0 &type->s_umount_key#42/1 &root->kernfs_rwsem fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#42/1 &root->kernfs_rwsem pool_lock#2 irq_context: 0 &type->s_umount_key#42/1 &root->kernfs_rwsem mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#42/1 &root->kernfs_rwsem inode_hash_lock &sb->s_type->i_lock_key#31 irq_context: 0 &type->s_umount_key#42/1 &root->kernfs_rwsem inode_hash_lock &s->s_inode_list_lock irq_context: 0 &type->s_umount_key#42/1 &root->kernfs_rwsem tk_core.seq.seqcount irq_context: 0 &type->s_umount_key#42/1 &root->kernfs_rwsem &sb->s_type->i_lock_key#31 irq_context: 0 &type->s_umount_key#42/1 &sb->s_type->i_lock_key#31 irq_context: 0 &type->s_umount_key#42/1 &sb->s_type->i_lock_key#31 &dentry->d_lock irq_context: 0 &type->s_umount_key#42/1 &root->kernfs_supers_rwsem irq_context: 0 &type->s_umount_key#42/1 &dentry->d_lock irq_context: 0 &type->s_umount_key#43 irq_context: 0 &type->s_umount_key#43 shrinker_rwsem irq_context: 0 &type->s_umount_key#43 percpu_ref_switch_lock irq_context: 0 &type->s_umount_key#43 percpu_ref_switch_lock &obj_hash[i].lock irq_context: 0 &type->s_umount_key#43 percpu_ref_switch_lock pool_lock#2 irq_context: 0 &type->s_umount_key#43 &root->kernfs_supers_rwsem irq_context: 0 &type->s_umount_key#43 rename_lock.seqcount irq_context: 0 &type->s_umount_key#43 &dentry->d_lock irq_context: 0 &type->s_umount_key#43 rcu_read_lock &dentry->d_lock irq_context: 0 &type->s_umount_key#43 &sb->s_type->i_lock_key#31 irq_context: 0 &type->s_umount_key#43 &s->s_inode_list_lock irq_context: 0 &type->s_umount_key#43 &xa->xa_lock#6 irq_context: 0 &type->s_umount_key#43 inode_hash_lock irq_context: 0 &type->s_umount_key#43 inode_hash_lock &sb->s_type->i_lock_key#31 irq_context: 0 &type->s_umount_key#43 &obj_hash[i].lock irq_context: 0 &type->s_umount_key#43 pool_lock#2 irq_context: 0 &type->s_umount_key#43 &fsnotify_mark_srcu irq_context: 0 &type->s_umount_key#43 sb_lock irq_context: 0 &type->s_umount_key#42/1 &c->lock irq_context: 0 cgroup_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem tk_core.seq.seqcount irq_context: 0 cgroup_mutex &root->kernfs_rwsem &obj_hash[i].lock irq_context: 0 cgroup_mutex &root->kernfs_rwsem pool_lock#2 irq_context: 0 cgroup_mutex &root->kernfs_rwsem kernfs_idr_lock irq_context: 0 cgroup_mutex rcu_state.exp_mutex rcu_node_0 irq_context: 0 cgroup_mutex rcu_state.exp_mutex &obj_hash[i].lock irq_context: 0 cgroup_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock irq_context: 0 cgroup_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 cgroup_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 cgroup_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 cgroup_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cgroup_mutex rcu_state.exp_mutex &rnp->exp_wq[0] irq_context: 0 cgroup_mutex rcu_state.exp_mutex &rq->__lock irq_context: 0 cgroup_mutex cpu_hotplug_lock jump_label_mutex text_mutex irq_context: 0 cgroup_mutex cpu_hotplug_lock jump_label_mutex text_mutex ptlock_ptr(page)#2 irq_context: 0 &type->s_umount_key#42/1 &root->kernfs_rwsem &c->lock irq_context: 0 &type->s_umount_key#42/1 &root->kernfs_rwsem &____s->seqcount irq_context: 0 &type->s_umount_key#43 &obj_hash[i].lock pool_lock irq_context: 0 &type->i_mutex_dir_key#3 namespace_sem &c->lock irq_context: hardirq log_wait.lock &p->pi_lock 
&cfs_rq->removed.lock irq_context: softirq rcu_callback rcu_read_lock rcu_read_lock &pool->lock irq_context: softirq rcu_callback rcu_read_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: softirq rcu_callback rcu_read_lock rcu_read_lock &pool->lock pool_lock#2 irq_context: softirq rcu_callback rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: softirq rcu_callback rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: softirq rcu_callback rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&cgrp->bpf.release_work) irq_context: 0 (wq_completion)events (work_completion)(&cgrp->bpf.release_work) cgroup_mutex irq_context: 0 (wq_completion)events (work_completion)(&cgrp->bpf.release_work) percpu_ref_switch_lock irq_context: 0 (wq_completion)events (work_completion)(&cgrp->bpf.release_work) &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&cgrp->bpf.release_work) pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&cgrp->bpf.release_work) rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&cgrp->bpf.release_work) rcu_read_lock rcu_read_lock &pool->lock irq_context: 0 (wq_completion)events (work_completion)(&cgrp->bpf.release_work) rcu_read_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&cgrp->bpf.release_work) rcu_read_lock rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 (wq_completion)cgroup_destroy irq_context: 0 (wq_completion)cgroup_destroy (work_completion)(&css->destroy_work) irq_context: 0 (wq_completion)cgroup_destroy (work_completion)(&css->destroy_work) cgroup_mutex irq_context: 0 (wq_completion)cgroup_destroy (work_completion)(&css->destroy_work) cgroup_mutex cgroup_rstat_lock irq_context: 0 (wq_completion)cgroup_destroy (work_completion)(&css->destroy_work) cgroup_mutex cgroup_rstat_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 (wq_completion)cgroup_destroy (work_completion)(&css->destroy_work) cgroup_mutex css_set_lock irq_context: 0 (wq_completion)cgroup_destroy (work_completion)(&css->destroy_work) &obj_hash[i].lock irq_context: 0 (wq_completion)cgroup_destroy (work_completion)(&css->destroy_work) pool_lock#2 irq_context: 0 (wq_completion)cgroup_destroy (work_completion)(&(&css->destroy_rwork)->work) irq_context: 0 (wq_completion)cgroup_destroy (work_completion)(&(&css->destroy_rwork)->work) percpu_ref_switch_lock irq_context: 0 (wq_completion)cgroup_destroy (work_completion)(&(&css->destroy_rwork)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)cgroup_destroy (work_completion)(&(&css->destroy_rwork)->work) pool_lock#2 irq_context: 0 (wq_completion)cgroup_destroy (work_completion)(&(&css->destroy_rwork)->work) &cgrp->pidlist_mutex irq_context: 0 (wq_completion)cgroup_destroy (work_completion)(&(&css->destroy_rwork)->work) (wq_completion)cgroup_pidlist_destroy irq_context: 0 (wq_completion)cgroup_destroy (work_completion)(&(&css->destroy_rwork)->work) &wq->mutex irq_context: 0 (wq_completion)cgroup_destroy (work_completion)(&(&css->destroy_rwork)->work) &wq->mutex &pool->lock irq_context: 0 (wq_completion)cgroup_destroy (work_completion)(&(&css->destroy_rwork)->work) &wq->mutex &x->wait#10 irq_context: 0 (wq_completion)cgroup_destroy (work_completion)(&(&css->destroy_rwork)->work) (work_completion)(&cgrp->release_agent_work) irq_context: 0 (wq_completion)cgroup_destroy 
(work_completion)(&(&css->destroy_rwork)->work) cgroup_mutex irq_context: 0 (wq_completion)cgroup_destroy (work_completion)(&(&css->destroy_rwork)->work) cgroup_mutex css_set_lock irq_context: 0 (wq_completion)cgroup_destroy (work_completion)(&(&css->destroy_rwork)->work) cgroup_mutex cpu_hotplug_lock irq_context: 0 (wq_completion)cgroup_destroy (work_completion)(&(&css->destroy_rwork)->work) cgroup_mutex cpu_hotplug_lock css_set_lock irq_context: 0 (wq_completion)cgroup_destroy (work_completion)(&(&css->destroy_rwork)->work) cgroup_mutex &root->kernfs_rwsem irq_context: 0 (wq_completion)cgroup_destroy (work_completion)(&(&css->destroy_rwork)->work) cgroup_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 (wq_completion)cgroup_destroy (work_completion)(&(&css->destroy_rwork)->work) cgroup_mutex &root->kernfs_rwsem &obj_hash[i].lock irq_context: 0 (wq_completion)cgroup_destroy (work_completion)(&(&css->destroy_rwork)->work) cgroup_mutex &root->kernfs_rwsem pool_lock#2 irq_context: 0 (wq_completion)cgroup_destroy (work_completion)(&(&css->destroy_rwork)->work) cgroup_mutex &root->kernfs_rwsem kernfs_idr_lock irq_context: 0 (wq_completion)cgroup_destroy (work_completion)(&(&css->destroy_rwork)->work) cgroup_mutex cpu_hotplug_lock jump_label_mutex irq_context: 0 (wq_completion)cgroup_destroy (work_completion)(&(&css->destroy_rwork)->work) cgroup_mutex css_set_lock &obj_hash[i].lock irq_context: 0 (wq_completion)cgroup_destroy (work_completion)(&(&css->destroy_rwork)->work) cgroup_mutex css_set_lock pool_lock#2 irq_context: 0 (wq_completion)cgroup_destroy (work_completion)(&(&css->destroy_rwork)->work) cgroup_rstat_lock irq_context: 0 (wq_completion)cgroup_destroy (work_completion)(&(&css->destroy_rwork)->work) cgroup_rstat_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 (wq_completion)cgroup_destroy (work_completion)(&(&css->destroy_rwork)->work) pcpu_lock irq_context: 0 (wq_completion)cgroup_destroy (work_completion)(&(&css->destroy_rwork)->work) &root->kernfs_rwsem irq_context: 0 (wq_completion)cgroup_destroy (work_completion)(&(&css->destroy_rwork)->work) &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 (wq_completion)cgroup_destroy (work_completion)(&(&css->destroy_rwork)->work) &root->kernfs_rwsem &obj_hash[i].lock irq_context: 0 (wq_completion)cgroup_destroy (work_completion)(&(&css->destroy_rwork)->work) &root->kernfs_rwsem pool_lock#2 irq_context: 0 (wq_completion)cgroup_destroy (work_completion)(&(&css->destroy_rwork)->work) &root->kernfs_rwsem kernfs_idr_lock irq_context: 0 (wq_completion)cgroup_destroy (work_completion)(&(&css->destroy_rwork)->work) kernfs_idr_lock irq_context: 0 (wq_completion)cgroup_destroy (work_completion)(&(&css->destroy_rwork)->work) kernfs_idr_lock &obj_hash[i].lock irq_context: 0 (wq_completion)cgroup_destroy (work_completion)(&(&css->destroy_rwork)->work) kernfs_idr_lock pool_lock#2 irq_context: 0 (wq_completion)cgroup_destroy (work_completion)(&(&css->destroy_rwork)->work) cgroup_mutex rcu_state.exp_mutex rcu_node_0 irq_context: 0 (wq_completion)cgroup_destroy (work_completion)(&(&css->destroy_rwork)->work) cgroup_mutex rcu_state.exp_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)cgroup_destroy (work_completion)(&(&css->destroy_rwork)->work) cgroup_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock irq_context: 0 (wq_completion)cgroup_destroy (work_completion)(&(&css->destroy_rwork)->work) cgroup_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 
(wq_completion)cgroup_destroy (work_completion)(&(&css->destroy_rwork)->work) cgroup_mutex rcu_state.exp_mutex &rnp->exp_wq[3] irq_context: 0 (wq_completion)cgroup_destroy (work_completion)(&(&css->destroy_rwork)->work) cgroup_mutex rcu_state.exp_mutex &pool->lock irq_context: 0 (wq_completion)cgroup_destroy (work_completion)(&(&css->destroy_rwork)->work) cgroup_mutex rcu_state.exp_mutex &pool->lock &p->pi_lock irq_context: 0 (wq_completion)cgroup_destroy (work_completion)(&(&css->destroy_rwork)->work) cgroup_mutex rcu_state.exp_mutex &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)cgroup_destroy (work_completion)(&(&css->destroy_rwork)->work) cgroup_mutex rcu_state.exp_mutex &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)cgroup_destroy (work_completion)(&(&css->destroy_rwork)->work) cgroup_mutex rcu_state.exp_mutex &rq->__lock irq_context: 0 (wq_completion)cgroup_destroy (work_completion)(&(&css->destroy_rwork)->work) cgroup_mutex rcu_state.exp_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)cgroup_destroy (work_completion)(&(&css->destroy_rwork)->work) cgroup_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)cgroup_destroy (work_completion)(&(&css->destroy_rwork)->work) cgroup_mutex cpu_hotplug_lock jump_label_mutex text_mutex irq_context: 0 (wq_completion)cgroup_destroy (work_completion)(&(&css->destroy_rwork)->work) cgroup_mutex cpu_hotplug_lock jump_label_mutex text_mutex ptlock_ptr(page)#2 irq_context: 0 (wq_completion)cgroup_destroy (work_completion)(&(&css->destroy_rwork)->work) cgroup_mutex fs_reclaim irq_context: 0 (wq_completion)cgroup_destroy (work_completion)(&(&css->destroy_rwork)->work) cgroup_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)cgroup_destroy (work_completion)(&(&css->destroy_rwork)->work) cgroup_mutex pool_lock#2 irq_context: 0 (wq_completion)cgroup_destroy (work_completion)(&(&css->destroy_rwork)->work) cgroup_mutex &c->lock irq_context: 0 (wq_completion)cgroup_destroy (work_completion)(&(&css->destroy_rwork)->work) cgroup_mutex lock irq_context: 0 (wq_completion)cgroup_destroy (work_completion)(&(&css->destroy_rwork)->work) cgroup_mutex lock kernfs_idr_lock irq_context: 0 (wq_completion)cgroup_destroy (work_completion)(&(&css->destroy_rwork)->work) cgroup_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem tk_core.seq.seqcount irq_context: 0 sb_writers#11 irq_context: 0 sb_writers#11 mount_lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7 irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7 tomoyo_ss irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7 tomoyo_ss mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7 tomoyo_ss rcu_read_lock mount_lock.seqcount irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7 tomoyo_ss rcu_read_lock rcu_read_lock rename_lock.seqcount irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7 tomoyo_ss &obj_hash[i].lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7 tomoyo_ss rcu_read_lock &dentry->d_lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7 tomoyo_ss tomoyo_policy_lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7 tk_core.seq.seqcount irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7 &root->kernfs_iattr_rwsem irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7 &root->kernfs_iattr_rwsem iattr_mutex irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7 &root->kernfs_iattr_rwsem iattr_mutex fs_reclaim 
irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7 &root->kernfs_iattr_rwsem iattr_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7 &root->kernfs_iattr_rwsem iattr_mutex tk_core.seq.seqcount irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss &____s->seqcount irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss tomoyo_policy_lock &c->lock irq_context: 0 cgroup_mutex cpuset_rwsem irq_context: 0 cgroup_mutex cpuset_rwsem cpuset_rwsem.rss.gp_wait.lock irq_context: 0 cgroup_mutex cpuset_rwsem rcu_state.exp_mutex rcu_node_0 irq_context: 0 cgroup_mutex cpuset_rwsem rcu_state.exp_mutex &obj_hash[i].lock irq_context: 0 cgroup_mutex cpuset_rwsem rcu_state.exp_mutex rcu_read_lock &pool->lock irq_context: 0 cgroup_mutex cpuset_rwsem rcu_state.exp_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 cgroup_mutex cpuset_rwsem rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 cgroup_mutex cpuset_rwsem rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 cgroup_mutex cpuset_rwsem rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cgroup_mutex cpuset_rwsem rcu_state.exp_mutex &rq->__lock irq_context: 0 cgroup_mutex cpuset_rwsem rcu_state.exp_mutex &rq->__lock &cfs_rq->removed.lock irq_context: 0 cgroup_mutex cpuset_rwsem rcu_state.exp_mutex &rcu_state.expedited_wq irq_context: 0 cgroup_mutex cpuset_rwsem rcu_state.exp_mutex &rcu_state.expedited_wq &p->pi_lock irq_context: 0 cgroup_mutex cpuset_rwsem rcu_state.exp_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cgroup_mutex cpuset_rwsem &obj_hash[i].lock irq_context: 0 cgroup_mutex cpuset_rwsem callback_lock irq_context: 0 cgroup_mutex cpuset_rwsem.waiters.lock irq_context: 0 cgroup_mutex cpuset_rwsem.rss.gp_wait.lock irq_context: 0 cgroup_mutex cpuset_rwsem.rss.gp_wait.lock &obj_hash[i].lock irq_context: 0 cgroup_mutex &dom->lock irq_context: 0 cgroup_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 cgroup_mutex rcu_state.exp_mutex &rnp->exp_wq[1] irq_context: 0 sb_lock &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)cgroup_destroy (work_completion)(&(&css->destroy_rwork)->work) cgroup_mutex cpuset_rwsem irq_context: 0 (wq_completion)cgroup_destroy (work_completion)(&(&css->destroy_rwork)->work) cgroup_mutex cpuset_rwsem cpuset_rwsem.rss.gp_wait.lock irq_context: 0 (wq_completion)cgroup_destroy (work_completion)(&(&css->destroy_rwork)->work) cgroup_mutex cpuset_rwsem rcu_state.exp_mutex rcu_node_0 irq_context: 0 (wq_completion)cgroup_destroy (work_completion)(&(&css->destroy_rwork)->work) cgroup_mutex cpuset_rwsem rcu_state.exp_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)cgroup_destroy (work_completion)(&(&css->destroy_rwork)->work) cgroup_mutex cpuset_rwsem rcu_state.exp_mutex rcu_read_lock &pool->lock irq_context: 0 (wq_completion)cgroup_destroy (work_completion)(&(&css->destroy_rwork)->work) cgroup_mutex cpuset_rwsem rcu_state.exp_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)cgroup_destroy (work_completion)(&(&css->destroy_rwork)->work) cgroup_mutex cpuset_rwsem rcu_state.exp_mutex &rnp->exp_wq[3] irq_context: 0 (wq_completion)cgroup_destroy (work_completion)(&(&css->destroy_rwork)->work) cgroup_mutex cpuset_rwsem rcu_state.exp_mutex &pool->lock irq_context: 0 (wq_completion)cgroup_destroy 
(work_completion)(&(&css->destroy_rwork)->work) cgroup_mutex cpuset_rwsem rcu_state.exp_mutex &pool->lock &p->pi_lock irq_context: 0 (wq_completion)cgroup_destroy (work_completion)(&(&css->destroy_rwork)->work) cgroup_mutex cpuset_rwsem rcu_state.exp_mutex &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)cgroup_destroy (work_completion)(&(&css->destroy_rwork)->work) cgroup_mutex cpuset_rwsem rcu_state.exp_mutex &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)cgroup_destroy (work_completion)(&(&css->destroy_rwork)->work) cgroup_mutex cpuset_rwsem rcu_state.exp_mutex &rq->__lock irq_context: 0 (wq_completion)cgroup_destroy (work_completion)(&(&css->destroy_rwork)->work) cgroup_mutex cpuset_rwsem rcu_state.exp_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)cgroup_destroy (work_completion)(&(&css->destroy_rwork)->work) cgroup_mutex cpuset_rwsem &obj_hash[i].lock irq_context: 0 (wq_completion)cgroup_destroy (work_completion)(&(&css->destroy_rwork)->work) cgroup_mutex cpuset_rwsem callback_lock irq_context: 0 (wq_completion)cgroup_destroy (work_completion)(&(&css->destroy_rwork)->work) cgroup_mutex cpuset_rwsem.waiters.lock irq_context: 0 (wq_completion)cgroup_destroy (work_completion)(&(&css->destroy_rwork)->work) cgroup_mutex cpuset_rwsem.rss.gp_wait.lock irq_context: 0 (wq_completion)cgroup_destroy (work_completion)(&(&css->destroy_rwork)->work) cgroup_mutex cpuset_rwsem.rss.gp_wait.lock &obj_hash[i].lock irq_context: 0 (wq_completion)cgroup_destroy (work_completion)(&(&css->destroy_rwork)->work) cgroup_mutex &dom->lock irq_context: 0 (wq_completion)cgroup_destroy (work_completion)(&(&css->destroy_rwork)->work) cgroup_mutex rcu_state.exp_mutex &rnp->exp_wq[0] irq_context: 0 &type->s_umount_key#42/1 &n->list_lock irq_context: 0 &type->s_umount_key#42/1 &n->list_lock &c->lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7 irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7 rename_lock.seqcount irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7 fs_reclaim irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7 &dentry->d_lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7 rcu_read_lock rename_lock.seqcount irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7 &root->kernfs_rwsem irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7 &root->kernfs_rwsem inode_hash_lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7 &root->kernfs_rwsem fs_reclaim irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7 &root->kernfs_rwsem fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7 &root->kernfs_rwsem mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7 &root->kernfs_rwsem inode_hash_lock &sb->s_type->i_lock_key#31 irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7 &root->kernfs_rwsem inode_hash_lock &s->s_inode_list_lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7 &root->kernfs_rwsem tk_core.seq.seqcount irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7 &root->kernfs_rwsem &sb->s_type->i_lock_key#31 irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7 &sb->s_type->i_lock_key#31 irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7 &sb->s_type->i_lock_key#31 &dentry->d_lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7 &sb->s_type->i_lock_key#31 &dentry->d_lock &wq#2 
irq_context: 0 kn->active#49 fs_reclaim irq_context: 0 kn->active#49 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#49 &c->lock irq_context: 0 kn->active#49 &kernfs_locks->open_file_mutex[count] irq_context: 0 kn->active#49 &kernfs_locks->open_file_mutex[count] fs_reclaim irq_context: 0 kn->active#49 &kernfs_locks->open_file_mutex[count] fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &sb->s_type->i_mutex_key#16 irq_context: 0 sb_writers#11 fs_reclaim irq_context: 0 sb_writers#11 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#11 &mm->mmap_lock irq_context: 0 sb_writers#11 &of->mutex irq_context: 0 sb_writers#11 &obj_hash[i].lock irq_context: 0 kn->active#50 fs_reclaim irq_context: 0 kn->active#50 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#50 &kernfs_locks->open_file_mutex[count] irq_context: 0 kn->active#50 &kernfs_locks->open_file_mutex[count] fs_reclaim irq_context: 0 kn->active#50 &kernfs_locks->open_file_mutex[count] fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#11 &c->lock irq_context: 0 sb_writers#11 &of->mutex kn->active#50 cpu_hotplug_lock irq_context: 0 sb_writers#11 &of->mutex kn->active#50 cpu_hotplug_lock cpuset_rwsem irq_context: 0 sb_writers#11 &of->mutex kn->active#50 cpu_hotplug_lock cpuset_rwsem cpuset_rwsem.rss.gp_wait.lock irq_context: 0 sb_writers#11 &of->mutex kn->active#50 cpu_hotplug_lock cpuset_rwsem.waiters.lock irq_context: 0 sb_writers#11 &of->mutex kn->active#50 cpu_hotplug_lock cpuset_rwsem.rss.gp_wait.lock irq_context: 0 sb_writers#9 &mm->mmap_lock irq_context: 0 &type->s_umount_key#44 irq_context: 0 &type->s_umount_key#44 sb_lock irq_context: 0 &type->s_umount_key#44 &dentry->d_lock irq_context: 0 &sb->s_type->i_mutex_key#17 irq_context: 0 &sb->s_type->i_mutex_key#17 namespace_sem irq_context: 0 &sb->s_type->i_mutex_key#17 namespace_sem rcu_read_lock mount_lock.seqcount irq_context: 0 &sb->s_type->i_mutex_key#17 namespace_sem fs_reclaim irq_context: 0 &sb->s_type->i_mutex_key#17 namespace_sem fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &sb->s_type->i_mutex_key#17 namespace_sem rename_lock irq_context: 0 &sb->s_type->i_mutex_key#17 namespace_sem rename_lock rename_lock.seqcount irq_context: 0 &sb->s_type->i_mutex_key#17 namespace_sem rename_lock rename_lock.seqcount &dentry->d_lock irq_context: 0 &sb->s_type->i_mutex_key#17 namespace_sem mount_lock irq_context: 0 &sb->s_type->i_mutex_key#17 namespace_sem mount_lock &dentry->d_lock irq_context: 0 &sb->s_type->i_mutex_key#17 namespace_sem mount_lock rcu_read_lock &dentry->d_lock irq_context: 0 &sb->s_type->i_mutex_key#17 namespace_sem mount_lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#17 namespace_sem mount_lock pool_lock#2 irq_context: 0 &sb->s_type->i_lock_key#26 irq_context: 0 sb_writers#12 irq_context: 0 sb_writers#12 fs_reclaim irq_context: 0 sb_writers#12 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#12 pool_lock#2 irq_context: 0 sb_writers#12 &mm->mmap_lock irq_context: 0 sb_writers#12 &sb->s_type->i_mutex_key#17 irq_context: 0 sb_writers#12 &sb->s_type->i_mutex_key#17 rename_lock.seqcount irq_context: 0 sb_writers#12 &sb->s_type->i_mutex_key#17 fs_reclaim irq_context: 0 sb_writers#12 &sb->s_type->i_mutex_key#17 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#12 &sb->s_type->i_mutex_key#17 pool_lock#2 irq_context: 0 sb_writers#12 &sb->s_type->i_mutex_key#17 &dentry->d_lock irq_context: 0 
sb_writers#12 &sb->s_type->i_mutex_key#17 rcu_read_lock rename_lock.seqcount irq_context: 0 sb_writers#12 &sb->s_type->i_mutex_key#17 &dentry->d_lock &wq irq_context: 0 sb_writers#12 &sb->s_type->i_mutex_key#17 mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#12 &sb->s_type->i_mutex_key#17 &sb->s_type->i_lock_key#26 irq_context: 0 sb_writers#12 &sb->s_type->i_mutex_key#17 &s->s_inode_list_lock irq_context: 0 sb_writers#12 &sb->s_type->i_mutex_key#17 tk_core.seq.seqcount irq_context: 0 sb_writers#12 &sb->s_type->i_mutex_key#17 pin_fs_lock irq_context: 0 sb_writers#12 &sb->s_type->i_mutex_key#17 &c->lock irq_context: 0 sb_writers#12 &sb->s_type->i_mutex_key#17 sb_lock irq_context: 0 sb_writers#12 &sb->s_type->i_mutex_key#17 &type->s_umount_key#44 irq_context: 0 sb_writers#12 &sb->s_type->i_mutex_key#17 &type->s_umount_key#44 sb_lock irq_context: 0 sb_writers#12 &sb->s_type->i_mutex_key#17 &type->s_umount_key#44 &dentry->d_lock irq_context: 0 sb_writers#12 &sb->s_type->i_mutex_key#17 mnt_id_ida.xa_lock irq_context: 0 sb_writers#12 &sb->s_type->i_mutex_key#17 pcpu_alloc_mutex irq_context: 0 sb_writers#12 &sb->s_type->i_mutex_key#17 pcpu_alloc_mutex pcpu_lock irq_context: 0 sb_writers#12 &sb->s_type->i_mutex_key#17 mount_lock irq_context: 0 sb_writers#12 &sb->s_type->i_mutex_key#17 mount_lock mount_lock.seqcount irq_context: 0 sb_writers#12 &sb->s_type->i_mutex_key#17 rcu_read_lock &dentry->d_lock irq_context: 0 sb_writers#12 &sb->s_type->i_mutex_key#17 &obj_hash[i].lock irq_context: 0 sb_writers#12 &sb->s_type->i_mutex_key#17 rcu_read_lock mount_lock irq_context: 0 sb_writers#12 &sb->s_type->i_mutex_key#17 rcu_read_lock mount_lock mount_lock.seqcount irq_context: 0 sb_writers#12 &sb->s_type->i_mutex_key#17 &sb->s_type->i_lock_key#26 &dentry->d_lock irq_context: 0 sb_writers#12 &sb->s_type->i_mutex_key#17 entries_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex irq_context: 0 rtnl_mutex dev_addr_sem irq_context: 0 rtnl_mutex dev_addr_sem net_rwsem irq_context: 0 rtnl_mutex dev_addr_sem net_rwsem &list->lock#2 irq_context: 0 rtnl_mutex dev_addr_sem &tn->lock irq_context: 0 rtnl_mutex dev_addr_sem &sdata->sec_mtx irq_context: 0 rtnl_mutex dev_addr_sem &sdata->sec_mtx &sec->lock irq_context: 0 rtnl_mutex dev_addr_sem fs_reclaim irq_context: 0 rtnl_mutex dev_addr_sem fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 rtnl_mutex dev_addr_sem pool_lock#2 irq_context: 0 rtnl_mutex dev_addr_sem &c->lock irq_context: 0 rtnl_mutex dev_addr_sem &n->list_lock irq_context: 0 rtnl_mutex dev_addr_sem &n->list_lock &c->lock irq_context: 0 rtnl_mutex dev_addr_sem nl_table_lock irq_context: 0 rtnl_mutex dev_addr_sem rlock-AF_NETLINK irq_context: 0 rtnl_mutex dev_addr_sem rcu_read_lock &ei->socket.wq.wait irq_context: 0 rtnl_mutex dev_addr_sem rcu_read_lock &ei->socket.wq.wait &p->pi_lock irq_context: 0 rtnl_mutex dev_addr_sem rcu_read_lock &ei->socket.wq.wait &p->pi_lock &rq->__lock irq_context: 0 rtnl_mutex dev_addr_sem rcu_read_lock &ei->socket.wq.wait &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex dev_addr_sem rcu_read_lock rcu_node_0 irq_context: 0 rtnl_mutex dev_addr_sem rcu_read_lock &rq->__lock irq_context: 0 rtnl_mutex dev_addr_sem &obj_hash[i].lock irq_context: 0 rtnl_mutex dev_addr_sem nl_table_wait.lock irq_context: 0 rtnl_mutex dev_addr_sem &tbl->lock irq_context: 0 rtnl_mutex dev_addr_sem &pn->hash_lock irq_context: 0 rtnl_mutex dev_addr_sem &net->ipv6.fib6_gc_lock rcu_read_lock &tb->tb6_lock irq_context: 0 rtnl_mutex dev_addr_sem 
&net->ipv6.fib6_gc_lock rcu_read_lock &tb->tb6_lock &net->ipv6.fib6_walker_lock irq_context: 0 rtnl_mutex dev_addr_sem &net->ipv6.fib6_gc_lock &obj_hash[i].lock irq_context: 0 rtnl_mutex dev_addr_sem input_pool.lock irq_context: 0 rtnl_mutex _xmit_IEEE802154 irq_context: 0 rtnl_mutex rcu_read_lock &ei->socket.wq.wait irq_context: 0 rtnl_mutex rcu_read_lock &ei->socket.wq.wait &p->pi_lock irq_context: 0 rtnl_mutex rcu_read_lock &ei->socket.wq.wait &p->pi_lock &rq->__lock irq_context: 0 rtnl_mutex rcu_read_lock &ei->socket.wq.wait &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex rcu_read_lock rcu_node_0 irq_context: softirq rcu_callback cpuset_rwsem.rss.gp_wait.lock &obj_hash[i].lock irq_context: 0 rtnl_mutex proc_subdir_lock irq_context: 0 rtnl_mutex proc_inum_ida.xa_lock irq_context: 0 rtnl_mutex proc_subdir_lock irq_context: 0 rtnl_mutex &idev->mc_lock &obj_hash[i].lock pool_lock irq_context: 0 rtnl_mutex dev_addr_sem &____s->seqcount irq_context: 0 rtnl_mutex quarantine_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 rename_lock.seqcount irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 fs_reclaim irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 pool_lock#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 &dentry->d_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 rcu_read_lock rename_lock.seqcount irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 &ei->i_es_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 &mapping->private_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 &dentry->d_lock &wq#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock tk_core.seq.seqcount irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_es_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem pool_lock#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem &ei->i_es_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem &ei->i_es_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem &ei->i_es_lock pool_lock#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem &ei->i_es_lock &sbi->s_es_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem &obj_hash[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem key#15 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem &sb->s_type->i_lock_key#22 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem &wb->list_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem &wb->list_lock &sb->s_type->i_lock_key#22 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 
mapping.invalidate_lock jbd2_handle &ei->i_data_sem &____s->seqcount irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem &ei->i_prealloc_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem &sbi->s_md_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem &xa->xa_lock#6 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem lock#4 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem &mapping->private_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem &bgl->locks[i].lock &sbi->s_md_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem &ret->b_state_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem &ret->b_state_lock &journal->j_list_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem &journal->j_revoke_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem key#3 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem &ei->i_es_lock irq_context: softirq (&timer) irq_context: softirq (&timer) &obj_hash[i].lock irq_context: softirq (&timer) &base->lock irq_context: softirq (&timer) &base->lock &obj_hash[i].lock irq_context: softirq (&timer) rcu_read_lock pool_lock#2 irq_context: softirq (&timer) rcu_read_lock &c->lock irq_context: softirq (&timer) rcu_read_lock &____s->seqcount irq_context: softirq (&timer) &txlock irq_context: softirq (&timer) &txlock &list->lock#3 irq_context: softirq (&timer) &txwq irq_context: softirq (&timer) &txwq &p->pi_lock irq_context: softirq (&timer) &txwq &p->pi_lock &rq->__lock irq_context: softirq (&timer) &txwq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 rcu_read_lock_bh &list->lock#5 irq_context: 0 rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_SLIP#2 irq_context: 0 rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_SLIP#2 &eql->queue.lock irq_context: 0 rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_SLIP#2 &eql->queue.lock &obj_hash[i].lock irq_context: 0 rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_SLIP#2 &eql->queue.lock &____s->seqcount irq_context: 0 rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_SLIP#2 &eql->queue.lock pool_lock#2 irq_context: 0 rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_SLIP#2 &eql->queue.lock rcu_read_lock pool_lock#2 irq_context: 0 rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_NETROM irq_context: 0 rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_NETROM (console_sem).lock irq_context: 0 rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_NETROM console_lock console_srcu console_owner_lock irq_context: 0 rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_NETROM console_lock console_srcu console_owner irq_context: 0 rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_NETROM console_lock console_srcu console_owner &port_lock_key 
irq_context: 0 rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_NETROM console_lock console_srcu console_owner console_owner_lock irq_context: 0 rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_NETROM &obj_hash[i].lock irq_context: 0 rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_NETROM pool_lock#2 irq_context: 0 rcu_read_lock_bh _xmit_X25#2 irq_context: 0 rcu_read_lock_bh _xmit_X25#2 &lapbeth->up_lock irq_context: 0 rcu_read_lock_bh _xmit_X25#2 &lapbeth->up_lock &obj_hash[i].lock irq_context: 0 rcu_read_lock_bh _xmit_X25#2 &lapbeth->up_lock pool_lock#2 irq_context: softirq &list->lock#5 irq_context: 0 &wb->list_lock irq_context: 0 &sbi->s_writepages_rwsem irq_context: 0 &sbi->s_writepages_rwsem &xa->xa_lock#6 irq_context: 0 &sbi->s_writepages_rwsem mmu_notifier_invalidate_range_start irq_context: 0 &sbi->s_writepages_rwsem &____s->seqcount irq_context: 0 &sbi->s_writepages_rwsem pool_lock#2 irq_context: 0 &sbi->s_writepages_rwsem &c->lock irq_context: 0 &sbi->s_writepages_rwsem lock#4 irq_context: 0 &sbi->s_writepages_rwsem lock#4 &lruvec->lru_lock irq_context: 0 &sbi->s_writepages_rwsem lock#5 irq_context: 0 &sbi->s_writepages_rwsem &obj_hash[i].lock irq_context: 0 &sbi->s_writepages_rwsem &journal->j_state_lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle lock#4 irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle lock#5 irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle mmu_notifier_invalidate_range_start irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &____s->seqcount irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle pool_lock#2 irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &c->lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &ei->i_es_lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem mmu_notifier_invalidate_range_start irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem pool_lock#2 irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ei->i_es_lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ei->i_raw_lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &obj_hash[i].lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ei->i_es_lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &memcg->move_lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &xa->xa_lock#6 irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &xa->xa_lock#6 &s->s_inode_wblist_lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &journal->j_wait_updates irq_context: 0 &sbi->s_writepages_rwsem tk_core.seq.seqcount irq_context: 0 &sbi->s_writepages_rwsem &dd->lock irq_context: 0 &sbi->s_writepages_rwsem &base->lock irq_context: 0 &sbi->s_writepages_rwsem &base->lock &obj_hash[i].lock irq_context: 0 &sbi->s_writepages_rwsem rcu_read_lock &dd->lock irq_context: 0 &sbi->s_writepages_rwsem rcu_read_lock &obj_hash[i].lock irq_context: 0 &sbi->s_writepages_rwsem rcu_read_lock &obj_hash[i].lock pool_lock irq_context: 0 &sbi->s_writepages_rwsem rcu_read_lock tk_core.seq.seqcount irq_context: 0 &sbi->s_writepages_rwsem rcu_read_lock &virtscsi_vq->vq_lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) rcu_read_lock &xa->xa_lock#6 &obj_hash[i].lock irq_context: 0 (wq_completion)ext4-rsv-conversion 
(work_completion)(&ei->i_rsv_conversion_work) rcu_read_lock &xa->xa_lock#6 &base->lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) rcu_read_lock &xa->xa_lock#6 &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) rcu_read_lock &xa->xa_lock#6 &wb->work_lock &obj_hash[i].lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) rcu_read_lock &xa->xa_lock#6 &wb->work_lock &base->lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) rcu_read_lock &xa->xa_lock#6 &wb->work_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) &folio_wait_table[i] irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) &folio_wait_table[i] &p->pi_lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) &folio_wait_table[i] &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) &folio_wait_table[i] &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &journal->j_state_lock &journal->j_wait_commit &p->pi_lock irq_context: 0 &journal->j_state_lock &journal->j_wait_commit &p->pi_lock &rq->__lock irq_context: 0 &journal->j_state_lock &journal->j_wait_commit &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &journal->j_wait_commit irq_context: 0 &ret->b_state_lock &journal->j_list_lock &obj_hash[i].lock irq_context: 0 &ret->b_state_lock &journal->j_list_lock pool_lock#2 irq_context: 0 &iint->mutex sb_writers#4 &journal->j_state_lock irq_context: 0 &iint->mutex sb_writers#4 &journal->j_state_lock tk_core.seq.seqcount irq_context: 0 &iint->mutex sb_writers#4 &journal->j_state_lock &obj_hash[i].lock irq_context: 0 &iint->mutex sb_writers#4 &journal->j_state_lock &base->lock irq_context: 0 &iint->mutex sb_writers#4 &journal->j_state_lock &base->lock &obj_hash[i].lock irq_context: 0 &iint->mutex mapping.invalidate_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &iint->mutex mapping.invalidate_lock &xa->xa_lock#6 &n->list_lock irq_context: 0 &iint->mutex mapping.invalidate_lock &xa->xa_lock#6 &n->list_lock &c->lock irq_context: 0 &iint->mutex rcu_read_lock rcu_node_0 irq_context: 0 &iint->mutex rcu_read_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#8 fs_reclaim irq_context: 0 &sb->s_type->i_mutex_key#8 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &sb->s_type->i_mutex_key#8 free_vmap_area_lock irq_context: 0 &sb->s_type->i_mutex_key#8 vmap_area_lock irq_context: 0 &sb->s_type->i_mutex_key#8 init_mm.page_table_lock irq_context: 0 &sb->s_type->i_mutex_key#8 free_vmap_area_lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#8 free_vmap_area_lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#8 swap_cgroup_mutex irq_context: 0 &sb->s_type->i_mutex_key#8 swap_cgroup_mutex fs_reclaim irq_context: 0 &sb->s_type->i_mutex_key#8 swap_cgroup_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &sb->s_type->i_mutex_key#8 swap_cgroup_mutex &____s->seqcount irq_context: 0 &sb->s_type->i_mutex_key#8 swap_cgroup_mutex pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#8 &journal->j_state_lock irq_context: 0 &sb->s_type->i_mutex_key#8 tk_core.seq.seqcount irq_context: 0 
&sb->s_type->i_mutex_key#8 &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#8 &base->lock irq_context: 0 &sb->s_type->i_mutex_key#8 &base->lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#8 &fq->mq_flush_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &fq->mq_flush_lock tk_core.seq.seqcount irq_context: 0 &sb->s_type->i_mutex_key#8 &fq->mq_flush_lock &q->requeue_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &fq->mq_flush_lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#8 &fq->mq_flush_lock rcu_read_lock &pool->lock irq_context: 0 &sb->s_type->i_mutex_key#8 &fq->mq_flush_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#8 &fq->mq_flush_lock rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#8 &fq->mq_flush_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &fq->mq_flush_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#8 &fq->mq_flush_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#8 &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#8 &x->wait#26 irq_context: 0 &sb->s_type->i_mutex_key#8 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq &fq->mq_flush_lock &x->wait#26 &p->pi_lock &rq->__lock irq_context: softirq &fq->mq_flush_lock &x->wait#26 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#8 (&timer.timer) irq_context: 0 &sb->s_type->i_mutex_key#8 &ei->i_es_lock irq_context: 0 &sb->s_type->i_mutex_key#8 swapon_mutex irq_context: 0 &sb->s_type->i_mutex_key#8 swapon_mutex fs_reclaim irq_context: 0 &sb->s_type->i_mutex_key#8 swapon_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &sb->s_type->i_mutex_key#8 swapon_mutex pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#8 swapon_mutex swap_lock irq_context: 0 &sb->s_type->i_mutex_key#8 swapon_mutex swap_lock &p->lock#2 irq_context: 0 &sb->s_type->i_mutex_key#8 swapon_mutex percpu_ref_switch_lock irq_context: 0 &sb->s_type->i_mutex_key#8 swapon_mutex swap_lock &p->lock#2 swap_avail_lock irq_context: 0 &sb->s_type->i_mutex_key#8 swapon_mutex (console_sem).lock irq_context: 0 &sb->s_type->i_mutex_key#8 swapon_mutex console_lock console_srcu console_owner_lock irq_context: 0 &sb->s_type->i_mutex_key#8 swapon_mutex console_lock console_srcu console_owner irq_context: 0 &sb->s_type->i_mutex_key#8 swapon_mutex console_lock console_srcu console_owner &port_lock_key irq_context: 0 &sb->s_type->i_mutex_key#8 swapon_mutex console_lock console_srcu console_owner console_owner_lock irq_context: 0 &sb->s_type->i_mutex_key#8 proc_poll_wait.lock irq_context: 0 swap_slots_cache_enable_mutex irq_context: 0 swap_slots_cache_enable_mutex cpu_hotplug_lock irq_context: 0 swap_slots_cache_enable_mutex cpu_hotplug_lock cpuhp_state_mutex irq_context: 0 swap_slots_cache_enable_mutex cpu_hotplug_lock cpuhp_state_mutex cpuhp_state-down irq_context: 0 swap_slots_cache_enable_mutex cpu_hotplug_lock cpuhp_state_mutex cpuhp_state-up irq_context: 0 swap_slots_cache_enable_mutex cpu_hotplug_lock cpuhp_state_mutex &p->pi_lock irq_context: 0 swap_slots_cache_enable_mutex cpu_hotplug_lock cpuhp_state_mutex &x->wait#6 irq_context: 0 swap_slots_cache_enable_mutex cpu_hotplug_lock cpuhp_state_mutex &rq->__lock irq_context: 0 swap_slots_cache_enable_mutex cpu_hotplug_lock cpuhp_state_mutex &rq->__lock &per_cpu_ptr(group->pcpu, 
cpu)->seq irq_context: 0 cpu_hotplug_lock cpuhp_state-up swap_slots_cache_mutex irq_context: 0 swap_slots_cache_enable_mutex cpu_hotplug_lock cpuhp_state_mutex &p->pi_lock &rq->__lock irq_context: 0 swap_slots_cache_enable_mutex cpu_hotplug_lock cpuhp_state_mutex &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 swap_slots_cache_enable_mutex swap_lock irq_context: 0 &sighand->siglock rcu_read_lock &____s->seqcount#5 irq_context: 0 &sighand->siglock &prev->lock irq_context: 0 &pipe->mutex/1 &mm->mmap_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 sk_lock-AF_INET rcu_read_lock &obj_hash[i].lock irq_context: hardirq log_wait.lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 sk_lock-AF_INET rcu_read_lock &ei->socket.wq.wait irq_context: 0 sk_lock-AF_INET rcu_read_lock &ei->socket.wq.wait &ep->lock irq_context: softirq _xmit_ETHER#2 quarantine_lock irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock rcu_read_lock pool_lock#2 irq_context: 0 rcu_read_lock &sighand->siglock &p->pi_lock &rq->__lock irq_context: 0 sk_lock-AF_INET &mm->mmap_lock &rq->__lock irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock batched_entropy_u8.lock irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock kfence_freelist_lock irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock &meta->lock irq_context: 0 sk_lock-AF_INET fill_pool_map-wait-type-override &rq->__lock irq_context: softirq _xmit_ETHER#2 &meta->lock irq_context: softirq _xmit_ETHER#2 kfence_freelist_lock irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 quarantine_lock irq_context: 0 &vma->vm_lock->lock remove_cache_srcu irq_context: 0 &vma->vm_lock->lock remove_cache_srcu quarantine_lock irq_context: 0 &vma->vm_lock->lock remove_cache_srcu &c->lock irq_context: 0 &vma->vm_lock->lock remove_cache_srcu &n->list_lock irq_context: 0 &vma->vm_lock->lock remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 &vma->vm_lock->lock remove_cache_srcu &obj_hash[i].lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &vma->vm_lock->lock fs_reclaim &rq->__lock &cfs_rq->removed.lock irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 &meta->lock irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 kfence_freelist_lock irq_context: 0 sk_lock-AF_INET rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq irq_context: 0 sk_lock-AF_INET rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock irq_context: 0 sk_lock-AF_INET rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock &cfs_rq->removed.lock irq_context: 0 sk_lock-AF_INET rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock &rq->__lock irq_context: 0 sk_lock-AF_INET rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 
0 sk_lock-AF_INET quarantine_lock irq_context: 0 &vma->vm_lock->lock &p->pi_lock irq_context: 0 &vma->vm_lock->lock &p->pi_lock &rq->__lock irq_context: 0 &vma->vm_lock->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_INET &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq (&lapb->t1timer) &lapb->lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock &____s->seqcount irq_context: 0 &rq->__lock rcu_read_lock &obj_hash[i].lock irq_context: 0 &rq->__lock rcu_read_lock &base->lock irq_context: 0 &rq->__lock rcu_read_lock &base->lock &obj_hash[i].lock irq_context: softirq (&lapb->t1timer) &lapb->lock &____s->seqcount irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 &pcp->lock &zone->lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 &pcp->lock &zone->lock &____s->seqcount irq_context: softirq (&rxnet->peer_keepalive_timer) rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 lock#3 rcu_read_lock (wq_completion)mm_percpu_wq irq_context: 0 (wq_completion)mm_percpu_wq (work_completion)(&barr->work) irq_context: 0 lock#3 &x->wait#10 irq_context: 0 (wq_completion)mm_percpu_wq (work_completion)(&barr->work) &x->wait#10 irq_context: 0 lock#3 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)mm_percpu_wq (work_completion)(&barr->work) &x->wait#10 &p->pi_lock irq_context: 0 lock#3 rcu_read_lock &rq->__lock irq_context: 0 &mm->mmap_lock &anon_vma->rwsem ptlock_ptr(page)#2 irq_context: 0 &mm->mmap_lock &anon_vma->rwsem ptlock_ptr(page)#2 &lruvec->lru_lock irq_context: 0 &mm->mmap_lock ptlock_ptr(page) lock#4 irq_context: 0 &mm->mmap_lock ptlock_ptr(page) lock#4 &lruvec->lru_lock irq_context: 0 &mm->mmap_lock ptlock_ptr(page) lock#4 rcu_read_lock pool_lock#2 irq_context: 0 &mm->mmap_lock ptlock_ptr(page) lock#4 &obj_hash[i].lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#40 &rq_wait->wait irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#40 rcu_read_lock &pool->lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#40 rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#40 rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#40 rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#40 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#40 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#40 rcu_node_0 irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#40 &rq->__lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#40 rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#40 &rq->__lock &per_cpu_ptr(group->pcpu, 
cpu)->seq irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#40 &rq->__lock &cfs_rq->removed.lock irq_context: softirq &rq_wait->wait &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 sk_lock-AF_INET rcu_node_0 irq_context: 0 (wq_completion)events_unbound (stats_flush_dwork).work &rq->__lock irq_context: 0 &sighand->siglock quarantine_lock irq_context: 0 sk_lock-AF_INET fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 sk_lock-AF_INET &rq->__lock &cfs_rq->removed.lock irq_context: 0 &rq->__lock rcu_read_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#31 &sbi->s_writepages_rwsem lock#4 &lruvec->lru_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#31 &sbi->s_writepages_rwsem &journal->j_state_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#31 &sbi->s_writepages_rwsem &journal->j_state_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#31 &sbi->s_writepages_rwsem &journal->j_state_lock &obj_hash[i].lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#31 &sbi->s_writepages_rwsem &journal->j_state_lock &base->lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#31 &sbi->s_writepages_rwsem &journal->j_state_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#31 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &mapping->private_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#31 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ret->b_state_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#31 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ret->b_state_lock &journal->j_list_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#31 &sbi->s_writepages_rwsem &base->lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#31 &sbi->s_writepages_rwsem &base->lock &obj_hash[i].lock irq_context: softirq (&icsk->icsk_delack_timer) slock-AF_INET &____s->seqcount irq_context: softirq &(&wb->bw_dwork)->timer rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &vma->vm_lock->lock rcu_read_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &vma->vm_lock->lock remove_cache_srcu &rq->__lock irq_context: 0 &vma->vm_lock->lock remove_cache_srcu rcu_read_lock rcu_node_0 irq_context: 0 &vma->vm_lock->lock remove_cache_srcu rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&(&kfence_timer)->work) rcu_read_lock &rq->__lock &cfs_rq->removed.lock irq_context: softirq net/wireless/reg.c:236 irq_context: softirq net/wireless/reg.c:236 rcu_read_lock &pool->lock irq_context: softirq net/wireless/reg.c:236 rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: softirq net/wireless/reg.c:236 rcu_read_lock &pool->lock &p->pi_lock irq_context: softirq net/wireless/reg.c:236 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: softirq net/wireless/reg.c:236 rcu_read_lock &pool->lock &p->pi_lock 
&rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem (&timer.timer) irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem fw_lock &x->wait#23 irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem dev_pm_qos_sysfs_mtx irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem dev_pm_qos_sysfs_mtx &root->kernfs_rwsem irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem dev_pm_qos_sysfs_mtx &root->kernfs_rwsem irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem dev_pm_qos_sysfs_mtx dev_pm_qos_mtx irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem &root->kernfs_rwsem kernfs_idr_lock irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem &root->kernfs_rwsem &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem &root->kernfs_rwsem pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem kernfs_idr_lock irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem &k->k_lock klist_remove_lock irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem subsys mutex#79 &k->k_lock klist_remove_lock irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem deferred_probe_mutex irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem device_links_lock irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem uevent_sock_mutex mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem uevent_sock_mutex pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem uevent_sock_mutex nl_table_lock irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem uevent_sock_mutex rlock-AF_NETLINK irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem uevent_sock_mutex nl_table_wait.lock irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem uevent_sock_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) 
umhelper_sem gdp_mutex sysfs_symlink_target_lock irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem gdp_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) fw_lock irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) rtnl_mutex irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) rtnl_mutex reg_indoor_lock irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) rtnl_mutex krc.lock irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) rtnl_mutex krc.lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) rtnl_mutex krc.lock &base->lock irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) rtnl_mutex krc.lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) rtnl_mutex reg_requests_lock irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) rtnl_mutex reg_pending_beacons_lock irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) rtnl_mutex pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) rtnl_mutex rcu_read_lock &pool->lock irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) rtnl_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events_power_efficient (reg_check_chans).work irq_context: 0 (wq_completion)events_power_efficient (reg_check_chans).work rtnl_mutex irq_context: 0 (wq_completion)events_power_efficient (reg_check_chans).work rtnl_mutex &rdev->wiphy.mtx irq_context: 0 (wq_completion)events_power_efficient (reg_check_chans).work rtnl_mutex &rdev->wiphy.mtx &wdev->mtx irq_context: 0 (wq_completion)events reg_work rtnl_mutex &c->lock irq_context: 0 (wq_completion)events reg_work rtnl_mutex uevent_sock_mutex irq_context: 0 (wq_completion)events reg_work rtnl_mutex uevent_sock_mutex fs_reclaim irq_context: 0 (wq_completion)events reg_work rtnl_mutex uevent_sock_mutex fs_reclaim &rq->__lock irq_context: 0 (wq_completion)events reg_work rtnl_mutex uevent_sock_mutex fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events reg_work rtnl_mutex uevent_sock_mutex fs_reclaim &rq->__lock &cfs_rq->removed.lock irq_context: 0 inode_hash_lock &sb->s_type->i_lock_key#24 irq_context: 0 (wq_completion)events reg_work rtnl_mutex uevent_sock_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events reg_work rtnl_mutex uevent_sock_mutex pool_lock#2 irq_context: 0 (wq_completion)events reg_work rtnl_mutex uevent_sock_mutex nl_table_lock irq_context: 0 (wq_completion)events reg_work rtnl_mutex uevent_sock_mutex rlock-AF_NETLINK irq_context: 0 (wq_completion)events reg_work rtnl_mutex uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait irq_context: 0 (wq_completion)events reg_work rtnl_mutex uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock irq_context: 0 (wq_completion)events reg_work rtnl_mutex uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq irq_context: 0 (wq_completion)events reg_work rtnl_mutex uevent_sock_mutex rcu_read_lock 
&ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock irq_context: 0 (wq_completion)events reg_work rtnl_mutex uevent_sock_mutex nl_table_wait.lock irq_context: 0 (wq_completion)events reg_work rtnl_mutex uevent_sock_mutex &obj_hash[i].lock irq_context: softirq &c->lock batched_entropy_u8.lock irq_context: softirq &c->lock kfence_freelist_lock irq_context: 0 (wq_completion)events (shepherd).work cpu_hotplug_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: softirq net/wireless/reg.c:533 irq_context: softirq net/wireless/reg.c:533 rcu_read_lock &pool->lock irq_context: softirq net/wireless/reg.c:533 rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: softirq net/wireless/reg.c:533 rcu_read_lock &pool->lock &p->pi_lock irq_context: softirq net/wireless/reg.c:533 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: softirq net/wireless/reg.c:533 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_power_efficient (crda_timeout).work irq_context: 0 (wq_completion)events_power_efficient (crda_timeout).work rtnl_mutex irq_context: 0 (wq_completion)events_power_efficient (crda_timeout).work rtnl_mutex reg_indoor_lock irq_context: 0 (wq_completion)events_power_efficient (crda_timeout).work rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)events_power_efficient (crda_timeout).work rtnl_mutex pool_lock#2 irq_context: 0 (wq_completion)events_power_efficient (crda_timeout).work rtnl_mutex krc.lock irq_context: 0 (wq_completion)events_power_efficient (crda_timeout).work rtnl_mutex reg_requests_lock irq_context: 0 (wq_completion)events_power_efficient (crda_timeout).work rtnl_mutex reg_pending_beacons_lock irq_context: 0 (wq_completion)events_power_efficient (crda_timeout).work rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)events_power_efficient (crda_timeout).work rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events_power_efficient (crda_timeout).work rtnl_mutex rcu_read_lock &pool->lock irq_context: 0 (wq_completion)events_power_efficient (crda_timeout).work rtnl_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events reg_work rtnl_mutex uevent_sock_mutex &c->lock irq_context: 0 (wq_completion)events reg_work rtnl_mutex uevent_sock_mutex &____s->seqcount irq_context: 0 sk_lock-AF_INET batched_entropy_u8.lock crngs.lock irq_context: softirq (&n->timer) irq_context: softirq (&n->timer) &n->lock irq_context: softirq (&n->timer) &n->lock &obj_hash[i].lock irq_context: softirq (&n->timer) &n->lock &base->lock irq_context: softirq (&n->timer) &n->lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#31 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ei->i_prealloc_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#31 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &sbi->s_md_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#31 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &xa->xa_lock#6 irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#31 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem lock#4 irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#31 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem 
&bgl->locks[i].lock &sbi->s_md_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#31 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &journal->j_revoke_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#31 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem key#3 irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#31 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &(ei->i_block_reservation_lock) key#15 irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#31 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ei->i_es_lock &sbi->s_es_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#31 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock key#10 irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#31 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &xa->xa_lock#6 key#10 irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#31 &sbi->s_writepages_rwsem jbd2_handle tk_core.seq.seqcount irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#31 &sbi->s_writepages_rwsem jbd2_handle &obj_hash[i].lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#31 &sbi->s_writepages_rwsem jbd2_handle &base->lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#31 &sbi->s_writepages_rwsem jbd2_handle &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#31 &sbi->s_writepages_rwsem jbd2_handle &dd->lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#31 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &dd->lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#31 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#31 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#31 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#31 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#31 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &base->lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#31 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#31 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &virtscsi_vq->vq_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#31 &sbi->s_writepages_rwsem jbd2_handle &rq_wait->wait irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#31 &sbi->s_writepages_rwsem jbd2_handle 
rcu_read_lock &pool->lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#31 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#31 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#31 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#31 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#31 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#31 &sbi->s_writepages_rwsem jbd2_handle &rq->__lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#31 &sbi->s_writepages_rwsem jbd2_handle &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#31 &sbi->s_writepages_rwsem jbd2_handle &mapping->i_mmap_rwsem &rq->__lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#31 &sbi->s_writepages_rwsem jbd2_handle &journal->j_state_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#31 &sbi->s_writepages_rwsem jbd2_handle &journal->j_state_lock &journal->j_wait_commit irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#31 &sbi->s_writepages_rwsem jbd2_handle &journal->j_state_lock &journal->j_wait_commit &p->pi_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#31 &sbi->s_writepages_rwsem jbd2_handle &journal->j_state_lock &journal->j_wait_commit &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#31 &sbi->s_writepages_rwsem jbd2_handle &journal->j_state_lock &journal->j_wait_commit &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#31 &sbi->s_writepages_rwsem jbd2_handle &journal->j_wait_updates &p->pi_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#31 &sbi->s_writepages_rwsem jbd2_handle &journal->j_wait_updates &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#31 &sbi->s_writepages_rwsem jbd2_handle &journal->j_wait_updates &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#31 &sbi->s_writepages_rwsem rcu_read_lock &dd->lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#31 &sbi->s_writepages_rwsem rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#31 &sbi->s_writepages_rwsem rcu_read_lock tk_core.seq.seqcount 
irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#31 &sbi->s_writepages_rwsem rcu_read_lock &virtscsi_vq->vq_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#31 &sbi->s_writepages_rwsem rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#31 &sbi->s_writepages_rwsem rcu_read_lock &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#31 &sbi->s_writepages_rwsem rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)kblockd (work_completion)(&(&hctx->run_work)->work) rcu_read_lock &c->lock irq_context: softirq (&cb->timer) &rq_wait->wait irq_context: softirq (&cb->timer) &rq_wait->wait &p->pi_lock irq_context: softirq (&cb->timer) &rq_wait->wait &p->pi_lock &rq->__lock irq_context: softirq (&cb->timer) &rq_wait->wait &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#31 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ei->i_es_lock key#2 irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#31 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &c->lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#31 &sbi->s_writepages_rwsem jbd2_handle rcu_node_0 irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#31 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock rcu_node_0 irq_context: softirq &ei->i_completed_io_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle &ei->i_data_sem &ei->i_es_lock key#8 irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) rcu_read_lock &xa->xa_lock#6 &pl->lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) rcu_read_lock &xa->xa_lock#6 &pl->lock key#12 irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) rcu_read_lock &xa->xa_lock#6 key#13 irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) &rq->__lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#31 &sbi->s_writepages_rwsem rcu_read_lock &c->lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#31 &sbi->s_writepages_rwsem rcu_read_lock &____s->seqcount irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#31 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &____s->seqcount irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#31 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#31 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, 
cpu)->seq irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&(&krcp->monitor_work)->work) rcu_callback &meta->lock irq_context: 0 (wq_completion)events (work_completion)(&(&krcp->monitor_work)->work) rcu_callback kfence_freelist_lock irq_context: 0 (wq_completion)events (work_completion)(&(&ovs_net->masks_rebalance)->work) &rq->__lock irq_context: 0 rcu_read_lock &sighand->siglock batched_entropy_u8.lock crngs.lock irq_context: 0 rcu_read_lock &sighand->siglock batched_entropy_u8.lock crngs.lock base_crng.lock irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock batched_entropy_u8.lock crngs.lock irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock batched_entropy_u8.lock crngs.lock base_crng.lock irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock quarantine_lock irq_context: 0 (wq_completion)events_power_efficient (crda_timeout).work rtnl_mutex krc.lock &obj_hash[i].lock irq_context: 0 (wq_completion)events_power_efficient (crda_timeout).work rtnl_mutex krc.lock &base->lock irq_context: 0 (wq_completion)events_power_efficient (crda_timeout).work rtnl_mutex krc.lock &base->lock &obj_hash[i].lock irq_context: softirq (&dom->period_timer) &p->sequence key#13 irq_context: 0 sk_lock-AF_INET remove_cache_srcu &pcp->lock &zone->lock irq_context: 0 sk_lock-AF_INET remove_cache_srcu &pcp->lock &zone->lock &____s->seqcount irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#31 &sbi->s_writepages_rwsem rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq &fq->mq_flush_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) &journal->j_state_lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) &journal->j_state_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) &journal->j_state_lock &obj_hash[i].lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) &journal->j_state_lock &base->lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) &journal->j_state_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle &ei->i_data_sem &ret->b_state_lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle &ei->i_data_sem &ret->b_state_lock &journal->j_list_lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle &ei->i_data_sem &ei->i_es_lock &sbi->s_es_lock irq_context: 0 sk_lock-AF_INET rcu_read_lock 
&rq->__lock &cfs_rq->removed.lock irq_context: 0 sk_lock-AF_INET rcu_read_lock &rcu_state.gp_wq irq_context: 0 sk_lock-AF_INET rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 sk_lock-AF_INET rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 sk_lock-AF_INET rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq slock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock batched_entropy_u8.lock irq_context: softirq slock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock kfence_freelist_lock irq_context: softirq slock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock &meta->lock irq_context: 0 (wq_completion)events reg_work rtnl_mutex uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)events reg_work rtnl_mutex uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events reg_work rtnl_mutex uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &vma->vm_lock->lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 lock#3 rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 tomoyo_ss &rq->__lock irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 &rq->__lock irq_context: 0 sb_writers#3 tomoyo_ss &rq->__lock irq_context: 0 rcu_read_lock &sighand->siglock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_INET rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &vma->vm_lock->lock rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 &vma->vm_lock->lock rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_PACKET &rq->__lock irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock rlock-AF_PACKET irq_context: 0 sk_lock-AF_INET rcu_read_lock &rcu_state.expedited_wq irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_PACKET &rq->__lock irq_context: 0 &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)mm_percpu_wq (work_completion)(&barr->work) &x->wait#10 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)mm_percpu_wq (work_completion)(&barr->work) &x->wait#10 &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)mm_percpu_wq (work_completion)(&barr->work) &x->wait#10 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_PACKET rcu_state.exp_mutex rcu_read_lock &rq->__lock irq_context: 0 sk_lock-AF_PACKET rcu_state.exp_mutex rcu_read_lock &stopper->lock irq_context: 0 sk_lock-AF_PACKET rcu_state.exp_mutex rcu_read_lock &stop_pi_lock irq_context: 0 sk_lock-AF_PACKET rcu_state.exp_mutex rcu_read_lock &stop_pi_lock &rq->__lock irq_context: 0 sk_lock-AF_PACKET rcu_state.exp_mutex rcu_read_lock &stop_pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_INET rcu_read_lock 
&rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)events_power_efficient (crda_timeout).work rtnl_mutex remove_cache_srcu irq_context: 0 (wq_completion)events_power_efficient (crda_timeout).work rtnl_mutex remove_cache_srcu quarantine_lock irq_context: 0 (wq_completion)events_power_efficient (crda_timeout).work rtnl_mutex remove_cache_srcu &c->lock irq_context: 0 (wq_completion)events_power_efficient (crda_timeout).work rtnl_mutex remove_cache_srcu &n->list_lock irq_context: 0 (wq_completion)events_power_efficient (crda_timeout).work rtnl_mutex remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)events_power_efficient (crda_timeout).work rtnl_mutex remove_cache_srcu &obj_hash[i].lock irq_context: 0 &p->lock &mm->mmap_lock ptlock_ptr(page)#2 lock#4 &lruvec->lru_lock irq_context: 0 &mm->mmap_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &mm->mmap_lock &anon_vma->rwsem rcu_read_lock rcu_node_0 irq_context: 0 &mm->mmap_lock &anon_vma->rwsem rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) rcu_read_lock &base->lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) rcu_read_lock &base->lock &obj_hash[i].lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock quarantine_lock irq_context: 0 (wq_completion)mm_percpu_wq (work_completion)(work) lock#4 &lruvec->lru_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: softirq (&sk->sk_timer) irq_context: softirq (&sk->sk_timer) slock-AF_INET irq_context: softirq (&sk->sk_timer) slock-AF_INET tk_core.seq.seqcount irq_context: softirq (&sk->sk_timer) slock-AF_INET &obj_hash[i].lock irq_context: softirq (&sk->sk_timer) slock-AF_INET &base->lock irq_context: softirq (&sk->sk_timer) slock-AF_INET &base->lock &obj_hash[i].lock irq_context: 0 &ep->mtx &mm->mmap_lock ptlock_ptr(page)#2 irq_context: 0 &vma->vm_lock->lock mmu_notifier_invalidate_range_start &rq->__lock irq_context: softirq (&wq_watchdog_timer) &obj_hash[i].lock irq_context: softirq (&wq_watchdog_timer) &base->lock irq_context: softirq (&wq_watchdog_timer) &base->lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET remove_cache_srcu &rq->__lock irq_context: softirq &(&tbl->managed_work)->timer rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 sk_lock-AF_INET &mm->mmap_lock &rq->__lock &cfs_rq->removed.lock irq_context: softirq &c->lock batched_entropy_u8.lock crngs.lock irq_context: softirq &c->lock batched_entropy_u8.lock crngs.lock base_crng.lock irq_context: 0 sk_lock-AF_INET batched_entropy_u8.lock crngs.lock base_crng.lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&(&kfence_timer)->work) cpu_hotplug_lock &rq->__lock irq_context: 0 rcu_read_lock &sighand->siglock &pcp->lock &zone->lock irq_context: 0 rcu_read_lock &sighand->siglock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &vma->vm_lock->lock remove_cache_srcu &pcp->lock &zone->lock irq_context: 0 &vma->vm_lock->lock remove_cache_srcu &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &sb->s_type->i_lock_key &dentry->d_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 
pool_lock#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 &journal->j_state_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 &journal->j_state_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 &journal->j_state_lock tk_core.seq.seqcount irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 &journal->j_state_lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 &journal->j_state_lock &base->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 &journal->j_state_lock &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 jbd2_handle irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 jbd2_handle tk_core.seq.seqcount irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_es_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 jbd2_handle mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 jbd2_handle pool_lock#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 jbd2_handle &ret->b_state_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 jbd2_handle &ret->b_state_lock &journal->j_list_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 jbd2_handle &mapping->private_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_raw_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 jbd2_handle &sb->s_type->i_lock_key#22 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 jbd2_handle &sb->s_type->i_lock_key#22 &dentry->d_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 jbd2_handle &journal->j_wait_updates irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 &obj_hash[i].lock irq_context: 0 sb_writers#5 tomoyo_ss &rq->__lock irq_context: 0 &mm->mmap_lock &xa->xa_lock#6 irq_context: 0 &mm->mmap_lock &info->lock irq_context: 0 &mm->mmap_lock tk_core.seq.seqcount irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle batched_entropy_u32.lock crngs.lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem &wb->work_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem &wb->work_lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem &wb->work_lock &base->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem &wb->work_lock &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem &ei->i_es_lock key#7 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 jbd2_handle 
&rq->__lock irq_context: 0 &ep->mtx &ep->lock &ep->wq &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 sb_writers#5 tomoyo_ss &n->list_lock irq_context: 0 sb_writers#5 tomoyo_ss &n->list_lock &c->lock irq_context: 0 sb_writers#5 tomoyo_ss rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#5 tomoyo_ss rcu_read_lock &rq->__lock irq_context: 0 &mm->mmap_lock mount_lock irq_context: 0 &mm->mmap_lock &sb->s_type->i_lock_key irq_context: 0 &mm->mmap_lock &wb->list_lock irq_context: 0 &mm->mmap_lock &wb->list_lock &sb->s_type->i_lock_key irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 tomoyo_ss &n->list_lock irq_context: 0 &mm->mmap_lock sb_writers#5 mount_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 tomoyo_ss &n->list_lock &c->lock irq_context: 0 &mm->mmap_lock sb_writers#5 &rq->__lock irq_context: 0 &mm->mmap_lock sb_writers#5 tk_core.seq.seqcount irq_context: 0 &mm->mmap_lock sb_writers#5 &sb->s_type->i_lock_key irq_context: 0 &mm->mmap_lock sb_writers#5 &wb->list_lock irq_context: 0 &mm->mmap_lock sb_writers#5 &wb->list_lock &sb->s_type->i_lock_key irq_context: 0 &sig->cred_guard_mutex sb_writers#4 &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem &c->lock irq_context: 0 &sig->cred_guard_mutex tomoyo_ss &mm->mmap_lock &rq->__lock irq_context: 0 &sig->cred_guard_mutex tomoyo_ss remove_cache_srcu &rq->__lock irq_context: 0 &sig->cred_guard_mutex tomoyo_ss remove_cache_srcu rcu_read_lock rcu_node_0 irq_context: 0 &sig->cred_guard_mutex tomoyo_ss remove_cache_srcu rcu_read_lock &rq->__lock irq_context: 0 &mm->mmap_lock &xa->xa_lock#6 &pcp->lock &zone->lock irq_context: 0 &mm->mmap_lock &xa->xa_lock#6 &____s->seqcount irq_context: 0 &mm->mmap_lock &xa->xa_lock#6 pool_lock#2 irq_context: 0 &mm->mmap_lock &xa->xa_lock#6 rcu_read_lock pool_lock#2 irq_context: 0 &mm->mmap_lock &xa->xa_lock#6 &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &ei->i_es_lock key#2 irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock &sem->wait_lock irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock rcu_read_lock &rq->__lock irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &pcp->lock &zone->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &sig->cred_guard_mutex tomoyo_ss fs_reclaim &rq->__lock irq_context: 0 &sig->cred_guard_mutex tomoyo_ss rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 &sig->cred_guard_mutex tomoyo_ss rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 &sig->cred_guard_mutex tomoyo_ss tomoyo_policy_lock &rq->__lock irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &rq->__lock irq_context: 0 &newf->file_lock &newf->resize_wait irq_context: 0 &kcov->lock irq_context: 0 &mm->mmap_lock &kcov->lock irq_context: 0 &mm->mmap_lock ptlock_ptr(page)#2 key irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 tomoyo_ss tomoyo_policy_lock &c->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_es_lock key#6 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem &ei->i_es_lock key#7 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem &ei->i_es_lock key#8 irq_context: 0 &kcov->lock kcov_remote_lock irq_context: 0 &kcov->lock kcov_remote_lock pool_lock#2 
irq_context: 0 pid_caches_mutex irq_context: 0 pid_caches_mutex &rq->__lock irq_context: softirq (&lapb->t1timer) &lapb->lock &list->lock#6 irq_context: softirq (&lapb->t1timer) &lapb->lock &list->lock#7 irq_context: softirq &list->lock#7 irq_context: softirq rcu_read_lock x25_neigh_list_lock irq_context: softirq rcu_read_lock &list->lock#8 irq_context: softirq rcu_read_lock x25_list_lock irq_context: softirq rcu_read_lock x25_forward_list_lock irq_context: 0 pid_caches_mutex slab_mutex irq_context: 0 pid_caches_mutex slab_mutex fs_reclaim irq_context: 0 pid_caches_mutex slab_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 pid_caches_mutex slab_mutex pool_lock#2 irq_context: 0 pid_caches_mutex slab_mutex &c->lock irq_context: 0 pid_caches_mutex slab_mutex &n->list_lock irq_context: 0 pid_caches_mutex slab_mutex pcpu_alloc_mutex irq_context: 0 pid_caches_mutex slab_mutex pcpu_alloc_mutex pcpu_lock irq_context: 0 pid_caches_mutex slab_mutex &root->kernfs_rwsem irq_context: 0 pid_caches_mutex slab_mutex &k->list_lock irq_context: 0 pid_caches_mutex slab_mutex lock irq_context: 0 pid_caches_mutex slab_mutex lock kernfs_idr_lock irq_context: 0 pid_caches_mutex slab_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 &type->s_umount_key#45 irq_context: 0 &type->s_umount_key#45 sb_lock irq_context: 0 &type->s_umount_key#45 &dentry->d_lock irq_context: 0 &sb->s_type->i_mutex_key#18 irq_context: 0 &sb->s_type->i_mutex_key#18 namespace_sem irq_context: 0 &sb->s_type->i_mutex_key#18 namespace_sem rcu_read_lock mount_lock.seqcount irq_context: 0 &sb->s_type->i_mutex_key#18 namespace_sem fs_reclaim irq_context: 0 &sb->s_type->i_mutex_key#18 namespace_sem fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &sb->s_type->i_mutex_key#18 namespace_sem rename_lock irq_context: 0 &sb->s_type->i_mutex_key#18 namespace_sem rename_lock rename_lock.seqcount irq_context: 0 &sb->s_type->i_mutex_key#18 namespace_sem rename_lock rename_lock.seqcount &dentry->d_lock irq_context: 0 &sb->s_type->i_mutex_key#18 namespace_sem mount_lock irq_context: 0 &sb->s_type->i_mutex_key#18 namespace_sem mount_lock &dentry->d_lock irq_context: 0 &sb->s_type->i_mutex_key#18 namespace_sem mount_lock rcu_read_lock &dentry->d_lock irq_context: 0 &sb->s_type->i_mutex_key#18 namespace_sem mount_lock &obj_hash[i].lock irq_context: 0 bt_proto_lock hci_sk_list.lock irq_context: 0 misc_mtx &base->lock irq_context: 0 misc_mtx &base->lock &obj_hash[i].lock irq_context: 0 (work_completion)(&(&data->open_timeout)->work) irq_context: 0 &data->open_mutex irq_context: 0 &data->open_mutex fs_reclaim irq_context: 0 &data->open_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &data->open_mutex &c->lock irq_context: 0 &data->open_mutex pool_lock#2 irq_context: 0 &data->open_mutex &____s->seqcount irq_context: 0 &data->open_mutex &obj_hash[i].lock irq_context: 0 &data->open_mutex &obj_hash[i].lock pool_lock irq_context: 0 &data->open_mutex &x->wait#9 irq_context: 0 &data->open_mutex hci_index_ida.xa_lock irq_context: 0 &data->open_mutex &n->list_lock irq_context: 0 &data->open_mutex &n->list_lock &c->lock irq_context: 0 &data->open_mutex cpu_hotplug_lock irq_context: 0 &data->open_mutex cpu_hotplug_lock wq_pool_mutex irq_context: 0 &data->open_mutex cpu_hotplug_lock wq_pool_mutex fs_reclaim irq_context: 0 &data->open_mutex cpu_hotplug_lock wq_pool_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &data->open_mutex cpu_hotplug_lock wq_pool_mutex pool_lock#2 irq_context: 0 
&data->open_mutex cpu_hotplug_lock wq_pool_mutex &obj_hash[i].lock irq_context: 0 &data->open_mutex cpu_hotplug_lock wq_pool_mutex &wq->mutex irq_context: 0 &data->open_mutex cpu_hotplug_lock wq_pool_mutex &wq->mutex &pool->lock/1 irq_context: 0 &data->open_mutex wq_pool_mutex irq_context: 0 &data->open_mutex wq_pool_mutex &wq->mutex irq_context: 0 &data->open_mutex cpu_hotplug_lock wq_pool_mutex &c->lock irq_context: 0 &data->open_mutex pin_fs_lock irq_context: 0 &data->open_mutex &sb->s_type->i_mutex_key#3 irq_context: 0 &data->open_mutex &sb->s_type->i_mutex_key#3 rename_lock.seqcount irq_context: 0 &data->open_mutex &sb->s_type->i_mutex_key#3 fs_reclaim irq_context: 0 &data->open_mutex &sb->s_type->i_mutex_key#3 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &data->open_mutex &sb->s_type->i_mutex_key#3 pool_lock#2 irq_context: 0 &data->open_mutex &sb->s_type->i_mutex_key#3 &dentry->d_lock irq_context: 0 &data->open_mutex &sb->s_type->i_mutex_key#3 rcu_read_lock rename_lock.seqcount irq_context: 0 &data->open_mutex &sb->s_type->i_mutex_key#3 &dentry->d_lock &wq irq_context: 0 &data->open_mutex &sb->s_type->i_mutex_key#3 mmu_notifier_invalidate_range_start irq_context: 0 &data->open_mutex &sb->s_type->i_mutex_key#3 &sb->s_type->i_lock_key#7 irq_context: 0 &data->open_mutex &sb->s_type->i_mutex_key#3 &s->s_inode_list_lock irq_context: 0 &data->open_mutex &sb->s_type->i_mutex_key#3 tk_core.seq.seqcount irq_context: 0 &data->open_mutex &sb->s_type->i_mutex_key#3 &sb->s_type->i_lock_key#7 &dentry->d_lock irq_context: 0 &data->open_mutex &k->list_lock irq_context: 0 &data->open_mutex gdp_mutex irq_context: 0 &data->open_mutex gdp_mutex &k->list_lock irq_context: 0 &data->open_mutex gdp_mutex fs_reclaim irq_context: 0 &data->open_mutex gdp_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &data->open_mutex gdp_mutex pool_lock#2 irq_context: 0 &data->open_mutex gdp_mutex lock irq_context: 0 &data->open_mutex gdp_mutex lock kernfs_idr_lock irq_context: 0 &data->open_mutex gdp_mutex &root->kernfs_rwsem irq_context: 0 &data->open_mutex gdp_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 &data->open_mutex lock irq_context: 0 &data->open_mutex lock kernfs_idr_lock irq_context: 0 &data->open_mutex &root->kernfs_rwsem irq_context: 0 &data->open_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 &data->open_mutex bus_type_sem irq_context: 0 &data->open_mutex sysfs_symlink_target_lock irq_context: 0 &data->open_mutex &root->kernfs_rwsem irq_context: 0 &data->open_mutex &dev->power.lock irq_context: 0 &data->open_mutex dpm_list_mtx irq_context: 0 &data->open_mutex uevent_sock_mutex irq_context: 0 &data->open_mutex uevent_sock_mutex fs_reclaim irq_context: 0 &data->open_mutex uevent_sock_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &data->open_mutex uevent_sock_mutex pool_lock#2 irq_context: 0 &data->open_mutex uevent_sock_mutex nl_table_lock irq_context: 0 &data->open_mutex uevent_sock_mutex rlock-AF_NETLINK irq_context: 0 &data->open_mutex uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait irq_context: 0 &data->open_mutex uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock irq_context: 0 &data->open_mutex uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq irq_context: 0 &data->open_mutex uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock irq_context: 0 &data->open_mutex uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock 
&rq->__lock irq_context: 0 &data->open_mutex uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &data->open_mutex uevent_sock_mutex nl_table_wait.lock irq_context: 0 &data->open_mutex uevent_sock_mutex &obj_hash[i].lock irq_context: 0 &data->open_mutex subsys mutex#80 irq_context: 0 &data->open_mutex subsys mutex#80 &k->k_lock irq_context: 0 &data->open_mutex &dev->devres_lock irq_context: 0 &data->open_mutex triggers_list_lock irq_context: 0 &data->open_mutex leds_list_lock irq_context: 0 &data->open_mutex leds_list_lock &led_cdev->trigger_lock irq_context: 0 &data->open_mutex rfkill_global_mutex irq_context: 0 &data->open_mutex rfkill_global_mutex fs_reclaim irq_context: 0 &data->open_mutex rfkill_global_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &data->open_mutex rfkill_global_mutex pool_lock#2 irq_context: 0 &data->open_mutex rfkill_global_mutex &k->list_lock irq_context: 0 &data->open_mutex rfkill_global_mutex &c->lock irq_context: 0 &data->open_mutex rfkill_global_mutex lock irq_context: 0 &data->open_mutex rfkill_global_mutex lock kernfs_idr_lock irq_context: 0 &data->open_mutex rfkill_global_mutex lock kernfs_idr_lock pool_lock#2 irq_context: 0 &data->open_mutex rfkill_global_mutex &root->kernfs_rwsem irq_context: 0 &data->open_mutex rfkill_global_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 &data->open_mutex rfkill_global_mutex bus_type_sem irq_context: 0 &data->open_mutex rfkill_global_mutex sysfs_symlink_target_lock irq_context: 0 &data->open_mutex rfkill_global_mutex &n->list_lock irq_context: 0 &data->open_mutex rfkill_global_mutex &n->list_lock &c->lock irq_context: 0 &data->open_mutex rfkill_global_mutex &root->kernfs_rwsem irq_context: 0 &data->open_mutex rfkill_global_mutex &dev->power.lock irq_context: 0 &data->open_mutex rfkill_global_mutex dpm_list_mtx irq_context: 0 &data->open_mutex rfkill_global_mutex &rfkill->lock irq_context: 0 &data->open_mutex rfkill_global_mutex uevent_sock_mutex irq_context: 0 &data->open_mutex rfkill_global_mutex uevent_sock_mutex fs_reclaim irq_context: 0 &data->open_mutex rfkill_global_mutex uevent_sock_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &data->open_mutex rfkill_global_mutex uevent_sock_mutex &c->lock irq_context: 0 &data->open_mutex rfkill_global_mutex uevent_sock_mutex pool_lock#2 irq_context: 0 &data->open_mutex rfkill_global_mutex uevent_sock_mutex nl_table_lock irq_context: 0 &data->open_mutex rfkill_global_mutex uevent_sock_mutex rlock-AF_NETLINK irq_context: 0 &data->open_mutex rfkill_global_mutex uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait irq_context: 0 &data->open_mutex rfkill_global_mutex uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock irq_context: 0 &data->open_mutex rfkill_global_mutex uevent_sock_mutex nl_table_wait.lock irq_context: 0 &data->open_mutex rfkill_global_mutex uevent_sock_mutex &obj_hash[i].lock irq_context: 0 &data->open_mutex rfkill_global_mutex &obj_hash[i].lock irq_context: 0 &data->open_mutex rfkill_global_mutex &k->k_lock irq_context: 0 &data->open_mutex rfkill_global_mutex &rq->__lock irq_context: 0 &data->open_mutex rfkill_global_mutex subsys mutex#40 irq_context: 0 &data->open_mutex rfkill_global_mutex subsys mutex#40 &k->k_lock irq_context: 0 &data->open_mutex rfkill_global_mutex triggers_list_lock irq_context: 0 &data->open_mutex rfkill_global_mutex leds_list_lock irq_context: 0 &data->open_mutex 
rfkill_global_mutex leds_list_lock &led_cdev->trigger_lock irq_context: 0 &data->open_mutex rfkill_global_mutex &obj_hash[i].lock pool_lock irq_context: 0 &data->open_mutex rfkill_global_mutex rcu_read_lock &pool->lock irq_context: 0 &data->open_mutex rfkill_global_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 &data->open_mutex rfkill_global_mutex rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 &data->open_mutex rfkill_global_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 &data->open_mutex rfkill_global_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 &data->open_mutex rfkill_global_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &data->open_mutex rfkill_global_mutex.wait_lock irq_context: 0 &data->open_mutex &p->pi_lock irq_context: 0 &data->open_mutex &p->pi_lock &rq->__lock irq_context: 0 &data->open_mutex &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &data->open_mutex &rq->__lock irq_context: 0 &data->open_mutex &rfkill->lock irq_context: 0 &data->open_mutex hci_dev_list_lock irq_context: 0 &data->open_mutex tk_core.seq.seqcount irq_context: 0 &data->open_mutex hci_sk_list.lock irq_context: 0 &data->open_mutex (pm_chain_head).rwsem irq_context: 0 &data->open_mutex rcu_read_lock &pool->lock/1 irq_context: 0 &data->open_mutex rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 &data->open_mutex rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 &data->open_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 &data->open_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &pipe->mutex/1 &rcu_state.expedited_wq &p->pi_lock irq_context: 0 &pipe->mutex/1 &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 &pipe->mutex/1 &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#10 rtnl_mutex proc_inum_ida.xa_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#10 rtnl_mutex proc_subdir_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#10 rtnl_mutex &idev->mc_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#10 rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#10 rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#10 rtnl_mutex &idev->mc_lock &obj_hash[i].lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#10 rtnl_mutex &idev->mc_lock _xmit_ETHER irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#10 rtnl_mutex &pnettable->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#10 rtnl_mutex smc_ib_devices.mutex irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#10 rtnl_mutex &vn->sock_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#10 
rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#10 rtnl_mutex &sb->s_type->i_mutex_key#3 &n->list_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#10 rtnl_mutex &sb->s_type->i_mutex_key#3 &n->list_lock &c->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#10 rtnl_mutex lock kernfs_idr_lock &n->list_lock irq_context: 0 &data->open_mutex &sb->s_type->i_mutex_key#3 &c->lock irq_context: 0 &data->open_mutex &sb->s_type->i_mutex_key#3 &pcp->lock &zone->lock irq_context: 0 &data->open_mutex &sb->s_type->i_mutex_key#3 &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &data->open_mutex &sb->s_type->i_mutex_key#3 &____s->seqcount irq_context: 0 &data->open_mutex &list->lock#11 irq_context: 0 &data->open_mutex &data->read_wait irq_context: 0 &list->lock#11 irq_context: 0 sk_lock-AF_BLUETOOTH-BTPROTO_HCI irq_context: 0 sk_lock-AF_BLUETOOTH-BTPROTO_HCI slock-AF_BLUETOOTH-BTPROTO_HCI irq_context: 0 sk_lock-AF_BLUETOOTH-BTPROTO_HCI sock_cookie_ida.xa_lock irq_context: 0 sk_lock-AF_BLUETOOTH-BTPROTO_HCI &p->alloc_lock irq_context: 0 sk_lock-AF_BLUETOOTH-BTPROTO_HCI pool_lock#2 irq_context: 0 sk_lock-AF_BLUETOOTH-BTPROTO_HCI tk_core.seq.seqcount irq_context: 0 sk_lock-AF_BLUETOOTH-BTPROTO_HCI hci_sk_list.lock irq_context: 0 sk_lock-AF_BLUETOOTH-BTPROTO_HCI &obj_hash[i].lock irq_context: 0 slock-AF_BLUETOOTH-BTPROTO_HCI irq_context: 0 hci_dev_list_lock irq_context: 0 rtnl_mutex &app->lock irq_context: 0 &data->read_wait irq_context: 0 rtnl_mutex (&app->join_timer) irq_context: 0 rtnl_mutex (&app->periodic_timer) irq_context: 0 rtnl_mutex &list->lock#14 irq_context: 0 rtnl_mutex (&app->join_timer)#2 irq_context: 0 rtnl_mutex &app->lock#2 irq_context: 0 rtnl_mutex &list->lock#15 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#10 rtnl_mutex lock kernfs_idr_lock &n->list_lock &c->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#10 rtnl_mutex lock kernfs_idr_lock pool_lock#2 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#10 rtnl_mutex uevent_sock_mutex &c->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#10 rtnl_mutex uevent_sock_mutex &rq->__lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#10 rtnl_mutex uevent_sock_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#10 rtnl_mutex pool_lock#2 irq_context: 0 &list->lock#9 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#10 rtnl_mutex &obj_hash[i].lock pool_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#10 rtnl_mutex uevent_sock_mutex &n->list_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#10 rtnl_mutex uevent_sock_mutex &n->list_lock &c->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#10 rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 
nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#10 rtnl_mutex &idev->mc_lock &n->list_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#10 rtnl_mutex &idev->mc_lock &n->list_lock &c->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#10 rtnl_mutex &idev->mc_lock &obj_hash[i].lock pool_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#10 rtnl_mutex rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#10 rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#10 rtnl_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#10 rtnl_mutex rcu_read_lock &rcu_state.gp_wq irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#10 rtnl_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 cb_lock nlk_cb_mutex-GENERIC &devlink->lock_key#10 irq_context: 0 cb_lock nlk_cb_mutex-GENERIC &devlink->lock_key#10 &devlink_port->type_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &x->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &x->lock &net->xfrm.xfrm_state_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem (&x->rtimer) irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem crypto_default_null_skcipher_lock irq_context: 0 (wq_completion)events_power_efficient (check_lifetime_work).work rcu_read_lock &rcu_state.expedited_wq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem crypto_default_null_skcipher_lock &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem crypto_default_null_skcipher_lock pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#10 &nsim_trap_data->trap_lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#10 &nsim_trap_data->trap_lock &c->lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#10 &nsim_trap_data->trap_lock &n->list_lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#10 &nsim_trap_data->trap_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#10 &nsim_trap_data->trap_lock crngs.lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#10 &nsim_trap_data->trap_lock &nsim_dev->fa_cookie_lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#10 &nsim_trap_data->trap_lock &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem nfnl_subsys_ipset stock_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem nfnl_subsys_ipset &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem nfnl_subsys_ipset 
pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem nfnl_subsys_ipset rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem nfnl_subsys_ipset (&map->gc) irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem nfnl_subsys_ipset &base->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem nfnl_subsys_ipset &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &bridge_netdev_addr_lock_key/1 &n->list_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &bridge_netdev_addr_lock_key/1 &n->list_lock &c->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex prog_idr_lock &obj_hash[i].lock irq_context: 0 bt_proto_lock &c->lock irq_context: 0 &data->open_mutex cpu_hotplug_lock wq_pool_mutex &____s->seqcount irq_context: 0 &data->open_mutex cpu_hotplug_lock wq_pool_mutex &obj_hash[i].lock pool_lock irq_context: 0 &data->open_mutex uevent_sock_mutex rcu_read_lock rcu_node_0 irq_context: 0 &data->open_mutex uevent_sock_mutex rcu_read_lock &rq->__lock irq_context: 0 &data->open_mutex rfkill_global_mutex uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq irq_context: 0 &data->open_mutex rfkill_global_mutex uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock irq_context: 0 &data->open_mutex rfkill_global_mutex uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock &rq->__lock irq_context: 0 &data->open_mutex rfkill_global_mutex uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &data->open_mutex rfkill_global_mutex uevent_sock_mutex rcu_read_lock rcu_node_0 irq_context: 0 &data->open_mutex rfkill_global_mutex uevent_sock_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex prog_idr_lock pool_lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &dev_addr_list_lock_key/1 rcu_read_lock _xmit_ETHER &n->list_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &dev_addr_list_lock_key/1 rcu_read_lock _xmit_ETHER &n->list_lock &c->lock irq_context: 0 rtnl_mutex dev_addr_sem team->team_lock_key#10 irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_read_lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_read_lock fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &br->lock lweventlist_lock &c->lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex remove_cache_srcu pool_lock#2 irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#10 &____s->seqcount#2 irq_context: 0 &data->open_mutex &sb->s_type->i_mutex_key#3 &n->list_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#10 &____s->seqcount irq_context: 0 &data->open_mutex &sb->s_type->i_mutex_key#3 &n->list_lock &c->lock irq_context: 0 &pool->lock/1 &x->wait#10 irq_context: 0 &pool->lock/1 &x->wait#10 &p->pi_lock irq_context: 0 &pool->lock/1 &x->wait#10 &p->pi_lock 
&rq->__lock irq_context: 0 &pool->lock/1 &x->wait#10 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &p->lock fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &p->lock fs_reclaim &cfs_rq->removed.lock irq_context: 0 &p->lock fs_reclaim &obj_hash[i].lock irq_context: 0 &p->lock &cfs_rq->removed.lock irq_context: 0 &type->i_mutex_dir_key#3 sb_writers#4 &n->list_lock irq_context: 0 &type->i_mutex_dir_key#3 sb_writers#4 &n->list_lock &c->lock irq_context: 0 rtnl_mutex rcu_read_lock sysctl_lock irq_context: 0 rtnl_mutex rcu_read_lock &sb->s_type->i_lock_key#23 irq_context: 0 rtnl_mutex &sb->s_type->i_lock_key#23 irq_context: 0 rtnl_mutex &sb->s_type->i_lock_key#23 &dentry->d_lock irq_context: 0 rtnl_mutex &dentry->d_lock irq_context: 0 rtnl_mutex rename_lock.seqcount irq_context: 0 rtnl_mutex rcu_read_lock &dentry->d_lock irq_context: 0 rtnl_mutex &dentry->d_lock &sb->s_type->i_lock_key#23 &dentry->d_lock &lru->node[i].lock irq_context: 0 &hdev->req_lock irq_context: 0 &hdev->req_lock pool_lock#2 irq_context: 0 &hdev->req_lock &list->lock#10 irq_context: 0 &hdev->req_lock &list->lock#9 irq_context: 0 &hdev->req_lock rcu_read_lock &pool->lock/1 irq_context: 0 &hdev->req_lock rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 &hdev->req_lock rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 &hdev->req_lock &hdev->req_wait_q irq_context: 0 &hdev->req_lock &obj_hash[i].lock irq_context: 0 &hdev->req_lock &base->lock irq_context: 0 &hdev->req_lock &base->lock &obj_hash[i].lock irq_context: 0 &hdev->req_lock &rq->__lock irq_context: 0 &hdev->req_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex dev->qdisc_tx_busylock ?: &qdisc_tx_busylock irq_context: 0 rtnl_mutex dev->qdisc_tx_busylock ?: &qdisc_tx_busylock &sch->q.lock irq_context: 0 &sb->s_type->i_mutex_key#10 &net->xdp.lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 &net->xdp.lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &s->s_inode_list_lock irq_context: 0 rtnl_mutex &dentry->d_lock &lru->node[i].lock irq_context: 0 rtnl_mutex &root->kernfs_rwsem &base->lock irq_context: 0 rtnl_mutex &root->kernfs_rwsem &base->lock &obj_hash[i].lock irq_context: 0 css_set_lock cgroup_file_kn_lock kernfs_notify_lock irq_context: 0 css_set_lock cgroup_file_kn_lock kernfs_notify_lock rcu_read_lock &pool->lock irq_context: 0 css_set_lock cgroup_file_kn_lock kernfs_notify_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 css_set_lock cgroup_file_kn_lock kernfs_notify_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 css_set_lock cgroup_file_kn_lock kernfs_notify_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 css_set_lock cgroup_file_kn_lock kernfs_notify_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rcu_read_lock tasklist_lock irq_context: 0 &kcov->lock kcov_remote_lock &obj_hash[i].lock irq_context: 0 cgroup_threadgroup_rwsem rcu_node_0 irq_context: 0 &pipe->mutex/1 &pipe->wr_wait &ep->lock irq_context: 0 cgroup_threadgroup_rwsem &sighand->siglock irq_context: 0 &pipe->mutex/1 &pipe->wr_wait &ep->lock &ep->wq irq_context: 0 cgroup_threadgroup_rwsem &sighand->siglock &rq->__lock irq_context: 0 &pipe->mutex/1 &pipe->wr_wait &ep->lock &ep->wq &p->pi_lock irq_context: 0 &pipe->mutex/1 &pipe->wr_wait &ep->lock &ep->wq &p->pi_lock &rq->__lock irq_context: 0 &pipe->mutex/1 &pipe->wr_wait &ep->lock &ep->wq &p->pi_lock &rq->__lock 
&per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 kcov_remote_lock irq_context: 0 &u->iolock rcu_read_lock &ei->socket.wq.wait &ep->lock irq_context: 0 rtnl_mutex &tn->idrinfo->lock#2 irq_context: 0 rtnl_mutex &tn->idrinfo->lock#2 &rq->__lock irq_context: 0 rtnl_mutex &tn->idrinfo->lock#2 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &tn->idrinfo->lock#2 fs_reclaim irq_context: 0 rtnl_mutex &tn->idrinfo->lock#2 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 rtnl_mutex &tn->idrinfo->lock#2 &c->lock irq_context: 0 rtnl_mutex &tn->idrinfo->lock#2 &n->list_lock irq_context: 0 rtnl_mutex &tn->idrinfo->lock#2 &n->list_lock &c->lock irq_context: 0 rtnl_mutex &tn->idrinfo->lock#2 pool_lock#2 irq_context: 0 rtnl_mutex &p->tcfa_lock irq_context: softirq &(&ovs_net->masks_rebalance)->timer rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 rtnl_mutex prog_idr_lock irq_context: 0 rtnl_mutex bpf_lock irq_context: 0 (wq_completion)events (work_completion)(&aux->work) &obj_hash[i].lock pool_lock irq_context: 0 &hdev->req_lock (&timer.timer) irq_context: 0 (wq_completion)events (work_completion)(&aux->work) &base->lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#10 lweventlist_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#10 lweventlist_lock &dir->lock#2 irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#10 rcu_read_lock &pool->lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#10 rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#10 &nsim_trap_data->trap_lock &____s->seqcount#2 irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#10 &nsim_trap_data->trap_lock &____s->seqcount irq_context: 0 (wq_completion)wg-kex-wg0#17 (work_completion)(&peer->transmit_handshake_work) &n->list_lock irq_context: 0 (wq_completion)wg-kex-wg0#17 (work_completion)(&peer->transmit_handshake_work) &n->list_lock &c->lock irq_context: 0 (wq_completion)wg-kex-wg1#18 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock &____s->seqcount#2 irq_context: 0 (wq_completion)wg-kex-wg1#18 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock &____s->seqcount irq_context: 0 rtnl_mutex &batadv_netdev_addr_lock_key/1 &____s->seqcount#2 irq_context: 0 rtnl_mutex &batadv_netdev_addr_lock_key/1 &n->list_lock irq_context: 0 rtnl_mutex &batadv_netdev_addr_lock_key/1 &n->list_lock &c->lock irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem fs_reclaim &rq->__lock irq_context: 0 rtnl_mutex &macsec_netdev_addr_lock_key/1 &____s->seqcount#2 irq_context: 0 rtnl_mutex &macsec_netdev_addr_lock_key/1 &____s->seqcount irq_context: 0 (wq_completion)wg-kex-wg0#17 
(work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh &____s->seqcount#2 irq_context: 0 (wq_completion)wg-kex-wg0#17 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh &____s->seqcount irq_context: 0 (wq_completion)wg-kex-wg0#19 irq_context: 0 (wq_completion)wg-kex-wg0#19 (work_completion)(&peer->transmit_handshake_work) irq_context: 0 (wq_completion)wg-kex-wg0#19 (work_completion)(&peer->transmit_handshake_work) tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg0#19 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock irq_context: 0 (wq_completion)wg-kex-wg0#19 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock irq_context: 0 (wq_completion)wg-kex-wg0#19 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock crngs.lock irq_context: 0 (wq_completion)wg-kex-wg0#19 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg0#19 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg0#19 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock rcu_read_lock_bh batched_entropy_u32.lock irq_context: 0 (wq_completion)wg-kex-wg0#19 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock rcu_read_lock_bh &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg0#19 (work_completion)(&peer->transmit_handshake_work) &cookie->lock irq_context: 0 (wq_completion)wg-kex-wg0#19 (work_completion)(&peer->transmit_handshake_work) &cookie->lock irq_context: 0 (wq_completion)wg-kex-wg0#19 (work_completion)(&peer->transmit_handshake_work) rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg0#19 (work_completion)(&peer->transmit_handshake_work) rcu_read_lock_bh &base->lock irq_context: 0 (wq_completion)wg-kex-wg0#19 (work_completion)(&peer->transmit_handshake_work) rcu_read_lock_bh &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg0#19 (work_completion)(&peer->transmit_handshake_work) &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg0#19 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock irq_context: 0 (wq_completion)wg-kex-wg0#19 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)wg-kex-wg0#19 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh crngs.lock irq_context: 0 (wq_completion)wg-kex-wg0#19 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh batched_entropy_u32.lock irq_context: 0 (wq_completion)wg-kex-wg0#19 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#7 irq_context: 0 (wq_completion)wg-kex-wg0#19 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &nf_conntrack_locks[i] irq_context: 0 (wq_completion)wg-kex-wg0#19 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 irq_context: 0 (wq_completion)wg-kex-wg0#19 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock 
&nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 batched_entropy_u8.lock irq_context: 0 (wq_completion)wg-kex-wg0#19 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg0#19 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)wg-kex-wg0#19 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &n->lock irq_context: 0 (wq_completion)wg-kex-wg0#19 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)wg-kex-wg0#19 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg0#19 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: 0 (wq_completion)wg-kex-wg0#19 (work_completion)(&peer->transmit_handshake_work) batched_entropy_u8.lock irq_context: 0 (wq_completion)wg-kex-wg0#19 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)wg-kex-wg0#19 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock &____s->seqcount#2 irq_context: 0 (wq_completion)wg-kex-wg0#19 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock &____s->seqcount irq_context: 0 (wq_completion)wg-kex-wg0#19 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)wg-kex-wg0#19 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 rtnl_mutex &wg->device_update_lock rcu_state.exp_mutex &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)wg-kex-wg1#19 irq_context: 0 (wq_completion)wg-kex-wg1#19 (work_completion)(&peer->transmit_handshake_work) irq_context: 0 (wq_completion)wg-kex-wg1#19 (work_completion)(&peer->transmit_handshake_work) tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg1#19 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock irq_context: 0 (wq_completion)wg-kex-wg1#19 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock irq_context: 0 (wq_completion)wg-kex-wg1#19 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock crngs.lock irq_context: 0 (wq_completion)wg-kex-wg1#19 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg1#19 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg1#19 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock rcu_read_lock_bh batched_entropy_u32.lock irq_context: 0 (wq_completion)wg-kex-wg1#19 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock rcu_read_lock_bh &table->lock#2 
irq_context: 0 (wq_completion)wg-kex-wg1#19 (work_completion)(&peer->transmit_handshake_work) &cookie->lock irq_context: 0 (wq_completion)wg-kex-wg1#19 (work_completion)(&peer->transmit_handshake_work) &cookie->lock irq_context: 0 (wq_completion)wg-kex-wg1#19 (work_completion)(&peer->transmit_handshake_work) rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg1#19 (work_completion)(&peer->transmit_handshake_work) rcu_read_lock_bh &base->lock irq_context: 0 (wq_completion)wg-kex-wg1#19 (work_completion)(&peer->transmit_handshake_work) rcu_read_lock_bh &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg1#19 (work_completion)(&peer->transmit_handshake_work) &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg1#19 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock irq_context: 0 (wq_completion)wg-kex-wg1#19 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)wg-kex-wg1#19 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &ul->lock irq_context: 0 (wq_completion)wg-kex-wg1#19 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh batched_entropy_u32.lock irq_context: 0 (wq_completion)wg-kex-wg1#19 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#7 irq_context: 0 (wq_completion)wg-kex-wg1#19 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 (wq_completion)wg-kex-wg1#19 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#2 irq_context: 0 (wq_completion)wg-kex-wg1#19 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount irq_context: 0 (wq_completion)wg-kex-wg1#19 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &nf_conntrack_locks[i] irq_context: 0 (wq_completion)wg-kex-wg1#19 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 irq_context: 0 (wq_completion)wg-kex-wg1#19 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 batched_entropy_u8.lock irq_context: 0 (wq_completion)wg-kex-wg1#19 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)wg-kex-wg1#19 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg1#19 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: 0 (wq_completion)wg-kex-wg1#19 (work_completion)(&peer->transmit_handshake_work) batched_entropy_u8.lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_BLUETOOTH-BTPROTO_HCI irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_BLUETOOTH-BTPROTO_HCI slock-AF_BLUETOOTH-BTPROTO_HCI irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_BLUETOOTH-BTPROTO_HCI pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_BLUETOOTH-BTPROTO_HCI tk_core.seq.seqcount 
irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_BLUETOOTH-BTPROTO_HCI hci_sk_list.lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_BLUETOOTH-BTPROTO_HCI &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_BLUETOOTH-BTPROTO_HCI sock_cookie_ida.xa_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_BLUETOOTH-BTPROTO_HCI hci_sk_list.lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_BLUETOOTH-BTPROTO_HCI clock-AF_BLUETOOTH irq_context: 0 &sb->s_type->i_mutex_key#10 slock-AF_BLUETOOTH-BTPROTO_HCI irq_context: 0 &sb->s_type->i_mutex_key#10 hci_dev_list_lock irq_context: 0 &sb->s_type->i_mutex_key#10 rlock-AF_BLUETOOTH irq_context: 0 &sb->s_type->i_mutex_key#10 wlock-AF_BLUETOOTH irq_context: 0 &sb->s_type->i_mutex_key#19 irq_context: 0 namespace_sem rcu_read_lock &pool->lock irq_context: 0 namespace_sem rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 namespace_sem rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 namespace_sem rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 namespace_sem rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 namespace_sem &rq->__lock irq_context: 0 namespace_sem mnt_id_ida.xa_lock pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&aux->work) &base->lock &obj_hash[i].lock irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 fs_reclaim irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 &c->lock irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 &ei->i_es_lock irq_context: 0 &type->s_umount_key#22/1 &n->list_lock irq_context: 0 &type->s_umount_key#22/1 &n->list_lock &c->lock irq_context: 0 rcu_read_lock &undo_list->lock irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 &mapping->private_lock irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 tk_core.seq.seqcount irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 sb_writers#4 mount_lock irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 sb_writers#4 tk_core.seq.seqcount irq_context: 0 bt_proto_lock &obj_hash[i].lock pool_lock irq_context: 0 sb_writers#3 tomoyo_ss &pcp->lock &zone->lock irq_context: 0 sb_writers#3 tomoyo_ss &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 sb_writers#4 mmu_notifier_invalidate_range_start irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 sb_writers#4 &journal->j_state_lock irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 sb_writers#4 jbd2_handle irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 sb_writers#4 jbd2_handle &mapping->private_lock irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 sb_writers#4 jbd2_handle &ret->b_state_lock irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 sb_writers#4 jbd2_handle &ret->b_state_lock &journal->j_list_lock irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 sb_writers#4 jbd2_handle &ei->i_raw_lock irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 sb_writers#4 jbd2_handle &journal->j_wait_updates irq_context: 0 &hdev->req_lock &c->lock irq_context: 0 &hdev->req_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 &hdev->req_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &hdev->req_lock &cfs_rq->removed.lock irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 
sb_writers#4 &obj_hash[i].lock irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 sb_writers#4 &sb->s_type->i_lock_key#22 irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 sb_writers#4 &wb->list_lock irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 sb_writers#4 &wb->list_lock &sb->s_type->i_lock_key#22 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &cfs_rq->removed.lock irq_context: 0 &sb->s_type->i_mutex_key#10 &u->peer_wait &p->pi_lock irq_context: 0 &sb->s_type->i_mutex_key#10 &u->peer_wait &p->pi_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 &u->peer_wait &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &u->lock &ei->socket.wq.wait &ep->lock irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_read_lock &ei->socket.wq.wait &ep->lock irq_context: 0 &mm->mmap_lock &vma->vm_lock->lock &sem->wait_lock irq_context: 0 rcu_read_lock &sem->wait_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 remove_cache_srcu &c->lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 remove_cache_srcu &n->list_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 remove_cache_srcu &obj_hash[i].lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 remove_cache_srcu &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_data_sem &ei->i_es_lock key#8 irq_context: 0 &sb->s_type->i_mutex_key#10 &u->peer_wait &p->pi_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex fib_info_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex fib_info_lock &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex fib_info_lock pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex lweventlist_lock irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 rcu_read_lock &dentry->d_lock sysctl_lock irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 rcu_read_lock sysctl_lock irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 &____s->seqcount irq_context: 0 &data->open_mutex uevent_sock_mutex uevent_sock_mutex.wait_lock irq_context: 0 &data->open_mutex uevent_sock_mutex &rq->__lock irq_context: 0 &data->open_mutex uevent_sock_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &vma->vm_lock->lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &cfs_rq->removed.lock irq_context: 0 rtnl_mutex &nr_netdev_addr_lock_key irq_context: 0 rtnl_mutex listen_lock irq_context: 0 &sb->s_type->i_mutex_key#10 nl_table_lock nl_table_wait.lock irq_context: 0 &data->open_mutex uevent_sock_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &data->open_mutex uevent_sock_mutex nl_table_wait.lock &p->pi_lock irq_context: 0 &data->open_mutex uevent_sock_mutex nl_table_wait.lock &p->pi_lock &rq->__lock irq_context: 0 &data->open_mutex uevent_sock_mutex nl_table_wait.lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem irq_context: 0 pernet_ops_rwsem stack_depot_init_mutex irq_context: 0 pernet_ops_rwsem crngs.lock irq_context: 0 pernet_ops_rwsem proc_inum_ida.xa_lock irq_context: 0 pernet_ops_rwsem 
fs_reclaim irq_context: 0 pernet_ops_rwsem fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 pernet_ops_rwsem pool_lock#2 irq_context: 0 pernet_ops_rwsem proc_subdir_lock irq_context: 0 pernet_ops_rwsem proc_subdir_lock irq_context: 0 pernet_ops_rwsem &c->lock irq_context: 0 pernet_ops_rwsem sysctl_lock irq_context: 0 pernet_ops_rwsem pcpu_alloc_mutex irq_context: 0 pernet_ops_rwsem pcpu_alloc_mutex pcpu_lock irq_context: 0 pernet_ops_rwsem &____s->seqcount irq_context: 0 pernet_ops_rwsem mmu_notifier_invalidate_range_start irq_context: 0 pernet_ops_rwsem &sb->s_type->i_lock_key#8 irq_context: 0 pernet_ops_rwsem &dir->lock irq_context: 0 pernet_ops_rwsem &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_NETLINK irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_NETLINK k-slock-AF_NETLINK irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_NETLINK rcu_read_lock rhashtable_bucket irq_context: 0 pernet_ops_rwsem k-slock-AF_NETLINK irq_context: 0 pernet_ops_rwsem nl_table_lock irq_context: 0 pernet_ops_rwsem nl_table_lock &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem nl_table_lock pool_lock#2 irq_context: 0 pernet_ops_rwsem nl_table_wait.lock irq_context: 0 pernet_ops_rwsem &n->list_lock irq_context: 0 pernet_ops_rwsem &n->list_lock &c->lock irq_context: 0 pernet_ops_rwsem uevent_sock_mutex irq_context: 0 pernet_ops_rwsem uevent_sock_mutex uevent_sock_mutex.wait_lock irq_context: 0 pernet_ops_rwsem uevent_sock_mutex &rq->__lock irq_context: 0 pernet_ops_rwsem uevent_sock_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem uevent_sock_mutex &rq->__lock &cfs_rq->removed.lock irq_context: 0 &data->open_mutex uevent_sock_mutex.wait_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex lweventlist_lock pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex lweventlist_lock &dir->lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_read_lock &pool->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &wg->device_update_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &wg->device_update_lock &wg->socket_update_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &wg->device_update_lock rcu_state.exp_mutex rcu_node_0 irq_context: 0 &data->open_mutex rfkill_global_mutex &____s->seqcount irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &wg->device_update_lock rcu_state.exp_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &wg->device_update_lock rcu_state.exp_mutex rcu_read_lock &pool->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &wg->device_update_lock rcu_state.exp_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem 
rtnl_mutex &wg->device_update_lock rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 &data->open_mutex rfkill_global_mutex uevent_sock_mutex.wait_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &wg->device_update_lock rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 &data->open_mutex rfkill_global_mutex &p->pi_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &wg->device_update_lock rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &wg->device_update_lock rcu_state.exp_mutex &rnp->exp_wq[3] irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &wg->device_update_lock rcu_state.exp_mutex &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &wg->device_update_lock &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &wg->device_update_lock cpu_hotplug_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &wg->device_update_lock rcu_state.exp_mutex &rnp->exp_wq[0] irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &wg->device_update_lock k-sk_lock-AF_INET irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &wg->device_update_lock k-sk_lock-AF_INET k-slock-AF_INET irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &wg->device_update_lock k-slock-AF_INET irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &wg->device_update_lock &table->hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &wg->device_update_lock &table->hash[i].lock &table->hash2[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &wg->device_update_lock k-clock-AF_INET irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &wg->device_update_lock pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &wg->device_update_lock &sb->s_type->i_lock_key#8 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &wg->device_update_lock &xa->xa_lock#6 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem nf_nat_proto_mutex nf_hook_mutex &n->list_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem nf_nat_proto_mutex nf_hook_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &wg->device_update_lock &fsnotify_mark_srcu irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem nf_hook_mutex &n->list_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &wg->device_update_lock rcu_state.exp_mutex &rnp->exp_wq[1] irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem nf_hook_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &wg->device_update_lock k-sk_lock-AF_INET6 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#9 rtnl_mutex &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &wg->device_update_lock k-sk_lock-AF_INET6 
k-slock-AF_INET6 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#9 rtnl_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &wg->device_update_lock k-slock-AF_INET6 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#9 rtnl_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem &cfs_rq->removed.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &wg->device_update_lock k-clock-AF_INET6 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#9 rtnl_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &wg->device_update_lock &peer->endpoint_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#9 rtnl_mutex rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &wg->device_update_lock &peer->endpoint_lock &obj_hash[i].lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#9 rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &wg->device_update_lock &peer->endpoint_lock pool_lock#2 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#9 rtnl_mutex &idev->mc_lock &n->list_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &wg->device_update_lock &rq->__lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#9 rtnl_mutex &idev->mc_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &wg->device_update_lock rcu_state.exp_mutex &rnp->exp_wq[2] irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#9 rtnl_mutex &idev->mc_lock &____s->seqcount#2 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#9 rtnl_mutex &idev->mc_lock &____s->seqcount irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex lweventlist_lock &c->lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &dev->tx_global_lock _xmit_NONE#2 irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &tbl->lock &n->lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &tbl->lock &c->lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &tbl->lock &____s->seqcount irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &tbl->lock pool_lock#2 irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &tbl->lock rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#9 rtnl_mutex &idev->mc_lock _xmit_ETHER &c->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#9 rtnl_mutex &idev->mc_lock _xmit_ETHER &n->list_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#9 rtnl_mutex &idev->mc_lock _xmit_ETHER &n->list_lock 
&c->lock irq_context: 0 cb_lock nlk_cb_mutex-GENERIC &devlink->lock_key#9 irq_context: 0 cb_lock nlk_cb_mutex-GENERIC &devlink->lock_key#9 &rq->__lock irq_context: 0 cb_lock nlk_cb_mutex-GENERIC &devlink->lock_key#9 &devlink_port->type_lock irq_context: softirq (&mp->timer) &br->multicast_lock &____s->seqcount#2 irq_context: softirq (&mp->timer) &br->multicast_lock &____s->seqcount irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem rcu_read_lock &rcu_state.gp_wq irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &tbl->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &tbl->lock &n->lock irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#9 &nsim_trap_data->trap_lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#9 &nsim_trap_data->trap_lock crngs.lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#9 &nsim_trap_data->trap_lock &nsim_dev->fa_cookie_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &tbl->lock &n->lock &____s->seqcount#9 irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#9 &nsim_trap_data->trap_lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &tbl->lock nl_table_lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#9 &nsim_trap_data->trap_lock &c->lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#9 &nsim_trap_data->trap_lock &n->list_lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#9 &nsim_trap_data->trap_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#9 &nsim_trap_data->trap_lock &____s->seqcount#2 irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#9 &nsim_trap_data->trap_lock &____s->seqcount irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &dev_addr_list_lock_key/1 &n->list_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &tbl->lock nl_table_wait.lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &dev_addr_list_lock_key/1 &n->list_lock &c->lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &tbl->lock rcu_read_lock lock#8 irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &tbl->lock rcu_read_lock id_table_lock irq_context: 0 rtnl_mutex dev_addr_sem team->team_lock_key#9 irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &tbl->lock 
&dir->lock#2 irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &tbl->lock krc.lock irq_context: 0 rtnl_mutex &dev_addr_list_lock_key#2/1 rcu_read_lock _xmit_ETHER &n->list_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem nf_hook_mutex fs_reclaim irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem nf_hook_mutex fs_reclaim &rq->__lock irq_context: 0 rtnl_mutex &dev_addr_list_lock_key#2/1 rcu_read_lock _xmit_ETHER &n->list_lock &c->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem nf_hook_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#9 &nsim_trap_data->trap_lock quarantine_lock irq_context: 0 rtnl_mutex dev_addr_sem &tn->lock &rq->__lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#9 lweventlist_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#9 lweventlist_lock &dir->lock#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 &ei->i_data_sem &ei->i_data_sem/1 &mapping->private_lock irq_context: 0 sb_writers#8 rcu_read_lock &rcu_state.expedited_wq irq_context: 0 sb_writers#8 rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 sb_writers#8 rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#8 rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 remove_cache_srcu rcu_read_lock &rcu_state.gp_wq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 remove_cache_srcu rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 remove_cache_srcu rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 remove_cache_srcu rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#11 rtnl_mutex &rq->__lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#11 rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#11 rtnl_mutex nl_table_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#11 rtnl_mutex nl_table_wait.lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#11 rtnl_mutex net_rwsem irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#11 rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 jbd2_handle &ei->i_data_sem &ei->i_data_sem/1 mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#11 rtnl_mutex &tn->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 jbd2_handle &ei->i_data_sem &ei->i_data_sem/1 mmu_notifier_invalidate_range_start &rq->__lock 
&per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 jbd2_handle &ei->i_data_sem &ei->i_data_sem/1 rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 jbd2_handle &ei->i_data_sem &ei->i_data_sem/1 rcu_read_lock &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 jbd2_handle &ei->i_data_sem &ei->i_data_sem/1 rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 jbd2_handle &ei->i_data_sem &ei->i_data_sem/1 rcu_read_lock &rcu_state.expedited_wq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 jbd2_handle &ei->i_data_sem &ei->i_data_sem/1 rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 jbd2_handle &ei->i_data_sem &ei->i_data_sem/1 rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 jbd2_handle &ei->i_data_sem &ei->i_data_sem/1 rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#11 rtnl_mutex &x->wait#9 irq_context: 0 sk_lock-AF_BLUETOOTH-BTPROTO_HCI &c->lock irq_context: 0 &data->open_mutex uevent_sock_mutex &c->lock irq_context: 0 &data->open_mutex uevent_sock_mutex &____s->seqcount irq_context: 0 (wq_completion)mm_percpu_wq &rq->__lock irq_context: 0 (wq_completion)mm_percpu_wq &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &dir->lock#2 &meta->lock irq_context: 0 &dir->lock#2 kfence_freelist_lock irq_context: 0 sb_writers#5 fill_pool_map-wait-type-override &n->list_lock irq_context: 0 sb_writers#5 fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: softirq (&tw->tw_timer) &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 (wq_completion)events_power_efficient (check_lifetime_work).work &rq->__lock irq_context: 0 (wq_completion)events_power_efficient (check_lifetime_work).work rcu_node_0 irq_context: 0 (wq_completion)events_power_efficient (check_lifetime_work).work rcu_read_lock rcu_node_0 irq_context: softirq &(&bond->mii_work)->timer rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &data->open_mutex lock kernfs_idr_lock pool_lock#2 irq_context: 0 &data->open_mutex rfkill_global_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 pernet_ops_rwsem nl_table_lock irq_context: 0 pernet_ops_rwsem &net->rules_mod_lock irq_context: 0 pernet_ops_rwsem rcu_read_lock pool_lock#2 irq_context: 0 pernet_ops_rwsem nl_table_lock nl_table_wait.lock irq_context: 0 pernet_ops_rwsem &rq->__lock irq_context: 0 pernet_ops_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex nl_table_wait.lock &p->pi_lock irq_context: 0 rtnl_mutex nl_table_wait.lock &p->pi_lock &rq->__lock irq_context: 0 rtnl_mutex nl_table_wait.lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &data->open_mutex &data->lock irq_context: 0 (wq_completion)hci4 irq_context: 0 (wq_completion)hci4 (work_completion)(&hdev->power_on) irq_context: 0 (wq_completion)hci4 (work_completion)(&hdev->power_on) 
&hdev->req_lock irq_context: 0 (wq_completion)hci4 (work_completion)(&hdev->power_on) &hdev->req_lock &obj_hash[i].lock irq_context: 0 (wq_completion)hci4 (work_completion)(&hdev->power_on) &hdev->req_lock &list->lock#9 irq_context: 0 (wq_completion)hci4 (work_completion)(&hdev->power_on) &hdev->req_lock &list->lock#10 irq_context: 0 (wq_completion)hci4 (work_completion)(&hdev->power_on) &hdev->req_lock rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)hci4 (work_completion)(&hdev->power_on) &hdev->req_lock rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)hci3 irq_context: 0 (wq_completion)hci3 (work_completion)(&hdev->power_on) irq_context: 0 (wq_completion)hci3 (work_completion)(&hdev->power_on) &hdev->req_lock irq_context: 0 (wq_completion)hci3 (work_completion)(&hdev->power_on) &hdev->req_lock &obj_hash[i].lock irq_context: 0 (wq_completion)hci3 (work_completion)(&hdev->power_on) &hdev->req_lock &list->lock#9 irq_context: 0 (wq_completion)hci3 (work_completion)(&hdev->power_on) &hdev->req_lock &list->lock#10 irq_context: 0 (wq_completion)hci3 (work_completion)(&hdev->power_on) &hdev->req_lock rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)hci3 (work_completion)(&hdev->power_on) &hdev->req_lock rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)hci3 (work_completion)(&hdev->power_on) &hdev->req_lock rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)hci3 (work_completion)(&hdev->power_on) &hdev->req_lock &hdev->req_wait_q irq_context: 0 (wq_completion)hci3 (work_completion)(&hdev->power_on) &hdev->req_lock &base->lock irq_context: 0 (wq_completion)hci3 (work_completion)(&hdev->power_on) &hdev->req_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)hci3 (work_completion)(&hdev->power_on) &hdev->req_lock &rq->__lock irq_context: 0 (wq_completion)hci3 (work_completion)(&hdev->power_on) &hdev->req_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem proc_inum_ida.xa_lock pool_lock#2 irq_context: 0 pernet_ops_rwsem percpu_counters_lock irq_context: 0 pernet_ops_rwsem batched_entropy_u32.lock irq_context: 0 (wq_completion)hci3#2 irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->cmd_work) irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->cmd_work) &list->lock#9 irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->cmd_work) fs_reclaim irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->cmd_work) fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->cmd_work) tk_core.seq.seqcount irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->cmd_work) &list->lock#11 irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->cmd_work) &data->read_wait irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->cmd_work) rcu_read_lock &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem cache_list_lock irq_context: 0 pernet_ops_rwsem tk_core.seq.seqcount irq_context: 0 pernet_ops_rwsem &k->list_lock irq_context: 0 pernet_ops_rwsem lock irq_context: 0 pernet_ops_rwsem lock kernfs_idr_lock irq_context: 0 pernet_ops_rwsem &root->kernfs_rwsem irq_context: 0 pernet_ops_rwsem &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 pernet_ops_rwsem uevent_sock_mutex fs_reclaim irq_context: 0 pernet_ops_rwsem uevent_sock_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 pernet_ops_rwsem uevent_sock_mutex pool_lock#2 irq_context: 0 pernet_ops_rwsem 
uevent_sock_mutex nl_table_lock irq_context: 0 pernet_ops_rwsem uevent_sock_mutex &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem uevent_sock_mutex &____s->seqcount irq_context: 0 pernet_ops_rwsem uevent_sock_mutex rcu_read_lock pool_lock#2 irq_context: 0 pernet_ops_rwsem uevent_sock_mutex nl_table_wait.lock irq_context: 0 pernet_ops_rwsem &sn->pipefs_sb_lock irq_context: 0 pernet_ops_rwsem &pcp->lock &zone->lock irq_context: 0 pernet_ops_rwsem &pcp->lock &zone->lock &____s->seqcount irq_context: 0 pernet_ops_rwsem batched_entropy_u32.lock crngs.lock irq_context: 0 pernet_ops_rwsem &s->s_inode_list_lock irq_context: 0 pernet_ops_rwsem nf_hook_mutex irq_context: 0 pernet_ops_rwsem nf_hook_mutex fs_reclaim irq_context: 0 pernet_ops_rwsem nf_hook_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 pernet_ops_rwsem nf_hook_mutex pool_lock#2 irq_context: 0 pernet_ops_rwsem cpu_hotplug_lock irq_context: 0 pernet_ops_rwsem nf_connlabels_lock irq_context: 0 pernet_ops_rwsem &obj_hash[i].lock pool_lock irq_context: 0 pernet_ops_rwsem fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 pernet_ops_rwsem fill_pool_map-wait-type-override pool_lock irq_context: 0 pernet_ops_rwsem nf_ct_ecache_mutex irq_context: 0 pernet_ops_rwsem nf_log_mutex irq_context: 0 pernet_ops_rwsem ipvs->est_mutex irq_context: 0 pernet_ops_rwsem ipvs->est_mutex fs_reclaim irq_context: 0 pernet_ops_rwsem ipvs->est_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 pernet_ops_rwsem ipvs->est_mutex pool_lock#2 irq_context: 0 pernet_ops_rwsem ipvs->est_mutex pcpu_alloc_mutex irq_context: 0 pernet_ops_rwsem ipvs->est_mutex pcpu_alloc_mutex pcpu_lock irq_context: 0 pernet_ops_rwsem &base->lock irq_context: 0 pernet_ops_rwsem &base->lock &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem __ip_vs_app_mutex irq_context: 0 pernet_ops_rwsem __ip_vs_app_mutex fs_reclaim irq_context: 0 pernet_ops_rwsem __ip_vs_app_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 pernet_ops_rwsem __ip_vs_app_mutex pool_lock#2 irq_context: 0 pernet_ops_rwsem &hashinfo->lock#2 irq_context: 0 pernet_ops_rwsem &net->ipv6.ip6addrlbl_table.lock irq_context: 0 pernet_ops_rwsem remove_cache_srcu irq_context: 0 pernet_ops_rwsem remove_cache_srcu quarantine_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex irq_context: 0 pernet_ops_rwsem &this->receive_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &(&net->nexthop.notifier_chain)->rwsem irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 pernet_ops_rwsem rtnl_mutex fs_reclaim irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 fill_pool_map-wait-type-override &rq->__lock irq_context: 0 pernet_ops_rwsem rtnl_mutex fs_reclaim &rq->__lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &list->lock#9 irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) lock#6 irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) lock#6 kcov_remote_lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) fs_reclaim irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) (console_sem).lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) 
console_lock console_srcu console_owner_lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) console_lock console_srcu console_owner irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) console_lock console_srcu console_owner &port_lock_key irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 fill_pool_map-wait-type-override &c->lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 fill_pool_map-wait-type-override &n->list_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#9 rcu_read_lock &pool->lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#9 rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) console_lock console_srcu console_owner console_owner_lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &obj_hash[i].lock irq_context: 0 rtnl_mutex _xmit_ETHER/1 &c->lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->req_wait_q irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->req_wait_q &p->pi_lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->req_wait_q &p->pi_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->req_wait_q &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->req_wait_q &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &rq->__lock irq_context: 0 (wq_completion)hci3 (work_completion)(&hdev->power_on) &hdev->req_lock (&timer.timer) irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) lock#6 &kcov->lock irq_context: 0 (wq_completion)hci4 (work_completion)(&hdev->power_on) &hdev->req_lock rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)hci4 (work_completion)(&hdev->power_on) &hdev->req_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)hci4 (work_completion)(&hdev->power_on) &hdev->req_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)hci4 (work_completion)(&hdev->power_on) &hdev->req_lock &hdev->req_wait_q irq_context: 0 (wq_completion)hci4 (work_completion)(&hdev->power_on) &hdev->req_lock &base->lock irq_context: 0 (wq_completion)hci4 (work_completion)(&hdev->power_on) &hdev->req_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)hci4 (work_completion)(&hdev->power_on) &hdev->req_lock &rq->__lock irq_context: 0 (wq_completion)hci4 (work_completion)(&hdev->power_on) &hdev->req_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)hci4#2 
irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->cmd_work) irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->cmd_work) &list->lock#9 irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->cmd_work) fs_reclaim irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->cmd_work) fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->nc.work)->work) &base->lock fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->nc.work)->work) &base->lock fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->nc.work)->work) &base->lock fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->cmd_work) &obj_hash[i].lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->cmd_work) rcu_read_lock &base->lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->cmd_work) rcu_read_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) console_owner_lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) console_owner irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->cmd_work) tk_core.seq.seqcount irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->cmd_work) &list->lock#11 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &hsr->seqnr_lock &data->lock &obj_hash[i].lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->cmd_work) &data->read_wait irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->cmd_work) rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &list->lock#9 irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) lock#6 irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) lock#6 kcov_remote_lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) fs_reclaim irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &hdev->lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) (console_sem).lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) console_lock console_srcu console_owner_lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) console_lock console_srcu console_owner irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &hsr->seqnr_lock &data->lock &base->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &hsr->seqnr_lock &data->lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem ipvs->est_mutex &cfs_rq->removed.lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) console_lock console_srcu console_owner &port_lock_key irq_context: 0 (wq_completion)hci4#2 
(work_completion)(&hdev->rx_work) console_lock console_srcu console_owner console_owner_lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &obj_hash[i].lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &hdev->req_wait_q irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &hdev->req_wait_q &p->pi_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#11 rtnl_mutex &k->list_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#11 rtnl_mutex gdp_mutex irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#11 rtnl_mutex gdp_mutex &k->list_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#11 rtnl_mutex gdp_mutex fs_reclaim irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#11 rtnl_mutex gdp_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#11 rtnl_mutex gdp_mutex &c->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#11 rtnl_mutex gdp_mutex lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#11 rtnl_mutex gdp_mutex lock kernfs_idr_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#11 rtnl_mutex gdp_mutex &root->kernfs_rwsem irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#11 rtnl_mutex gdp_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#11 rtnl_mutex gdp_mutex kobj_ns_type_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#11 rtnl_mutex lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#11 rtnl_mutex lock kernfs_idr_lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &base->lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#11 rtnl_mutex &root->kernfs_rwsem irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#11 rtnl_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#11 rtnl_mutex bus_type_sem irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#11 rtnl_mutex sysfs_symlink_target_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#11 rtnl_mutex &root->kernfs_rwsem irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#11 rtnl_mutex &dev->power.lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#11 rtnl_mutex 
dpm_list_mtx irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#11 rtnl_mutex uevent_sock_mutex irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#11 rtnl_mutex uevent_sock_mutex fs_reclaim irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#11 rtnl_mutex uevent_sock_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->cmd_work) &data->read_wait &p->pi_lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->cmd_work) &data->read_wait &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->cmd_work) &data->read_wait &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#11 rtnl_mutex uevent_sock_mutex nl_table_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#11 rtnl_mutex uevent_sock_mutex &obj_hash[i].lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#11 rtnl_mutex uevent_sock_mutex nl_table_wait.lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#11 rtnl_mutex &k->k_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#11 rtnl_mutex subsys mutex#17 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#11 rtnl_mutex subsys mutex#17 &k->k_lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &c->lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &____s->seqcount irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) pool_lock#2 irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &hdev->req_wait_q &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &hdev->req_wait_q &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq (&pool->mayday_timer) &pool->lock/1 irq_context: softirq (&pool->mayday_timer) &pool->lock/1 wq_mayday_lock irq_context: softirq (&pool->mayday_timer) &obj_hash[i].lock irq_context: softirq (&pool->mayday_timer) &base->lock irq_context: softirq (&pool->mayday_timer) &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#11 rtnl_mutex &dir->lock#2 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#11 rtnl_mutex uevent_sock_mutex &c->lock irq_context: 0 pernet_ops_rwsem rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 pernet_ops_rwsem rtnl_mutex pool_lock#2 irq_context: 0 pernet_ops_rwsem rtnl_mutex pcpu_alloc_mutex irq_context: 0 pernet_ops_rwsem rtnl_mutex pcpu_alloc_mutex pcpu_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex net_rwsem irq_context: 0 pernet_ops_rwsem rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 pernet_ops_rwsem rtnl_mutex &tn->lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &x->wait#9 irq_context: 0 pernet_ops_rwsem rtnl_mutex &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &k->list_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex gdp_mutex irq_context: 0 pernet_ops_rwsem 
rtnl_mutex gdp_mutex &k->list_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &c->lock irq_context: 0 pernet_ops_rwsem rtnl_mutex lock irq_context: 0 pernet_ops_rwsem rtnl_mutex lock kernfs_idr_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem irq_context: 0 pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 pernet_ops_rwsem rtnl_mutex bus_type_sem irq_context: 0 pernet_ops_rwsem rtnl_mutex sysfs_symlink_target_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &n->list_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &n->list_lock &c->lock irq_context: 0 pernet_ops_rwsem rtnl_mutex lock kernfs_idr_lock pool_lock#2 irq_context: 0 pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem irq_context: 0 pernet_ops_rwsem rtnl_mutex &dev->power.lock irq_context: 0 pernet_ops_rwsem rtnl_mutex dpm_list_mtx irq_context: 0 pernet_ops_rwsem rtnl_mutex uevent_sock_mutex irq_context: 0 pernet_ops_rwsem rtnl_mutex uevent_sock_mutex fs_reclaim irq_context: 0 pernet_ops_rwsem rtnl_mutex uevent_sock_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 pernet_ops_rwsem rtnl_mutex uevent_sock_mutex pool_lock#2 irq_context: 0 pernet_ops_rwsem rtnl_mutex uevent_sock_mutex nl_table_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex uevent_sock_mutex &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem rtnl_mutex uevent_sock_mutex nl_table_wait.lock irq_context: 0 pernet_ops_rwsem rtnl_mutex subsys mutex#17 irq_context: 0 pernet_ops_rwsem rtnl_mutex subsys mutex#17 &k->k_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &____s->seqcount irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_read_lock pool_lock#2 irq_context: 0 pernet_ops_rwsem rtnl_mutex &dir->lock#2 irq_context: 0 pernet_ops_rwsem rtnl_mutex &rq->__lock irq_context: 0 pernet_ops_rwsem rtnl_mutex dev_hotplug_mutex irq_context: 0 pernet_ops_rwsem rtnl_mutex dev_hotplug_mutex &dev->power.lock irq_context: 0 pernet_ops_rwsem rtnl_mutex dev_base_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex input_pool.lock irq_context: 0 pernet_ops_rwsem rtnl_mutex batched_entropy_u32.lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &tbl->lock irq_context: 0 pernet_ops_rwsem rtnl_mutex sysctl_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex nl_table_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex nl_table_wait.lock irq_context: 0 pernet_ops_rwsem rtnl_mutex proc_subdir_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex proc_inum_ida.xa_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex proc_subdir_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &idev->mc_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 pernet_ops_rwsem rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 pernet_ops_rwsem rtnl_mutex &idev->mc_lock pool_lock#2 irq_context: 0 pernet_ops_rwsem rtnl_mutex &idev->mc_lock &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &pnettable->lock irq_context: 0 pernet_ops_rwsem rtnl_mutex smc_ib_devices.mutex irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 pernet_ops_rwsem rtnl_mutex.wait_lock irq_context: 0 pernet_ops_rwsem &p->pi_lock irq_context: 0 pernet_ops_rwsem &p->pi_lock &rq->__lock irq_context: 0 pernet_ops_rwsem &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem rdma_nets.xa_lock irq_context: 0 pernet_ops_rwsem devices_rwsem irq_context: 0 pernet_ops_rwsem devices_rwsem &rq->__lock irq_context: 0 pernet_ops_rwsem hwsim_netgroup_ida.xa_lock irq_context: 0 
pernet_ops_rwsem rtnl_mutex rtnl_mutex.wait_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem rtnl_mutex stack_depot_init_mutex irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_NETLINK rcu_read_lock rcu_read_lock &pool->lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_NETLINK rcu_read_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_NETLINK rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_NETLINK rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_NETLINK rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_NETLINK rcu_read_lock rcu_node_0 irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_NETLINK rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&ht->run_work) &ht->mutex &c->lock irq_context: 0 pernet_ops_rwsem lock kernfs_idr_lock pool_lock#2 irq_context: 0 pernet_ops_rwsem pcpu_alloc_mutex &rq->__lock irq_context: 0 pernet_ops_rwsem pcpu_alloc_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sig->cred_guard_mutex &rcu_state.expedited_wq irq_context: 0 &sig->cred_guard_mutex &rcu_state.expedited_wq &p->pi_lock irq_context: 0 &sig->cred_guard_mutex &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &data->lock irq_context: 0 (wq_completion)hci4 (work_completion)(&hdev->power_on) &hdev->req_lock (&timer.timer) irq_context: 0 &sig->cred_guard_mutex &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) lock#6 &kcov->lock irq_context: 0 (wq_completion)events fqdir_free_work &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->cmd_work) &data->read_wait &p->pi_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)hci4 (work_completion)(&hdev->power_on) &hdev->req_lock &data->lock irq_context: 0 (wq_completion)hci4 (work_completion)(&hdev->power_on) &hdev->req_lock &c->lock irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 remove_cache_srcu irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 remove_cache_srcu quarantine_lock irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 remove_cache_srcu &c->lock irq_context: 0 rtnl_mutex &batadv_netdev_addr_lock_key/1 &c->lock irq_context: 0 rtnl_mutex dev_addr_sem &tbl->lock &____s->seqcount#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &vlan_netdev_addr_lock_key/1 &n->list_lock irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 remove_cache_srcu &n->list_lock irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 remove_cache_srcu &obj_hash[i].lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#11 rtnl_mutex dev_hotplug_mutex irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#11 rtnl_mutex dev_hotplug_mutex &dev->power.lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#11 rtnl_mutex 
dev_base_lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->cmd_work) &data->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#11 rtnl_mutex input_pool.lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#11 rtnl_mutex rcu_read_lock &pool->lock/1 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#11 rtnl_mutex rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#11 rtnl_mutex rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#11 rtnl_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#11 rtnl_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#11 rtnl_mutex batched_entropy_u32.lock irq_context: 0 &pool->lock/1 &x->wait#10 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#11 rtnl_mutex &tbl->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#11 rtnl_mutex sysctl_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#11 rtnl_mutex failover_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#11 rtnl_mutex pcpu_alloc_mutex irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#11 rtnl_mutex pcpu_alloc_mutex pcpu_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#11 rtnl_mutex proc_subdir_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#11 rtnl_mutex proc_inum_ida.xa_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#11 rtnl_mutex proc_subdir_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#11 rtnl_mutex &idev->mc_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#11 rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#11 rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#11 rtnl_mutex &idev->mc_lock &obj_hash[i].lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#11 rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#11 rtnl_mutex &idev->mc_lock _xmit_ETHER irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#11 rtnl_mutex &idev->mc_lock _xmit_ETHER &c->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 
nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#11 rtnl_mutex &pnettable->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#11 rtnl_mutex smc_ib_devices.mutex irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#11 rtnl_mutex &vn->sock_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#11 rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#11 rtnl_mutex &obj_hash[i].lock pool_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#11 rtnl_mutex uevent_sock_mutex &n->list_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#11 rtnl_mutex uevent_sock_mutex &n->list_lock &c->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#11 rtnl_mutex batched_entropy_u32.lock crngs.lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#11 rtnl_mutex &sb->s_type->i_mutex_key#3 &____s->seqcount#2 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#11 rtnl_mutex &sb->s_type->i_mutex_key#3 &____s->seqcount irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#11 rtnl_mutex lock kernfs_idr_lock &c->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#11 rtnl_mutex &idev->mc_lock &obj_hash[i].lock pool_lock irq_context: 0 cb_lock nlk_cb_mutex-GENERIC &devlink->lock_key#11 irq_context: 0 cb_lock nlk_cb_mutex-GENERIC &devlink->lock_key#11 &devlink_port->type_lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#11 &nsim_trap_data->trap_lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#11 &nsim_trap_data->trap_lock &c->lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#11 &nsim_trap_data->trap_lock crngs.lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#11 &nsim_trap_data->trap_lock &nsim_dev->fa_cookie_lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#11 &nsim_trap_data->trap_lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#11 &nsim_trap_data->trap_lock &n->list_lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#11 &nsim_trap_data->trap_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem ipvs->est_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock genl_mutex rtnl_mutex &wg->device_update_lock &wg->static_identity.lock &rq->__lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &wg->device_update_lock &wg->static_identity.lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem nfnl_subsys_ipset 
rcu_read_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#11 &nsim_trap_data->trap_lock &____s->seqcount#2 irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#11 &nsim_trap_data->trap_lock &____s->seqcount irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#11 &nsim_trap_data->trap_lock quarantine_lock irq_context: 0 rtnl_mutex &dev_addr_list_lock_key/1 &n->list_lock irq_context: 0 rtnl_mutex &dev_addr_list_lock_key/1 &n->list_lock &c->lock irq_context: 0 rtnl_mutex dev_addr_sem team->team_lock_key#11 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 &data->lock &obj_hash[i].lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 &data->lock &base->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 &data->lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#11 lweventlist_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#11 lweventlist_lock &dir->lock#2 irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#11 rcu_read_lock &pool->lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#11 rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex _xmit_ETHER/1 &n->list_lock irq_context: 0 rtnl_mutex _xmit_ETHER/1 &n->list_lock &c->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock _xmit_ETHER/1 &n->list_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock _xmit_ETHER/1 &n->list_lock &c->lock irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem fill_pool_map-wait-type-override &n->list_lock irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 remove_cache_srcu &meta->lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 remove_cache_srcu kfence_freelist_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &vlan_netdev_addr_lock_key/1 _xmit_ETHER &n->list_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &vlan_netdev_addr_lock_key/1 _xmit_ETHER &n->list_lock &c->lock irq_context: 0 rtnl_mutex dev_addr_sem fill_pool_map-wait-type-override &c->lock irq_context: 0 rtnl_mutex dev_addr_sem fill_pool_map-wait-type-override &n->list_lock irq_context: 0 rtnl_mutex dev_addr_sem fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &macsec_netdev_addr_lock_key/1 _xmit_ETHER &n->list_lock irq_context: 0 
(wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &macsec_netdev_addr_lock_key/1 _xmit_ETHER &n->list_lock &c->lock irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh key#21 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &entry->crc_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &vlan_netdev_addr_lock_key/1 &n->list_lock &c->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &ndev->lock &ifa->lock fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 (wq_completion)wg-kex-wg0#21 irq_context: 0 (wq_completion)wg-kex-wg0#21 (work_completion)(&peer->transmit_handshake_work) irq_context: 0 (wq_completion)wg-kex-wg0#21 (work_completion)(&peer->transmit_handshake_work) tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg0#21 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock irq_context: 0 (wq_completion)wg-kex-wg0#21 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock irq_context: 0 (wq_completion)wg-kex-wg0#21 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock crngs.lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &hsr->seqnr_lock quarantine_lock irq_context: 0 rtnl_mutex &macvlan_netdev_addr_lock_key/1 _xmit_ETHER &c->lock irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem &pcp->lock &zone->lock irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem &pcp->lock &zone->lock &____s->seqcount irq_context: 0 rtnl_mutex rcu_read_lock &ipvlan->addrs_lock &c->lock irq_context: 0 (wq_completion)wg-kex-wg0#21 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg0#21 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg0#21 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock rcu_read_lock_bh batched_entropy_u32.lock irq_context: 0 (wq_completion)wg-kex-wg0#21 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock rcu_read_lock_bh &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg0#21 (work_completion)(&peer->transmit_handshake_work) &cookie->lock irq_context: 0 (wq_completion)wg-kex-wg0#21 (work_completion)(&peer->transmit_handshake_work) &cookie->lock irq_context: 0 (wq_completion)wg-kex-wg0#21 (work_completion)(&peer->transmit_handshake_work) rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg0#21 (work_completion)(&peer->transmit_handshake_work) rcu_read_lock_bh 
&base->lock irq_context: 0 rtnl_mutex dev_addr_sem net_rwsem &rq->__lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex &pcp->lock &zone->lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex &pcp->lock &zone->lock &____s->seqcount irq_context: 0 (wq_completion)wg-kex-wg0#17 irq_context: 0 (wq_completion)wg-kex-wg0#17 (work_completion)(&peer->transmit_handshake_work) irq_context: 0 (wq_completion)wg-kex-wg0#17 (work_completion)(&peer->transmit_handshake_work) tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg0#17 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock irq_context: 0 namespace_sem pcpu_alloc_mutex pcpu_alloc_mutex.wait_lock irq_context: 0 namespace_sem pcpu_alloc_mutex &rq->__lock irq_context: 0 namespace_sem pcpu_alloc_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-kex-wg0#17 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock irq_context: 0 (wq_completion)wg-kex-wg0#17 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock crngs.lock irq_context: 0 (wq_completion)wg-kex-wg0#17 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg0#17 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg0#17 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock rcu_read_lock_bh batched_entropy_u32.lock irq_context: 0 (wq_completion)wg-kex-wg0#17 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock rcu_read_lock_bh &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg0#17 (work_completion)(&peer->transmit_handshake_work) &cookie->lock irq_context: 0 (wq_completion)wg-kex-wg0#17 (work_completion)(&peer->transmit_handshake_work) &cookie->lock irq_context: 0 (wq_completion)wg-kex-wg0#17 (work_completion)(&peer->transmit_handshake_work) rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg0#17 (work_completion)(&peer->transmit_handshake_work) rcu_read_lock_bh &base->lock irq_context: 0 (wq_completion)wg-kex-wg0#17 (work_completion)(&peer->transmit_handshake_work) rcu_read_lock_bh &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg0#17 (work_completion)(&peer->transmit_handshake_work) &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg0#17 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock irq_context: 0 (wq_completion)wg-kex-wg0#17 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)wg-kex-wg0#17 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)wg-kex-wg0#17 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh crngs.lock irq_context: 0 (wq_completion)wg-kex-wg0#17 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh batched_entropy_u32.lock irq_context: 0 (wq_completion)wg-kex-wg0#17 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock 
rcu_read_lock_bh rcu_read_lock &____s->seqcount#7 irq_context: 0 (wq_completion)wg-kex-wg0#17 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 (wq_completion)wg-kex-wg0#17 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &nf_conntrack_locks[i] irq_context: 0 (wq_completion)wg-kex-wg0#17 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 irq_context: 0 (wq_completion)wg-kex-wg0#17 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 batched_entropy_u8.lock irq_context: 0 (wq_completion)wg-kex-wg0#17 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &nf_conntrack_locks[i]/1 irq_context: 0 (wq_completion)wg-kex-wg0#17 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg0#17 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &tbl->lock irq_context: 0 pernet_ops_rwsem pcpu_alloc_mutex.wait_lock irq_context: 0 (wq_completion)wg-kex-wg0#17 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &n->lock irq_context: 0 (wq_completion)wg-kex-wg0#17 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)wg-kex-wg0#17 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg0#17 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: 0 (wq_completion)wg-kex-wg0#17 (work_completion)(&peer->transmit_handshake_work) batched_entropy_u8.lock irq_context: 0 (wq_completion)wg-kex-wg0#17 (work_completion)(&peer->transmit_handshake_work) &rq->__lock irq_context: 0 (wq_completion)wg-kex-wg1#17 irq_context: 0 (wq_completion)wg-kex-wg1#17 (work_completion)(&peer->transmit_handshake_work) irq_context: 0 (wq_completion)wg-kex-wg1#17 (work_completion)(&peer->transmit_handshake_work) tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg1#17 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock irq_context: 0 (wq_completion)wg-kex-wg1#18 irq_context: 0 (wq_completion)wg-kex-wg1#17 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock irq_context: 0 (wq_completion)wg-kex-wg1#18 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) irq_context: 0 (wq_completion)wg-kex-wg1#17 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock crngs.lock irq_context: 0 (wq_completion)wg-kex-wg1#18 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) 
*)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &r->consumer_lock#2 irq_context: 0 (wq_completion)wg-kex-wg1#18 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock irq_context: 0 (wq_completion)wg-kex-wg1#18 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock irq_context: 0 (wq_completion)wg-kex-wg1#17 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg1#18 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock &sem->wait_lock irq_context: 0 (wq_completion)wg-kex-wg1#17 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg1#18 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock &rq->__lock irq_context: 0 (wq_completion)wg-kex-wg1#17 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock rcu_read_lock_bh batched_entropy_u32.lock irq_context: 0 (wq_completion)wg-kex-wg1#18 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-kex-wg1#17 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock rcu_read_lock_bh &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg1#17 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &sem->wait_lock irq_context: 0 (wq_completion)wg-kex-wg1#17 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &p->pi_lock irq_context: 0 (wq_completion)wg-kex-wg1#17 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)wg-kex-wg1#17 (work_completion)(&peer->transmit_handshake_work) &cookie->lock irq_context: 0 (wq_completion)wg-kex-wg1#17 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, 
cpu)->seq irq_context: 0 (wq_completion)wg-kex-wg1#17 (work_completion)(&peer->transmit_handshake_work) &cookie->lock irq_context: 0 (wq_completion)wg-kex-wg1#17 (work_completion)(&peer->transmit_handshake_work) rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg1#17 (work_completion)(&peer->transmit_handshake_work) rcu_read_lock_bh &base->lock irq_context: 0 (wq_completion)wg-kex-wg1#17 (work_completion)(&peer->transmit_handshake_work) rcu_read_lock_bh &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg1#17 (work_completion)(&peer->transmit_handshake_work) &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg1#17 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock irq_context: 0 (wq_completion)wg-kex-wg1#17 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)wg-kex-wg1#17 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)wg-kex-wg1#17 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)wg-kex-wg1#17 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#7 irq_context: 0 (wq_completion)wg-kex-wg1#17 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &nf_conntrack_locks[i] irq_context: 0 (wq_completion)wg-kex-wg1#17 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 irq_context: 0 (wq_completion)wg-kex-wg1#17 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 batched_entropy_u8.lock irq_context: 0 (wq_completion)wg-kex-wg1#17 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &nf_conntrack_locks[i]/1 irq_context: 0 (wq_completion)wg-kex-wg1#17 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)wg-kex-wg1#17 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg1#17 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: 0 (wq_completion)wg-kex-wg1#17 (work_completion)(&peer->transmit_handshake_work) batched_entropy_u8.lock irq_context: 0 (wq_completion)wg-kex-wg0#18 irq_context: 0 (wq_completion)wg-kex-wg0#18 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) irq_context: 0 (wq_completion)wg-kex-wg0#18 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + 
(((__per_cpu_offset[(cpu)])))); }); })->work) &r->consumer_lock#2 irq_context: 0 (wq_completion)wg-kex-wg0#18 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock irq_context: 0 (wq_completion)wg-kex-wg0#18 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock irq_context: 0 (wq_completion)wg-kex-wg0#18 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg0#18 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock irq_context: 0 (wq_completion)wg-kex-wg0#18 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg0#18 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock irq_context: 0 (wq_completion)wg-kex-wg0#18 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg0#18 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock crngs.lock irq_context: 0 (wq_completion)wg-kex-wg0#18 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) 
*)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock &table->lock#2 irq_context: 0 fill_pool_map-wait-type-override &n->list_lock irq_context: 0 fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 &f->f_pos_lock sb_writers#3 irq_context: 0 &f->f_pos_lock sb_writers#3 sysctl_lock irq_context: 0 &f->f_pos_lock sb_writers#3 fs_reclaim irq_context: 0 &f->f_pos_lock sb_writers#3 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &f->f_pos_lock sb_writers#3 &mm->mmap_lock irq_context: 0 &f->f_pos_lock sb_writers#3 &obj_hash[i].lock irq_context: 0 (wq_completion)hci4 (work_completion)(&hdev->power_on) &hdev->req_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->cmd_work) &obj_hash[i].lock irq_context: 0 (wq_completion)hci4 (work_completion)(&hdev->power_on) &hdev->req_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->cmd_work) &data->read_wait &p->pi_lock irq_context: 0 pernet_ops_rwsem fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->cmd_work) &data->read_wait &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->cmd_work) &data->read_wait &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->cmd_work) rcu_read_lock &base->lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->cmd_work) rcu_read_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &c->lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &base->lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->cmd_work) &c->lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &n->list_lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &n->list_lock &c->lock irq_context: 0 (wq_completion)hci4 (work_completion)(&hdev->power_on) &hdev->req_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)hci4 (work_completion)(&hdev->power_on) &hdev->req_lock hci_sk_list.lock irq_context: 0 (wq_completion)hci4 (work_completion)(&hdev->power_on) fs_reclaim irq_context: 0 (wq_completion)hci4 (work_completion)(&hdev->power_on) fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)hci4 (work_completion)(&hdev->power_on) tk_core.seq.seqcount irq_context: 0 (wq_completion)hci4 (work_completion)(&hdev->power_on) hci_sk_list.lock irq_context: 0 (wq_completion)hci4 (work_completion)(&hdev->power_on) &data->lock irq_context: 0 (wq_completion)hci4 (work_completion)(&hdev->power_on) &obj_hash[i].lock irq_context: 0 (wq_completion)hci4 (work_completion)(&hdev->power_on) &c->lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) chan_list_lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &hdev->lock fs_reclaim irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &hdev->lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &hdev->lock &c->lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &hdev->lock &obj_hash[i].lock irq_context: 0 (wq_completion)hci4#2 
(work_completion)(&hdev->rx_work) &hdev->lock &x->wait#9 irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &hdev->lock &k->list_lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &hdev->lock lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &hdev->lock lock kernfs_idr_lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &hdev->lock &root->kernfs_rwsem irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &hdev->lock &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &hdev->lock bus_type_sem irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &hdev->lock sysfs_symlink_target_lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &hdev->lock &root->kernfs_rwsem irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &hdev->lock &dev->power.lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &hdev->lock dpm_list_mtx irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex fs_reclaim irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex nl_table_lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex &c->lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex &n->list_lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex rlock-AF_NETLINK irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex nl_table_wait.lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &hdev->lock 
uevent_sock_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &hdev->lock &k->k_lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &hdev->lock subsys mutex#80 irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &hdev->lock subsys mutex#80 &k->k_lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &hdev->lock &list->lock#9 irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &hdev->lock rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &hdev->lock rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &hdev->lock rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &hdev->lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &hdev->lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#5 fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 sb_writers#5 fill_pool_map-wait-type-override &c->lock irq_context: 0 sb_writers#5 fill_pool_map-wait-type-override pool_lock irq_context: 0 pernet_ops_rwsem fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_BLUETOOTH-BTPROTO_HCI &rq->__lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &hdev->lock &hdev->unregister_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_BLUETOOTH-BTPROTO_HCI &c->lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &hdev->lock &hdev->unregister_lock fs_reclaim irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_BLUETOOTH-BTPROTO_HCI &____s->seqcount irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &hdev->lock &hdev->unregister_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &hdev->lock &hdev->unregister_lock &hdev->cmd_sync_work_lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &hdev->lock &hdev->unregister_lock rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &hdev->lock &hdev->unregister_lock rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &hdev->lock &hdev->unregister_lock rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &hdev->lock &hdev->unregister_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &hdev->lock &hdev->unregister_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)hci4 (work_completion)(&hdev->cmd_sync_work) irq_context: 0 (wq_completion)hci4 (work_completion)(&hdev->cmd_sync_work) &hdev->cmd_sync_work_lock irq_context: 0 (wq_completion)hci4 (work_completion)(&hdev->cmd_sync_work) &hdev->req_lock irq_context: 0 (wq_completion)hci4 (work_completion)(&hdev->cmd_sync_work) &obj_hash[i].lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock fs_reclaim irq_context: 0 
(wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock &obj_hash[i].lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock chan_list_lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock &conn->ident_lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock &base->lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock &c->lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock &n->list_lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock &list->lock#12 irq_context: 0 &type->s_umount_key#22/1 &rq->__lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock &conn->chan_lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &hdev->lock &base->lock irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex fill_pool_map-wait-type-override &n->list_lock irq_context: 0 rtnl_mutex fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 (wq_completion)hci3 (work_completion)(&hdev->power_on) &hdev->req_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)hci3 (work_completion)(&hdev->power_on) &hdev->req_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem rcu_read_lock &pool->lock irq_context: 0 pernet_ops_rwsem rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 pernet_ops_rwsem rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 pernet_ops_rwsem rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem nf_hook_mutex &c->lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &obj_hash[i].lock pool_lock 
irq_context: 0 pernet_ops_rwsem rtnl_mutex fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 pernet_ops_rwsem rtnl_mutex fill_pool_map-wait-type-override &rq->__lock irq_context: 0 pernet_ops_rwsem rtnl_mutex fill_pool_map-wait-type-override &c->lock irq_context: 0 pernet_ops_rwsem rtnl_mutex fill_pool_map-wait-type-override pool_lock irq_context: 0 pernet_ops_rwsem rdma_nets.xa_lock pool_lock#2 irq_context: 0 pernet_ops_rwsem rtnl_mutex lock kernfs_idr_lock &c->lock irq_context: 0 pernet_ops_rwsem rtnl_mutex uevent_sock_mutex &c->lock irq_context: 0 pernet_ops_rwsem rtnl_mutex uevent_sock_mutex &____s->seqcount irq_context: 0 pernet_ops_rwsem rtnl_mutex &pcp->lock &zone->lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &pcp->lock &zone->lock &____s->seqcount irq_context: 0 sk_lock-AF_NETLINK rcu_read_lock &rcu_state.gp_wq irq_context: 0 sk_lock-AF_NETLINK rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 sk_lock-AF_NETLINK rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 sk_lock-AF_NETLINK rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_read_lock &pool->lock/1 irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem rtnl_mutex failover_lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->cmd_work) &c->lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->cmd_work) &____s->seqcount irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)hci3 (work_completion)(&hdev->power_on) &hdev->req_lock tk_core.seq.seqcount irq_context: 0 pernet_ops_rwsem rtnl_mutex &idev->mc_lock _xmit_ETHER irq_context: 0 (wq_completion)hci3 (work_completion)(&hdev->power_on) &hdev->req_lock hci_sk_list.lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &idev->mc_lock _xmit_ETHER pool_lock#2 irq_context: 0 (wq_completion)hci3 (work_completion)(&hdev->power_on) fs_reclaim irq_context: 0 (wq_completion)hci3 (work_completion)(&hdev->power_on) fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)hci3 (work_completion)(&hdev->power_on) tk_core.seq.seqcount irq_context: 0 (wq_completion)hci3 (work_completion)(&hdev->power_on) hci_sk_list.lock irq_context: 0 (wq_completion)hci3 (work_completion)(&hdev->power_on) &obj_hash[i].lock irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem rcu_read_lock &ei->socket.wq.wait irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem rcu_read_lock &ei->socket.wq.wait &p->pi_lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) chan_list_lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->lock fs_reclaim irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->lock &c->lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->lock &____s->seqcount irq_context: 0 (wq_completion)hci3#2 
(work_completion)(&hdev->rx_work) &hdev->lock pool_lock#2 irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->lock &obj_hash[i].lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->lock &x->wait#9 irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->lock &k->list_lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->lock lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->lock lock kernfs_idr_lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->lock &root->kernfs_rwsem irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->lock &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->lock bus_type_sem irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->lock sysfs_symlink_target_lock irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->lock &root->kernfs_rwsem irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->lock &dev->power.lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->lock dpm_list_mtx irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex fs_reclaim irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex nl_table_lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex rlock-AF_NETLINK irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem &n->list_lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem &n->list_lock &c->lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)hci3#2 
(work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex nl_table_wait.lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->lock &k->k_lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->lock subsys mutex#80 irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->lock subsys mutex#80 &k->k_lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->lock &list->lock#9 irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->lock rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->lock rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->lock rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->lock &hdev->unregister_lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->lock &hdev->unregister_lock &rq->__lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->lock &hdev->unregister_lock fs_reclaim irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->lock &hdev->unregister_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->lock &hdev->unregister_lock &hdev->cmd_sync_work_lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->lock &hdev->unregister_lock rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->lock &hdev->unregister_lock rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->lock &hdev->unregister_lock rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->lock &hdev->unregister_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->lock &hdev->unregister_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)hci3 (work_completion)(&hdev->cmd_sync_work) irq_context: 0 (wq_completion)hci3 (work_completion)(&hdev->cmd_sync_work) &hdev->cmd_sync_work_lock irq_context: 0 (wq_completion)hci3 (work_completion)(&hdev->cmd_sync_work) &hdev->req_lock irq_context: 0 (wq_completion)hci3 (work_completion)(&hdev->cmd_sync_work) &obj_hash[i].lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock &rq->__lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->lock 
hci_cb_list_lock fs_reclaim irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock &obj_hash[i].lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock chan_list_lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock &conn->ident_lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock &base->lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock &list->lock#12 irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock &conn->chan_lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock &conn->chan_lock &rq->__lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->lock &base->lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->tx_work) irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->tx_work) &list->lock#12 irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->tx_work) tk_core.seq.seqcount irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->tx_work) &list->lock#11 irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->tx_work) &data->read_wait irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->tx_work) &list->lock#9 irq_context: 0 (wq_completion)hci3#2 (work_completion)(&conn->pending_rx_work) irq_context: 0 (wq_completion)hci3#2 (work_completion)(&conn->pending_rx_work) &list->lock#13 irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->lock tk_core.seq.seqcount irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_sk_list.lock irq_context: 0 
(wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex nl_table_wait.lock &p->pi_lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex nl_table_wait.lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex nl_table_wait.lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock &c->lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock &n->list_lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock &n->list_lock &c->lock irq_context: 0 nl_table_wait.lock &p->pi_lock irq_context: hardirq hrtimer_bases.lock fill_pool_map-wait-type-override &n->list_lock irq_context: hardirq hrtimer_bases.lock fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 (wq_completion)events (work_completion)(&ht->run_work) &ht->mutex &____s->seqcount irq_context: 0 rtnl_mutex dev_addr_sem nl_table_wait.lock &p->pi_lock irq_context: 0 &sb->s_type->i_mutex_key#10 nl_table_wait.lock &p->pi_lock irq_context: 0 pernet_ops_rwsem nl_table_wait.lock &p->pi_lock irq_context: 0 pernet_ops_rwsem uevent_sock_mutex nl_table_wait.lock &p->pi_lock irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem nl_table_wait.lock &p->pi_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex uevent_sock_mutex nl_table_wait.lock &p->pi_lock irq_context: 0 pernet_ops_rwsem ipvs->est_mutex &c->lock irq_context: 0 pernet_ops_rwsem ipvs->est_mutex &n->list_lock irq_context: 0 pernet_ops_rwsem ipvs->est_mutex &n->list_lock &c->lock irq_context: 0 pernet_ops_rwsem fill_pool_map-wait-type-override &n->list_lock irq_context: 0 pernet_ops_rwsem fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &idev->mc_lock &obj_hash[i].lock pool_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &idev->mc_lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 pernet_ops_rwsem rtnl_mutex &idev->mc_lock fill_pool_map-wait-type-override &c->lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &idev->mc_lock fill_pool_map-wait-type-override &n->list_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &idev->mc_lock fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &idev->mc_lock fill_pool_map-wait-type-override pool_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex batched_entropy_u32.lock crngs.lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 pernet_ops_rwsem &p->pi_lock &cfs_rq->removed.lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &idev->mc_lock &n->list_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &idev->mc_lock &n->list_lock &c->lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &idev->mc_lock batched_entropy_u8.lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &idev->mc_lock kfence_freelist_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex uevent_sock_mutex rcu_read_lock pool_lock#2 irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_read_lock &pool->lock irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_read_lock &pool->lock 
&p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem rtnl_mutex &rq->__lock &cfs_rq->removed.lock irq_context: 0 pernet_ops_rwsem rtnl_mutex quarantine_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex remove_cache_srcu irq_context: 0 pernet_ops_rwsem remove_cache_srcu &c->lock irq_context: 0 pernet_ops_rwsem rtnl_mutex remove_cache_srcu quarantine_lock irq_context: 0 pernet_ops_rwsem remove_cache_srcu &n->list_lock irq_context: 0 pernet_ops_rwsem remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 pernet_ops_rwsem remove_cache_srcu &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem rtnl_mutex pcpu_alloc_mutex &rq->__lock irq_context: 0 pernet_ops_rwsem pcpu_alloc_mutex pcpu_alloc_mutex.wait_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex pcpu_alloc_mutex.wait_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &p->pi_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &p->pi_lock &rq->__lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem rtnl_mutex uevent_sock_mutex &pcp->lock &zone->lock irq_context: 0 pernet_ops_rwsem rtnl_mutex uevent_sock_mutex &pcp->lock &zone->lock &____s->seqcount irq_context: 0 pernet_ops_rwsem rtnl_mutex fill_pool_map-wait-type-override &n->list_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 pernet_ops_rwsem rtnl_mutex fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 pernet_ops_rwsem rtnl_mutex &idev->mc_lock fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 pernet_ops_rwsem rtnl_mutex lock kernfs_idr_lock &n->list_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex lock kernfs_idr_lock &n->list_lock &c->lock irq_context: 0 pernet_ops_rwsem rtnl_mutex uevent_sock_mutex quarantine_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex remove_cache_srcu &n->list_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 pernet_ops_rwsem rtnl_mutex remove_cache_srcu &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem rtnl_mutex remove_cache_srcu &c->lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem &rq->__lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_INET6 irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_INET6 k-slock-AF_INET6 irq_context: 0 pernet_ops_rwsem k-slock-AF_INET6 irq_context: 0 pernet_ops_rwsem k-clock-AF_INET6 irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_INET6 &tcp_hashinfo.bhash[i].lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_INET6 &tcp_hashinfo.bhash[i].lock pool_lock#2 irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_INET6 &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_INET6 &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock pool_lock#2 irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_INET6 &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock k-clock-AF_INET6 irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_INET6 &tcp_hashinfo.bhash[i].lock k-clock-AF_INET6 irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_INET6 &h->lhash2[i].lock irq_context: 0 pernet_ops_rwsem cpu_hotplug_lock wq_pool_mutex irq_context: 0 pernet_ops_rwsem cpu_hotplug_lock wq_pool_mutex fs_reclaim irq_context: 0 pernet_ops_rwsem cpu_hotplug_lock wq_pool_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 pernet_ops_rwsem 
cpu_hotplug_lock wq_pool_mutex pool_lock#2 irq_context: 0 pernet_ops_rwsem cpu_hotplug_lock wq_pool_mutex fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 pernet_ops_rwsem cpu_hotplug_lock wq_pool_mutex fill_pool_map-wait-type-override pool_lock irq_context: 0 pernet_ops_rwsem cpu_hotplug_lock wq_pool_mutex &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem cpu_hotplug_lock wq_pool_mutex &wq->mutex irq_context: 0 pernet_ops_rwsem cpu_hotplug_lock wq_pool_mutex &wq->mutex &pool->lock/1 irq_context: 0 pernet_ops_rwsem wq_pool_mutex irq_context: 0 pernet_ops_rwsem wq_pool_mutex &wq->mutex irq_context: 0 pernet_ops_rwsem pcpu_lock irq_context: 0 pernet_ops_rwsem &list->lock#4 irq_context: 0 pernet_ops_rwsem &dir->lock#2 irq_context: 0 pernet_ops_rwsem ptype_lock irq_context: 0 pernet_ops_rwsem cpu_hotplug_lock wq_pool_mutex &c->lock irq_context: 0 pernet_ops_rwsem rcu_read_lock rhashtable_bucket irq_context: 0 pernet_ops_rwsem k-clock-AF_TIPC irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_TIPC irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_TIPC k-slock-AF_TIPC irq_context: 0 pernet_ops_rwsem k-slock-AF_TIPC irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_TIPC &tn->nametbl_lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_TIPC &tn->nametbl_lock &c->lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_TIPC &tn->nametbl_lock pool_lock#2 irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_TIPC &tn->nametbl_lock &service->lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_TIPC &tn->nametbl_lock &service->lock pool_lock#2 irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex defrag4_mutex irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex defrag4_mutex nf_hook_mutex irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex defrag4_mutex nf_hook_mutex fs_reclaim irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex defrag4_mutex nf_hook_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex defrag4_mutex nf_hook_mutex &c->lock irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex defrag4_mutex nf_hook_mutex &____s->seqcount irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex defrag4_mutex nf_hook_mutex pool_lock#2 irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex defrag4_mutex cpu_hotplug_lock irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex nf_hook_mutex irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex nf_hook_mutex fs_reclaim irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex nf_hook_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex nf_hook_mutex pool_lock#2 irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex cpu_hotplug_lock irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex pool_lock#2 irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex defrag6_mutex irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex defrag6_mutex nf_hook_mutex irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex defrag6_mutex nf_hook_mutex fs_reclaim irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex defrag6_mutex nf_hook_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex defrag6_mutex nf_hook_mutex pool_lock#2 irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex defrag6_mutex cpu_hotplug_lock irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex fill_pool_map-wait-type-override &c->lock irq_context: 0 
pernet_ops_rwsem nf_ct_proto_mutex fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex fill_pool_map-wait-type-override pool_lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex &obj_hash[i].lock pool_lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC k-slock-AF_RXRPC irq_context: 0 pernet_ops_rwsem k-slock-AF_RXRPC irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex fs_reclaim irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex pool_lock#2 irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex crngs.lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex &c->lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex mmu_notifier_invalidate_range_start irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex &sb->s_type->i_lock_key#8 irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex &dir->lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex k-sk_lock-AF_INET6 irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex k-sk_lock-AF_INET6 k-slock-AF_INET6 irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex k-sk_lock-AF_INET6 &table->hash[i].lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex k-sk_lock-AF_INET6 &table->hash[i].lock k-clock-AF_INET6 irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex k-sk_lock-AF_INET6 &table->hash[i].lock &table->hash2[i].lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex k-slock-AF_INET6 irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex cpu_hotplug_lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex kthread_create_lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex &p->pi_lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex &p->pi_lock &rq->__lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex &x->wait irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex &rq->__lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex rcu_read_lock &rq->__lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex &x->wait#22 irq_context: 0 &x->wait#22 &p->pi_lock &rq->__lock irq_context: 0 &x->wait#22 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &local->services_lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC fs_reclaim irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC pool_lock#2 irq_context: 0 pernet_ops_rwsem rtnl_mutex 
fill_pool_map-wait-type-override rcu_read_lock pool_lock#2 irq_context: 0 pernet_ops_rwsem rtnl_mutex fill_pool_map-wait-type-override &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem &rxnet->conn_lock irq_context: 0 pernet_ops_rwsem &call->waitq irq_context: 0 pernet_ops_rwsem &rx->call_lock irq_context: 0 pernet_ops_rwsem &rxnet->call_lock irq_context: 0 pernet_ops_rwsem &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 pernet_ops_rwsem net_rwsem irq_context: 0 &sb->s_type->i_mutex_key#9 rcu_read_lock sysctl_lock irq_context: 0 sb_writers#3 &____s->seqcount#11 irq_context: 0 sb_writers#3 &(&net->ipv4.ping_group_range.lock)->lock irq_context: 0 sb_writers#3 &(&net->ipv4.ping_group_range.lock)->lock &____s->seqcount#11 irq_context: 0 misc_mtx &dir->lock irq_context: 0 rtnl_mutex &r->consumer_lock irq_context: 0 rtnl_mutex &r->consumer_lock &r->producer_lock irq_context: 0 rtnl_mutex failover_lock irq_context: 0 rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 rtnl_mutex &idev->mc_lock _xmit_ETHER pool_lock#2 irq_context: 0 rtnl_mutex &mm->mmap_lock irq_context: 0 cpuset_rwsem &rq->__lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &idev->mc_lock _xmit_ETHER &c->lock irq_context: 0 pernet_ops_rwsem rtnl_mutex batched_entropy_u8.lock irq_context: 0 pernet_ops_rwsem rtnl_mutex kfence_freelist_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex stack_depot_init_mutex &rq->__lock irq_context: 0 pernet_ops_rwsem fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex &____s->seqcount irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 tomoyo_ss remove_cache_srcu rcu_read_lock rcu_node_0 irq_context: 0 tomoyo_ss remove_cache_srcu rcu_read_lock &rq->__lock irq_context: 0 rtnl_mutex &idev->mc_lock fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 rtnl_mutex &idev->mc_lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 rtnl_mutex &idev->mc_lock fill_pool_map-wait-type-override rcu_read_lock pool_lock#2 irq_context: 0 rtnl_mutex &idev->mc_lock fill_pool_map-wait-type-override &obj_hash[i].lock irq_context: 0 rtnl_mutex &idev->mc_lock fill_pool_map-wait-type-override &c->lock irq_context: 0 rtnl_mutex &idev->mc_lock fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock pool_lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock batched_entropy_u32.lock irq_context: 0 (wq_completion)ipv6_addrconf 
(work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh noop_qdisc.q.lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) &p->pi_lock irq_context: 0 rtnl_mutex &n->lock irq_context: 0 rtnl_mutex &n->lock &(&n->ha_lock)->lock irq_context: 0 rtnl_mutex &n->lock &(&n->ha_lock)->lock &____s->seqcount#9 irq_context: 0 rtnl_mutex &tbl->lock &n->lock irq_context: 0 rtnl_mutex rcu_read_lock lock#8 irq_context: 0 rtnl_mutex rcu_read_lock id_table_lock irq_context: 0 rtnl_mutex &n->lock irq_context: 0 rtnl_mutex &n->lock &____s->seqcount#9 irq_context: 0 rtnl_mutex dev_addr_sem rcu_read_lock &ndev->lock irq_context: 0 rtnl_mutex dev_addr_sem rcu_read_lock &pool->lock/1 irq_context: 0 rtnl_mutex dev_addr_sem rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 rtnl_mutex dev_addr_sem rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 rtnl_mutex dev_addr_sem rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 rtnl_mutex dev_addr_sem rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 rtnl_mutex dev_addr_sem rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex dev_addr_sem &rq->__lock irq_context: 0 rtnl_mutex dev_addr_sem &tbl->lock &n->lock irq_context: 0 rtnl_mutex dev_addr_sem &tbl->lock &c->lock irq_context: 0 rtnl_mutex dev_addr_sem &tbl->lock &____s->seqcount irq_context: 0 rtnl_mutex dev_addr_sem &tbl->lock pool_lock#2 irq_context: 0 rtnl_mutex dev_addr_sem &tbl->lock &n->lock irq_context: 0 rtnl_mutex dev_addr_sem &tbl->lock &n->lock &____s->seqcount#9 irq_context: 0 rtnl_mutex dev_addr_sem &tbl->lock nl_table_lock irq_context: 0 rtnl_mutex dev_addr_sem &tbl->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex dev_addr_sem &tbl->lock nl_table_wait.lock irq_context: 0 rtnl_mutex dev_addr_sem &tbl->lock rcu_read_lock lock#8 irq_context: 0 rtnl_mutex dev_addr_sem &tbl->lock rcu_read_lock id_table_lock irq_context: 0 rtnl_mutex dev_addr_sem &tbl->lock &dir->lock#2 irq_context: 0 rtnl_mutex dev_addr_sem &tbl->lock krc.lock irq_context: 0 rtnl_mutex _xmit_ETHER pool_lock#2 irq_context: 0 rtnl_mutex &ndev->lock pool_lock#2 irq_context: 0 rtnl_mutex &ndev->lock &dir->lock#2 irq_context: 0 rtnl_mutex &ndev->lock pcpu_lock irq_context: 0 rtnl_mutex &ndev->lock &tb->tb6_lock irq_context: 0 rtnl_mutex &ndev->lock &tb->tb6_lock &____s->seqcount irq_context: 0 rtnl_mutex &ndev->lock &tb->tb6_lock pool_lock#2 irq_context: 0 rtnl_mutex &ndev->lock &tb->tb6_lock &c->lock irq_context: 0 rtnl_mutex &ndev->lock &tb->tb6_lock nl_table_lock irq_context: 0 rtnl_mutex &ndev->lock &tb->tb6_lock &obj_hash[i].lock irq_context: 0 rtnl_mutex &ndev->lock &tb->tb6_lock nl_table_wait.lock irq_context: 0 rtnl_mutex &tb->tb6_lock rcu_read_lock pool_lock#2 irq_context: 0 rtnl_mutex rcu_read_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 
rtnl_mutex rcu_read_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex rcu_read_lock &net->sctp.local_addr_lock &net->sctp.addr_wq_lock &c->lock irq_context: 0 rtnl_mutex pcpu_lock irq_context: 0 rtnl_mutex fill_pool_map-wait-type-override &pcp->lock &zone->lock irq_context: 0 rtnl_mutex fill_pool_map-wait-type-override &pcp->lock &zone->lock &____s->seqcount irq_context: 0 rtnl_mutex fill_pool_map-wait-type-override rcu_read_lock pool_lock#2 irq_context: 0 rtnl_mutex fill_pool_map-wait-type-override &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 rtnl_mutex lock kernfs_idr_lock &n->list_lock irq_context: 0 rtnl_mutex lock kernfs_idr_lock &n->list_lock &c->lock irq_context: 0 rtnl_mutex &idev->mc_lock &bridge_netdev_addr_lock_key irq_context: 0 rtnl_mutex &idev->mc_lock &bridge_netdev_addr_lock_key pool_lock#2 irq_context: 0 rtnl_mutex (switchdev_blocking_notif_chain).rwsem irq_context: 0 rtnl_mutex &br->hash_lock irq_context: 0 rtnl_mutex &br->hash_lock &____s->seqcount irq_context: 0 rtnl_mutex &br->hash_lock pool_lock#2 irq_context: 0 rtnl_mutex &br->hash_lock &c->lock irq_context: 0 rtnl_mutex &br->hash_lock rcu_read_lock rhashtable_bucket irq_context: 0 rtnl_mutex &br->hash_lock nl_table_lock irq_context: 0 rtnl_mutex &br->hash_lock &obj_hash[i].lock irq_context: 0 rtnl_mutex &br->hash_lock nl_table_wait.lock irq_context: 0 rtnl_mutex rcu_read_lock rhashtable_bucket irq_context: 0 rtnl_mutex nf_hook_mutex irq_context: 0 rtnl_mutex nf_hook_mutex fs_reclaim irq_context: 0 rtnl_mutex nf_hook_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 rtnl_mutex nf_hook_mutex pool_lock#2 irq_context: 0 rtnl_mutex cpu_hotplug_lock jump_label_mutex irq_context: 0 rtnl_mutex nf_hook_mutex &c->lock irq_context: 0 rtnl_mutex nf_hook_mutex &____s->seqcount irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &obj_hash[i].lock pool_lock irq_context: 0 pernet_ops_rwsem cpu_hotplug_lock wq_pool_mutex &obj_hash[i].lock pool_lock irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex nf_hook_mutex &c->lock irq_context: 0 rtnl_mutex dev_addr_sem fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 rtnl_mutex dev_addr_sem fill_pool_map-wait-type-override pool_lock irq_context: 0 pernet_ops_rwsem cpu_hotplug_lock wq_pool_mutex &rq->__lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock rcu_read_lock &r->producer_lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex &p->pi_lock &cfs_rq->removed.lock irq_context: 0 &sb->s_type->i_mutex_key#10 fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 fill_pool_map-wait-type-override &c->lock irq_context: 0 &sb->s_type->i_mutex_key#10 fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 &sb->s_type->i_mutex_key#10 fill_pool_map-wait-type-override pool_lock irq_context: 0 misc_mtx &pcp->lock &zone->lock 
&____s->seqcount irq_context: 0 rtnl_mutex j1939_netdev_lock irq_context: 0 rtnl_mutex uevent_sock_mutex rcu_read_lock pool_lock#2 irq_context: 0 rtnl_mutex &idev->mc_lock &dev_addr_list_lock_key irq_context: 0 rtnl_mutex &idev->mc_lock &dev_addr_list_lock_key pool_lock#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock batched_entropy_u8.lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock kfence_freelist_lock irq_context: 0 rtnl_mutex &idev->mc_lock &dev_addr_list_lock_key#2 irq_context: 0 rtnl_mutex &idev->mc_lock &dev_addr_list_lock_key#2 pool_lock#2 irq_context: 0 rtnl_mutex &idev->mc_lock _xmit_ETHER &c->lock irq_context: 0 rtnl_mutex dev_addr_sem &tbl->lock &obj_hash[i].lock pool_lock irq_context: 0 rtnl_mutex dev_addr_sem &net->ipv6.fib6_gc_lock fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 rtnl_mutex dev_addr_sem &net->ipv6.fib6_gc_lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 rtnl_mutex dev_addr_sem &net->ipv6.fib6_gc_lock fill_pool_map-wait-type-override rcu_read_lock pool_lock#2 irq_context: 0 rtnl_mutex dev_addr_sem &net->ipv6.fib6_gc_lock fill_pool_map-wait-type-override &obj_hash[i].lock irq_context: 0 rtnl_mutex dev_addr_sem &net->ipv6.fib6_gc_lock fill_pool_map-wait-type-override pool_lock irq_context: 0 rtnl_mutex remove_cache_srcu &c->lock irq_context: 0 rtnl_mutex remove_cache_srcu &n->list_lock irq_context: 0 rtnl_mutex remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 rtnl_mutex remove_cache_srcu &obj_hash[i].lock irq_context: 0 rtnl_mutex &p->pi_lock &cfs_rq->removed.lock irq_context: 0 rtnl_mutex &idev->mc_lock &dev_addr_list_lock_key &c->lock irq_context: 0 rtnl_mutex &idev->mc_lock &dev_addr_list_lock_key &____s->seqcount irq_context: 0 rtnl_mutex &idev->mc_lock &n->list_lock irq_context: 0 rtnl_mutex &idev->mc_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &____s->seqcount irq_context: 0 rtnl_mutex rcu_read_lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 rtnl_mutex rcu_read_lock fill_pool_map-wait-type-override pool_lock irq_context: 0 tomoyo_ss tomoyo_policy_lock rcu_read_lock rcu_node_0 irq_context: 0 tomoyo_ss tomoyo_policy_lock rcu_read_lock &rq->__lock irq_context: 0 rtnl_mutex &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &____s->seqcount irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock &____s->seqcount irq_context: 0 rtnl_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 rtnl_mutex cpu_hotplug_lock wq_pool_mutex &c->lock irq_context: 0 rtnl_mutex cpu_hotplug_lock wq_pool_mutex 
&____s->seqcount irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) remove_cache_srcu irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) remove_cache_srcu quarantine_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) remove_cache_srcu &c->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) remove_cache_srcu &n->list_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) remove_cache_srcu &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) &rq->__lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &pool->lock &p->pi_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex dev_addr_sem &pcp->lock &zone->lock irq_context: 0 rtnl_mutex dev_addr_sem &pcp->lock &zone->lock &____s->seqcount irq_context: 0 rtnl_mutex &ndev->lock &____s->seqcount irq_context: 0 rtnl_mutex &ndev->lock rcu_read_lock pool_lock#2 irq_context: 0 rtnl_mutex &ndev->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex &ndev->lock &ifa->lock irq_context: 0 rtnl_mutex &ndev->lock &obj_hash[i].lock pool_lock irq_context: 0 rtnl_mutex &ndev->lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 rtnl_mutex &ndev->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 rtnl_mutex &ndev->lock rcu_read_lock &pool->lock irq_context: 0 rtnl_mutex &ndev->lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex &ndev->lock rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 rtnl_mutex &ndev->lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 rtnl_mutex &ndev->lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 rtnl_mutex &ndev->lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock &pool->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock rcu_read_lock &r->producer_lock irq_context: 0 rtnl_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem &rq->__lock irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem &anon_vma->rwsem pgd_lock irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem &anon_vma->rwsem rcu_read_lock pool_lock#2 irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem &anon_vma->rwsem key irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem &anon_vma->rwsem pcpu_lock irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem &anon_vma->rwsem percpu_counters_lock irq_context: 0 rtnl_mutex &zone->lock &____s->seqcount irq_context: 0 rtnl_mutex &bat_priv->tvlv.handler_list_lock irq_context: 0 rtnl_mutex 
&bat_priv->tvlv.handler_list_lock pool_lock#2 irq_context: 0 rtnl_mutex &bat_priv->tvlv.container_list_lock irq_context: 0 rtnl_mutex &idev->mc_lock &batadv_netdev_addr_lock_key irq_context: 0 rtnl_mutex &idev->mc_lock &batadv_netdev_addr_lock_key pool_lock#2 irq_context: 0 rtnl_mutex &bat_priv->softif_vlan_list_lock irq_context: 0 rtnl_mutex &bat_priv->softif_vlan_list_lock pool_lock#2 irq_context: 0 rtnl_mutex key#16 irq_context: 0 rtnl_mutex &bat_priv->tt.changes_list_lock irq_context: softirq &(&bat_priv->nc.work)->timer irq_context: softirq &(&bat_priv->nc.work)->timer rcu_read_lock &pool->lock/1 irq_context: softirq &(&bat_priv->nc.work)->timer rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: softirq &(&bat_priv->nc.work)->timer rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: softirq &(&bat_priv->nc.work)->timer rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: softirq &(&bat_priv->nc.work)->timer rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bat_events irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->nc.work)->work) irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->nc.work)->work) key#17 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->nc.work)->work) key#18 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->nc.work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->nc.work)->work) &base->lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->nc.work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock rcu_read_lock &r->producer_lock irq_context: 0 rtnl_mutex lweventlist_lock &c->lock irq_context: 0 rtnl_mutex lweventlist_lock &____s->seqcount irq_context: 0 rtnl_mutex lweventlist_lock rcu_read_lock pool_lock#2 irq_context: 0 rtnl_mutex lweventlist_lock &obj_hash[i].lock irq_context: 0 rtnl_mutex &root->kernfs_rwsem kernfs_idr_lock irq_context: 0 rtnl_mutex &root->kernfs_rwsem &obj_hash[i].lock irq_context: 0 rtnl_mutex &root->kernfs_rwsem pool_lock#2 irq_context: 0 rtnl_mutex kernfs_idr_lock irq_context: 0 rtnl_mutex rcu_state.exp_mutex rcu_node_0 irq_context: 0 rtnl_mutex rcu_state.exp_mutex &obj_hash[i].lock irq_context: 0 rtnl_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock irq_context: 0 rtnl_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 rtnl_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 rtnl_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex rcu_state.exp_mutex &rnp->exp_wq[2] irq_context: 0 rtnl_mutex rcu_state.exp_mutex &rq->__lock irq_context: 0 rtnl_mutex cpu_hotplug_lock xps_map_mutex irq_context: 0 rtnl_mutex rcu_state.exp_mutex &rnp->exp_wq[3] irq_context: 0 rtnl_mutex pcpu_alloc_mutex &rq->__lock irq_context: 0 rtnl_mutex pcpu_alloc_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex rcu_state.exp_mutex &rnp->exp_wq[0] irq_context: 0 &mm->mmap_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 rtnl_mutex rcu_state.exp_mutex &rq->__lock 
&per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 rtnl_mutex rcu_state.exp_mutex rcu_read_lock &rq->__lock irq_context: 0 &mm->mmap_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex rcu_state.exp_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex rcu_state.exp_mutex &rnp->exp_wq[1] irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->nc.work)->work) rcu_read_lock rcu_node_0 irq_context: 0 rtnl_mutex tk_core.seq.seqcount irq_context: 0 rtnl_mutex &wq->mutex irq_context: 0 rtnl_mutex &wq->mutex &pool->lock irq_context: 0 rtnl_mutex wq_pool_mutex &wq->mutex &pool->lock irq_context: 0 rtnl_mutex wq_pool_mutex &wq->mutex &pool->lock/1 irq_context: 0 rtnl_mutex init_lock irq_context: 0 rtnl_mutex init_lock slab_mutex irq_context: 0 rtnl_mutex init_lock slab_mutex fs_reclaim irq_context: 0 rtnl_mutex init_lock slab_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 rtnl_mutex init_lock slab_mutex pool_lock#2 irq_context: 0 rtnl_mutex init_lock slab_mutex &c->lock irq_context: 0 rtnl_mutex init_lock slab_mutex &n->list_lock irq_context: 0 rtnl_mutex init_lock slab_mutex pcpu_alloc_mutex irq_context: 0 rtnl_mutex init_lock slab_mutex pcpu_alloc_mutex pcpu_lock irq_context: 0 rtnl_mutex init_lock slab_mutex &root->kernfs_rwsem irq_context: 0 rtnl_mutex init_lock slab_mutex &k->list_lock irq_context: 0 rtnl_mutex init_lock slab_mutex lock irq_context: 0 rtnl_mutex init_lock slab_mutex lock kernfs_idr_lock irq_context: 0 rtnl_mutex init_lock slab_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 rtnl_mutex init_lock slab_mutex lock kernfs_idr_lock pool_lock#2 irq_context: 0 rtnl_mutex init_lock slab_mutex &____s->seqcount irq_context: 0 rtnl_mutex init_lock fs_reclaim irq_context: 0 rtnl_mutex init_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 rtnl_mutex init_lock &zone->lock irq_context: 0 rtnl_mutex init_lock &zone->lock &____s->seqcount irq_context: 0 rtnl_mutex init_lock &rq->__lock irq_context: 0 rtnl_mutex init_lock &____s->seqcount irq_context: 0 rtnl_mutex init_lock pool_lock#2 irq_context: 0 rtnl_mutex init_lock &obj_hash[i].lock irq_context: 0 rtnl_mutex init_lock &base->lock irq_context: 0 rtnl_mutex init_lock &base->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex init_lock crngs.lock irq_context: 0 rtnl_mutex &bat_priv->tvlv.handler_list_lock &c->lock irq_context: 0 rtnl_mutex &bat_priv->softif_vlan_list_lock &c->lock irq_context: 0 rtnl_mutex rcu_read_lock &rq->__lock &cfs_rq->removed.lock irq_context: softirq &(&bat_priv->nc.work)->timer rcu_read_lock &pool->lock/1 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->nc.work)->work) rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->nc.work)->work) rcu_read_lock &rcu_state.expedited_wq irq_context: softirq &(&bat_priv->nc.work)->timer rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 rtnl_mutex kernfs_idr_lock &obj_hash[i].lock irq_context: 0 rtnl_mutex kernfs_idr_lock pool_lock#2 irq_context: softirq &(&idev->mc_dad_work)->timer rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 rtnl_mutex 
uevent_sock_mutex batched_entropy_u8.lock irq_context: 0 rtnl_mutex uevent_sock_mutex kfence_freelist_lock irq_context: 0 rtnl_mutex uevent_sock_mutex &meta->lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->nc.work)->work) fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->nc.work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 rtnl_mutex deferred_lock irq_context: 0 (wq_completion)events deferred_process_work irq_context: 0 (wq_completion)events deferred_process_work rtnl_mutex irq_context: 0 rtnl_mutex target_list_lock irq_context: 0 (wq_completion)events deferred_process_work rtnl_mutex rtnl_mutex.wait_lock irq_context: 0 (wq_completion)events deferred_process_work rtnl_mutex &pool->lock irq_context: 0 (wq_completion)events deferred_process_work rtnl_mutex &pool->lock &p->pi_lock irq_context: 0 (wq_completion)events deferred_process_work rtnl_mutex &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events deferred_process_work rtnl_mutex &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events deferred_process_work rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)events deferred_process_work rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex rcu_read_lock _xmit_ETHER irq_context: 0 rtnl_mutex &br->lock irq_context: 0 rtnl_mutex &br->lock &br->hash_lock irq_context: 0 rtnl_mutex &br->lock &br->hash_lock rcu_read_lock rhashtable_bucket irq_context: 0 rtnl_mutex &br->lock &br->hash_lock rcu_read_lock rcu_read_lock &pool->lock irq_context: 0 rtnl_mutex &br->lock &br->hash_lock rcu_read_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex &br->lock &br->hash_lock rcu_read_lock rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 rtnl_mutex &br->lock &br->hash_lock rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 rtnl_mutex &br->lock &br->hash_lock rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 rtnl_mutex &br->lock &br->hash_lock rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &br->lock &br->hash_lock pool_lock#2 irq_context: 0 rtnl_mutex &br->lock &br->hash_lock nl_table_lock irq_context: 0 rtnl_mutex &br->lock &br->hash_lock &obj_hash[i].lock irq_context: 0 rtnl_mutex &br->lock &br->hash_lock nl_table_wait.lock irq_context: 0 rtnl_mutex &br->lock lweventlist_lock irq_context: 0 rtnl_mutex &br->lock lweventlist_lock pool_lock#2 irq_context: 0 rtnl_mutex &br->lock lweventlist_lock &dir->lock#2 irq_context: 0 (wq_completion)events (work_completion)(&ht->run_work) &ht->mutex batched_entropy_u32.lock crngs.lock irq_context: 0 rtnl_mutex &pn->hash_lock irq_context: 0 rtnl_mutex &net->ipv6.fib6_gc_lock rcu_read_lock &tb->tb6_lock irq_context: 0 rtnl_mutex &net->ipv6.fib6_gc_lock rcu_read_lock &tb->tb6_lock &net->ipv6.fib6_walker_lock irq_context: 0 rtnl_mutex &net->ipv6.fib6_gc_lock &obj_hash[i].lock irq_context: 0 rtnl_mutex &idev->mc_lock _xmit_ETHER &____s->seqcount irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->nc.work)->work) rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)events deferred_process_work rtnl_mutex deferred_lock irq_context: 0 (wq_completion)events deferred_process_work rtnl_mutex (switchdev_blocking_notif_chain).rwsem irq_context: 0 (wq_completion)events 
deferred_process_work rtnl_mutex pool_lock#2 irq_context: 0 (wq_completion)events deferred_process_work rtnl_mutex &dir->lock#2 irq_context: 0 (wq_completion)events deferred_process_work rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)events deferred_process_work rtnl_mutex.wait_lock irq_context: 0 (wq_completion)events deferred_process_work &p->pi_lock irq_context: 0 (wq_completion)events deferred_process_work &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events deferred_process_work &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->nc.work)->work) fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->nc.work)->work) fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->nc.work)->work) &rq->__lock irq_context: 0 rtnl_mutex &br->hash_lock rcu_read_lock rcu_read_lock &pool->lock irq_context: 0 rtnl_mutex &br->hash_lock rcu_read_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex &br->hash_lock rcu_read_lock rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 rtnl_mutex &br->hash_lock rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 rtnl_mutex &br->hash_lock rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 rtnl_mutex &br->hash_lock rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &bat_priv->tvlv.handler_list_lock &n->list_lock irq_context: 0 rtnl_mutex &bat_priv->tvlv.handler_list_lock &n->list_lock &c->lock irq_context: 0 rtnl_mutex &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 rtnl_mutex cpu_hotplug_lock wq_pool_mutex &obj_hash[i].lock pool_lock irq_context: 0 rtnl_mutex rcu_read_lock &pool->lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 rtnl_mutex rcu_read_lock &pool->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 rtnl_mutex uevent_sock_mutex &n->list_lock irq_context: 0 rtnl_mutex uevent_sock_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->nc.work)->work) rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#5 &sb->s_type->i_mutex_key#12 per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 rtnl_mutex &br->lock &br->hash_lock &c->lock irq_context: 0 rtnl_mutex &br->lock &br->hash_lock &____s->seqcount irq_context: 0 rtnl_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: softirq &(&bat_priv->mcast.work)->timer irq_context: softirq &(&bat_priv->mcast.work)->timer rcu_read_lock &pool->lock/1 irq_context: softirq &(&bat_priv->mcast.work)->timer rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: softirq &(&bat_priv->mcast.work)->timer rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: softirq &(&bat_priv->mcast.work)->timer rcu_read_lock &pool->lock/1 &p->pi_lock &cfs_rq->removed.lock irq_context: softirq &(&bat_priv->mcast.work)->timer rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: softirq &(&bat_priv->mcast.work)->timer rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->mcast.work)->work) irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->mcast.work)->work) 
rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->mcast.work)->work) &bat_priv->mcast.mla_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->mcast.work)->work) &bat_priv->mcast.mla_lock pool_lock#2 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->mcast.work)->work) &bat_priv->mcast.mla_lock key#16 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->mcast.work)->work) &bat_priv->mcast.mla_lock &bat_priv->tt.changes_list_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->mcast.work)->work) &bat_priv->mcast.mla_lock &bat_priv->tvlv.container_list_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->mcast.work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->mcast.work)->work) &base->lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->mcast.work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events deferred_process_work rtnl_mutex &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->nc.work)->work) rcu_node_0 irq_context: 0 (wq_completion)rcu_gp (work_completion)(&rew->rew_work) &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->nc.work)->work) &rcu_state.expedited_wq irq_context: 0 (wq_completion)rcu_gp (work_completion)(&rew->rew_work) fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->nc.work)->work) &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->nc.work)->work) &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&rew->rew_work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->nc.work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond0#7 irq_context: 0 (wq_completion)bond0#7 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond0#7 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond0#7 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond0#7 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 rtnl_mutex _xmit_ETHER &____s->seqcount irq_context: softirq &(&slave->notify_work)->timer irq_context: softirq &(&slave->notify_work)->timer rcu_read_lock &pool->lock/1 irq_context: softirq &(&slave->notify_work)->timer rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: softirq &(&slave->notify_work)->timer rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: softirq &(&slave->notify_work)->timer rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: softirq &(&slave->notify_work)->timer rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq &(&slave->notify_work)->timer rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 rtnl_mutex rcu_node_0 irq_context: 0 rtnl_mutex &rcu_state.expedited_wq irq_context: 0 rtnl_mutex &rcu_state.expedited_wq &p->pi_lock irq_context: 0 rtnl_mutex &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 
rtnl_mutex &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &root->kernfs_rwsem kernfs_idr_lock &obj_hash[i].lock irq_context: 0 rtnl_mutex &root->kernfs_rwsem kernfs_idr_lock pool_lock#2 irq_context: 0 rtnl_mutex rcu_state.exp_mutex &rcu_state.expedited_wq irq_context: 0 (wq_completion)rcu_gp (work_completion)(&rew->rew_work) fill_pool_map-wait-type-override &c->lock irq_context: 0 rtnl_mutex rcu_state.exp_mutex &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&rew->rew_work) fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 rtnl_mutex rcu_state.exp_mutex &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: softirq &(&slave->notify_work)->timer rcu_read_lock &pool->lock/1 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&rew->rew_work) fill_pool_map-wait-type-override &n->list_lock irq_context: 0 rtnl_mutex rcu_state.exp_mutex &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &meta->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &br->multicast_lock rcu_read_lock rhashtable_bucket irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &br->multicast_lock rcu_read_lock &pool->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &br->multicast_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &br->multicast_lock rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &br->multicast_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &br->multicast_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &br->multicast_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &br->multicast_lock rcu_read_lock rcu_read_lock &pool->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &br->multicast_lock rcu_read_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &br->multicast_lock rcu_read_lock rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &br->multicast_lock rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex (&mp->timer) irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex (work_completion)(&br->mcast_gc_work) irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_state.barrier_mutex irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_state.barrier_mutex rcu_state.barrier_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_state.barrier_mutex rcu_state.barrier_lock &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_state.barrier_mutex &x->wait#24 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex 
rcu_state.barrier_mutex &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_state.barrier_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#18 namespace_sem &c->lock irq_context: 0 (wq_completion)hci1 irq_context: 0 (wq_completion)hci1 (work_completion)(&hdev->power_on) irq_context: 0 (wq_completion)hci1 (work_completion)(&hdev->power_on) &hdev->req_lock irq_context: 0 (wq_completion)hci1 (work_completion)(&hdev->power_on) &hdev->req_lock &obj_hash[i].lock irq_context: 0 (wq_completion)hci1 (work_completion)(&hdev->power_on) &hdev->req_lock &list->lock#9 irq_context: 0 (wq_completion)hci1 (work_completion)(&hdev->power_on) &hdev->req_lock &c->lock irq_context: 0 (wq_completion)hci1 (work_completion)(&hdev->power_on) &hdev->req_lock &list->lock#10 irq_context: 0 (wq_completion)hci1 (work_completion)(&hdev->power_on) &hdev->req_lock rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)hci1 (work_completion)(&hdev->power_on) &hdev->req_lock rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)hci1 (work_completion)(&hdev->power_on) &hdev->req_lock rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)hci1 (work_completion)(&hdev->power_on) &hdev->req_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)hci1 (work_completion)(&hdev->power_on) &hdev->req_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)hci1 (work_completion)(&hdev->power_on) &hdev->req_lock &hdev->req_wait_q irq_context: 0 (wq_completion)hci1 (work_completion)(&hdev->power_on) &hdev->req_lock &base->lock irq_context: 0 (wq_completion)hci1 (work_completion)(&hdev->power_on) &hdev->req_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)hci1 (work_completion)(&hdev->power_on) &hdev->req_lock &rq->__lock irq_context: 0 (wq_completion)hci1 (work_completion)(&hdev->power_on) &hdev->req_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)hci1#2 irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->cmd_work) irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->cmd_work) &list->lock#9 irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->cmd_work) fs_reclaim irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->cmd_work) fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->cmd_work) tk_core.seq.seqcount irq_context: 0 rtnl_mutex &idev->mc_lock _xmit_ETHER &____s->seqcount#2 irq_context: 0 (wq_completion)bond0#9 irq_context: 0 (wq_completion)bond0#9 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond0#9 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond0#9 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond0#9 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex team->team_lock_key#7 irq_context: 0 rtnl_mutex team->team_lock_key#7 fs_reclaim irq_context: 0 rtnl_mutex team->team_lock_key#7 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 rtnl_mutex team->team_lock_key#7 netpoll_srcu irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->cmd_work) &list->lock#11 irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->cmd_work) &data->read_wait irq_context: 0 
(wq_completion)hci1#2 (work_completion)(&hdev->cmd_work) rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &list->lock#9 irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) lock#6 irq_context: 0 rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 rtnl_mutex &br->lock &br->hash_lock rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 rtnl_mutex rcu_state.exp_mutex &obj_hash[i].lock pool_lock irq_context: 0 rtnl_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 rtnl_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock fill_pool_map-wait-type-override &c->lock irq_context: 0 rtnl_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 rtnl_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_state.exp_mutex &rnp->exp_wq[2] irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &____s->seqcount irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_state.exp_mutex &rnp->exp_wq[1] irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) lock#6 kcov_remote_lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) fs_reclaim irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) (console_sem).lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) console_lock console_srcu console_owner_lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) console_lock console_srcu console_owner irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) console_lock console_srcu console_owner &port_lock_key irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) console_lock console_srcu console_owner console_owner_lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) rcu_node_0 irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &rcu_state.expedited_wq irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &rq->__lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &obj_hash[i].lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (linkwatch_work).work &rq->__lock irq_context: 0 rtnl_mutex team->team_lock_key#7 net_rwsem irq_context: 0 rtnl_mutex team->team_lock_key#7 net_rwsem &list->lock#2 irq_context: 0 rtnl_mutex team->team_lock_key#7 &tn->lock irq_context: 0 rtnl_mutex team->team_lock_key#7 _xmit_ETHER irq_context: 0 rtnl_mutex team->team_lock_key#7 &dir->lock#2 irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &pool->lock &p->pi_lock irq_context: 0 (wq_completion)events (linkwatch_work).work 
rtnl_mutex &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond0#3 irq_context: 0 (wq_completion)bond0#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond0#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond0#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond0#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: softirq &(&bat_priv->orig_work)->timer irq_context: softirq &(&bat_priv->orig_work)->timer rcu_read_lock &pool->lock/1 irq_context: softirq &(&bat_priv->orig_work)->timer rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: softirq &(&bat_priv->orig_work)->timer rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&rew->rew_work) &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&rew->rew_work) rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&rew->rew_work) rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->mcast.work)->work) &bat_priv->mcast.mla_lock &c->lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->mcast.work)->work) &bat_priv->mcast.mla_lock &____s->seqcount irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->mcast.work)->work) &rq->__lock irq_context: 0 tomoyo_ss tomoyo_policy_lock &rq->__lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->orig_work)->work) irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->orig_work)->work) key#19 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->orig_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->orig_work)->work) &base->lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->orig_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->mcast.work)->work) pool_lock#2 irq_context: 0 (wq_completion)bond0#3 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex remove_cache_srcu irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex remove_cache_srcu quarantine_lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex remove_cache_srcu &c->lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex remove_cache_srcu &n->list_lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex remove_cache_srcu &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex fs_reclaim &rq->__lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex rcu_read_lock rcu_node_0 irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex rcu_read_lock &rq->__lock irq_context: 0 pernet_ops_rwsem 
devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex rcu_node_0 irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &rcu_state.expedited_wq irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &rcu_state.expedited_wq &p->pi_lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)events (work_completion)(&pwq->unbound_release_work) rcu_state.exp_mutex &cfs_rq->removed.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &wg->device_update_lock rcu_state.barrier_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &wg->device_update_lock rcu_state.barrier_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex uevent_sock_mutex &c->lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &p->pi_lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &root->kernfs_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &sem->wait_lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &p->pi_lock &rq->__lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &cfs_rq->removed.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &wg->device_update_lock rcu_state.exp_mutex &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)events (work_completion)(&pwq->unbound_release_work) rcu_state.exp_mutex pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &wg->device_update_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &wg->device_update_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&pwq->unbound_release_work) rcu_state.exp_mutex &obj_hash[i].lock pool_lock irq_context: 0 rtnl_mutex remove_cache_srcu rcu_read_lock &rcu_state.gp_wq irq_context: 0 rtnl_mutex remove_cache_srcu rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 rtnl_mutex remove_cache_srcu rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 rtnl_mutex remove_cache_srcu rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &hard_iface->bat_iv.ogm_buff_mutex irq_context: 0 rtnl_mutex &hard_iface->bat_iv.ogm_buff_mutex crngs.lock irq_context: 0 rtnl_mutex &hard_iface->bat_iv.ogm_buff_mutex pool_lock#2 irq_context: 0 rtnl_mutex ptype_lock irq_context: 0 rtnl_mutex 
&hard_iface->bat_iv.ogm_buff_mutex batched_entropy_u8.lock irq_context: 0 rtnl_mutex &hard_iface->bat_iv.ogm_buff_mutex &bat_priv->forw_bat_list_lock irq_context: 0 rtnl_mutex &hard_iface->bat_iv.ogm_buff_mutex &obj_hash[i].lock irq_context: 0 rtnl_mutex &hard_iface->bat_iv.ogm_buff_mutex &bat_priv->forw_bat_list_lock &obj_hash[i].lock irq_context: 0 rtnl_mutex &hard_iface->bat_iv.ogm_buff_mutex &bat_priv->forw_bat_list_lock &base->lock irq_context: 0 rtnl_mutex &hard_iface->bat_iv.ogm_buff_mutex &bat_priv->forw_bat_list_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &pnettable->lock &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &pnettable->lock pool_lock#2 irq_context: 0 rtnl_mutex dev_addr_sem &tbl->lock &n->list_lock irq_context: 0 rtnl_mutex dev_addr_sem &tbl->lock &n->list_lock &c->lock irq_context: 0 rtnl_mutex &idev->mc_lock &dev_addr_list_lock_key#2 &c->lock irq_context: 0 rtnl_mutex &br->lock &br->hash_lock &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)events (work_completion)(&ht->run_work) &ht->mutex &n->list_lock irq_context: 0 (wq_completion)events (work_completion)(&ht->run_work) &ht->mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)events (work_completion)(&ht->run_work) &ht->mutex &ht->lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&ht->run_work) &ht->mutex &ht->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 rtnl_mutex team->team_lock_key#9 irq_context: 0 rtnl_mutex team->team_lock_key#9 fs_reclaim irq_context: 0 rtnl_mutex team->team_lock_key#9 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 rtnl_mutex team->team_lock_key#9 netpoll_srcu irq_context: 0 rtnl_mutex team->team_lock_key#9 net_rwsem irq_context: 0 rtnl_mutex team->team_lock_key#9 net_rwsem &list->lock#2 irq_context: 0 rtnl_mutex team->team_lock_key#9 &tn->lock irq_context: softirq &(&bat_priv->orig_work)->timer rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: softirq &(&bat_priv->orig_work)->timer rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq drivers/net/wireguard/ratelimiter.c:20 irq_context: softirq drivers/net/wireguard/ratelimiter.c:20 rcu_read_lock &pool->lock irq_context: softirq drivers/net/wireguard/ratelimiter.c:20 rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: softirq drivers/net/wireguard/ratelimiter.c:20 rcu_read_lock &pool->lock &p->pi_lock irq_context: softirq drivers/net/wireguard/ratelimiter.c:20 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: softirq drivers/net/wireguard/ratelimiter.c:20 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_power_efficient (gc_work).work irq_context: 0 (wq_completion)events_power_efficient (gc_work).work tk_core.seq.seqcount irq_context: 0 (wq_completion)events_power_efficient (gc_work).work "ratelimiter_table_lock" irq_context: 0 (wq_completion)events_power_efficient (gc_work).work rcu_node_0 irq_context: 0 (wq_completion)events_power_efficient (gc_work).work &rcu_state.expedited_wq irq_context: 0 (wq_completion)events_power_efficient (gc_work).work &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)events_power_efficient (gc_work).work &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events_power_efficient (gc_work).work 
&rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_power_efficient (gc_work).work &rq->__lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->orig_work)->work) &rq->__lock irq_context: 0 (wq_completion)events_power_efficient (gc_work).work &obj_hash[i].lock irq_context: 0 (wq_completion)events_power_efficient (gc_work).work &base->lock irq_context: 0 (wq_completion)events_power_efficient (gc_work).work &base->lock &obj_hash[i].lock irq_context: softirq &(&bat_priv->orig_work)->timer rcu_read_lock &pool->lock/1 &p->pi_lock &cfs_rq->removed.lock irq_context: softirq &(&hdev->cmd_timer)->timer irq_context: softirq &(&hdev->cmd_timer)->timer rcu_read_lock &pool->lock/1 irq_context: softirq &(&hdev->cmd_timer)->timer rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: softirq &(&hdev->cmd_timer)->timer rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: softirq &(&hdev->cmd_timer)->timer rcu_read_lock &pool->lock/1 &p->pi_lock &cfs_rq->removed.lock irq_context: softirq &(&hdev->cmd_timer)->timer rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: softirq &(&hdev->cmd_timer)->timer rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-kex-wg1#19 (work_completion)(&peer->transmit_handshake_work) &rq->__lock irq_context: 0 (wq_completion)wg-kex-wg0#20 irq_context: 0 (wq_completion)wg-kex-wg0#20 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) irq_context: 0 (wq_completion)wg-kex-wg0#20 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &r->consumer_lock#2 irq_context: 0 (wq_completion)wg-kex-wg0#20 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock irq_context: 0 rtnl_mutex console_owner_lock irq_context: 0 rtnl_mutex console_owner irq_context: 0 (wq_completion)wg-kex-wg0#20 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock irq_context: 0 (wq_completion)wg-kex-wg0#20 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg0#20 (work_completion)(&({ do { const 
void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock irq_context: 0 (wq_completion)wg-kex-wg0#20 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg0#20 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock irq_context: 0 (wq_completion)wg-kex-wg0#20 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) tk_core.seq.seqcount irq_context: 0 (wq_completion)events_unbound (work_completion)(&(&kfence_timer)->work) fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&(&kfence_timer)->work) fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem (&timer.timer) irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss remove_cache_srcu rcu_read_lock &rcu_state.expedited_wq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss remove_cache_srcu rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss remove_cache_srcu rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss remove_cache_srcu rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex _xmit_NONE irq_context: 0 rtnl_mutex lock#9 irq_context: 0 rtnl_mutex &root->kernfs_rwsem kernfs_idr_lock &obj_hash[i].lock pool_lock irq_context: 0 rtnl_mutex rcu_state.exp_mutex fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 rtnl_mutex rcu_state.exp_mutex fill_pool_map-wait-type-override pool_lock irq_context: 0 rtnl_mutex team->team_lock_key#3 irq_context: 0 rtnl_mutex team->team_lock_key#3 fs_reclaim irq_context: 0 rtnl_mutex team->team_lock_key#3 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 rtnl_mutex team->team_lock_key#3 netpoll_srcu irq_context: 0 rtnl_mutex team->team_lock_key#3 net_rwsem irq_context: 0 rtnl_mutex team->team_lock_key#3 net_rwsem &list->lock#2 irq_context: 0 rtnl_mutex team->team_lock_key#3 &tn->lock irq_context: 0 rtnl_mutex team->team_lock_key#3 _xmit_ETHER irq_context: 0 rtnl_mutex team->team_lock_key#3 &dir->lock#2 irq_context: 0 rtnl_mutex team->team_lock_key#3 input_pool.lock irq_context: 0 rtnl_mutex team->team_lock_key#3 rcu_read_lock &ndev->lock irq_context: 0 rtnl_mutex team->team_lock_key#3 &obj_hash[i].lock 
irq_context: 0 rtnl_mutex team->team_lock_key#3 nl_table_lock irq_context: 0 rtnl_mutex team->team_lock_key#3 nl_table_wait.lock irq_context: 0 rtnl_mutex team->team_lock_key#3 rcu_read_lock &pool->lock/1 irq_context: 0 rtnl_mutex team->team_lock_key#3 rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 rtnl_mutex team->team_lock_key#3 rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 rtnl_mutex team->team_lock_key#3 &in_dev->mc_tomb_lock irq_context: 0 rtnl_mutex team->team_lock_key#3 &im->lock irq_context: 0 rtnl_mutex team->team_lock_key#3 cbs_list_lock irq_context: 0 rtnl_mutex team->team_lock_key#3 &ndev->lock irq_context: 0 rtnl_mutex team->team_lock_key#3 sysfs_symlink_target_lock irq_context: 0 rtnl_mutex team->team_lock_key#3 lock irq_context: 0 rtnl_mutex team->team_lock_key#3 lock kernfs_idr_lock irq_context: 0 rtnl_mutex team->team_lock_key#3 lock kernfs_idr_lock pool_lock#2 irq_context: 0 rtnl_mutex team->team_lock_key#3 &root->kernfs_rwsem irq_context: 0 rtnl_mutex team->team_lock_key#3 &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 rtnl_mutex team->team_lock_key#3 &c->lock irq_context: 0 rtnl_mutex team->team_lock_key#3 &____s->seqcount irq_context: 0 rtnl_mutex team->team_lock_key#3 rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 rtnl_mutex team->team_lock_key#3 rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex team->team_lock_key#3 lweventlist_lock irq_context: 0 rtnl_mutex team->team_lock_key#3 lweventlist_lock &dir->lock#2 irq_context: 0 rtnl_mutex team->team_lock_key#3 pool_lock#2 irq_context: 0 rtnl_mutex team->team_lock_key#3 (console_sem).lock irq_context: 0 rtnl_mutex team->team_lock_key#3 console_lock console_srcu console_owner_lock irq_context: 0 rtnl_mutex team->team_lock_key#3 console_lock console_srcu console_owner irq_context: 0 rtnl_mutex team->team_lock_key#3 console_lock console_srcu console_owner &port_lock_key irq_context: 0 rtnl_mutex team->team_lock_key#3 console_lock console_srcu console_owner console_owner_lock irq_context: 0 rtnl_mutex team->team_lock_key#3 &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem nf_hook_mutex pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem nf_hook_mutex &c->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem nf_nat_proto_mutex irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem nf_nat_proto_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem nf_nat_proto_mutex pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem nf_nat_proto_mutex nf_hook_mutex irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem nf_nat_proto_mutex nf_hook_mutex cpu_hotplug_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem nf_nat_proto_mutex nf_hook_mutex fs_reclaim irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem nf_nat_proto_mutex nf_hook_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem nf_nat_proto_mutex nf_hook_mutex pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem nf_nat_proto_mutex krc.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem nf_nat_proto_mutex &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) 
rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#12 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &ndev->lock &ifa->lock batched_entropy_u32.lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &root->kernfs_rwsem &sem->wait_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem pgd_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex pgd_lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex key irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex pcpu_lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex percpu_counters_lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex remove_cache_srcu pool_lock#2 irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &rxe->usdev_lock &rq->__lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem &rq->__lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem key irq_context: 0 pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem pcpu_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem percpu_counters_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem &cfs_rq->removed.lock irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock ptlock_ptr(page)#2 rcu_read_lock &p->pi_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &caifn->caifdevs.lock rcu_state.exp_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &caifn->caifdevs.lock rcu_state.exp_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock ptlock_ptr(page)#2 rcu_read_lock &p->pi_lock &rq->__lock irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock ptlock_ptr(page)#2 rcu_read_lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &rcu_state.expedited_wq irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &rcu_state.expedited_wq &p->pi_lock &rq->__lock 
&per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond0#10 irq_context: 0 (wq_completion)bond0#10 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond0#10 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond0#10 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond0#10 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock &cfs_rq->removed.lock irq_context: 0 rtnl_mutex uevent_sock_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond0#10 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond0#10 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond0#10 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 rtnl_mutex team->team_lock_key#10 irq_context: 0 rtnl_mutex team->team_lock_key#10 fs_reclaim irq_context: 0 rtnl_mutex team->team_lock_key#10 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &hdev->lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg0#21 (work_completion)(&peer->transmit_handshake_work) rcu_read_lock_bh &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &hdev->lock &____s->seqcount#2 irq_context: 0 (wq_completion)wg-kex-wg0#21 (work_completion)(&peer->transmit_handshake_work) &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg0#21 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &hdev->lock &____s->seqcount irq_context: 0 (wq_completion)wg-kex-wg0#21 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &hdev->lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg0#21 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_sk_list.lock irq_context: 0 (wq_completion)wg-kex-wg0#21 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh crngs.lock irq_context: 0 (wq_completion)wg-kex-wg0#18 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock rcu_read_lock_bh batched_entropy_u32.lock irq_context: 0 (wq_completion)wg-kex-wg0#18 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock rcu_read_lock_bh &table->lock#2 irq_context: 0 
(wq_completion)wg-kex-wg0#18 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &cookie->lock irq_context: 0 (wq_completion)wg-kex-wg0#18 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &cookie->lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &hdev->lock &data->lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &hdev->lock &hdev->unregister_lock &c->lock irq_context: 0 (wq_completion)wg-kex-wg0#18 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->tx_work) irq_context: 0 (wq_completion)wg-kex-wg0#18 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock fs_reclaim irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->tx_work) &list->lock#12 irq_context: 0 (wq_completion)wg-kex-wg0#18 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)wg-kex-wg0#18 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg0#18 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock rcu_read_lock_bh &peer->keypairs.keypair_update_lock irq_context: 0 (wq_completion)wg-kex-wg0#18 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock rcu_read_lock_bh &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg0#18 
(work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg0#18 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh &base->lock irq_context: 0 (wq_completion)wg-kex-wg0#18 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg0#18 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg0#18 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &c->lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 quarantine_lock irq_context: 0 rtnl_mutex remove_cache_srcu &rq->__lock irq_context: 0 &vma->vm_lock->lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) remove_cache_srcu rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) remove_cache_srcu rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond0#11 irq_context: 0 (wq_completion)bond0#11 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond0#11 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->mcast.work)->work) rcu_read_lock &c->lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&(&hdev->cmd_timer)->work) irq_context: 0 (wq_completion)hci3#2 (work_completion)(&(&hdev->cmd_timer)->work) (console_sem).lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&(&hdev->cmd_timer)->work) console_lock console_srcu console_owner_lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&(&hdev->cmd_timer)->work) console_lock console_srcu console_owner irq_context: 0 (wq_completion)hci3#2 (work_completion)(&(&hdev->cmd_timer)->work) console_lock console_srcu console_owner &port_lock_key irq_context: 0 (wq_completion)hci3#2 (work_completion)(&(&hdev->cmd_timer)->work) console_lock console_srcu console_owner console_owner_lock irq_context: 0 (wq_completion)hci3#2 
(work_completion)(&(&hdev->cmd_timer)->work) rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)hci3#2 (work_completion)(&(&hdev->cmd_timer)->work) rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&(&hdev->cmd_timer)->work) rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&(&hdev->cmd_timer)->work) rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&(&hdev->cmd_timer)->work) rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)hci3#2 (work_completion)(&(&hdev->cmd_timer)->work) rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&(&hdev->cmd_timer)->work) &rq->__lock irq_context: 0 rtnl_mutex &hard_iface->bat_iv.ogm_buff_mutex &c->lock irq_context: 0 rtnl_mutex &hard_iface->bat_iv.ogm_buff_mutex &____s->seqcount irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &dir->lock#2 &meta->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &dir->lock#2 kfence_freelist_lock irq_context: 0 rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock rcu_read_lock &r->producer_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem rcu_read_lock &rcu_state.expedited_wq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &wg->device_update_lock rcu_state.exp_mutex.wait_lock irq_context: 0 kn->active#5 &pcp->lock &zone->lock irq_context: 0 (wq_completion)wg-kex-wg0#16 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock rcu_read_lock_bh batched_entropy_u32.lock crngs.lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &base->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &wg->device_update_lock rcu_state.exp_mutex &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &wg->device_update_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)wg-kex-wg0#16 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &c->lock irq_context: 0 (wq_completion)wg-kex-wg0#16 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &____s->seqcount#2 irq_context: 0 (wq_completion)wg-kex-wg0#16 (work_completion)(&({ do { const void *__vpp_verify = 
(typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &____s->seqcount irq_context: 0 (wq_completion)wg-kex-wg0#16 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) pool_lock#2 irq_context: 0 (wq_completion)wg-kex-wg0#16 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &rq->__lock irq_context: 0 (wq_completion)wg-kex-wg1#16 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &base->lock irq_context: 0 (wq_completion)wg-kex-wg1#16 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg1#16 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)wg-kex-wg1#16 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-crypt-wg1#8 (work_completion)(&peer->transmit_packet_work) &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 jbd2_handle &sbi->s_orphan_lock &lock->wait_lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 rcu_read_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 &sb->s_type->i_mutex_key#10 fanout_mutex &rq->__lock irq_context: 0 
&sb->s_type->i_mutex_key#10 fanout_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &rcu_state.expedited_wq irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#8 &nsim_trap_data->trap_lock batched_entropy_u8.lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#8 &nsim_trap_data->trap_lock kfence_freelist_lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#8 &nsim_trap_data->trap_lock &meta->lock irq_context: 0 sb_writers#4 sb_internal mmu_notifier_invalidate_range_start &cfs_rq->removed.lock irq_context: 0 sb_writers#4 sb_internal mmu_notifier_invalidate_range_start &obj_hash[i].lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->nc.work)->work) rcu_read_lock &obj_hash[i].lock pool_lock irq_context: 0 rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 batched_entropy_u8.lock crngs.lock irq_context: 0 sb_writers#4 tomoyo_ss remove_cache_srcu pool_lock#2 irq_context: 0 rtnl_mutex team->team_lock_key#10 netpoll_srcu irq_context: 0 rtnl_mutex team->team_lock_key#10 net_rwsem irq_context: 0 rtnl_mutex proc_inum_ida.xa_lock &c->lock irq_context: 0 rtnl_mutex proc_inum_ida.xa_lock &____s->seqcount irq_context: 0 rtnl_mutex proc_inum_ida.xa_lock pool_lock#2 irq_context: 0 rtnl_mutex rcu_read_lock &net->sctp.local_addr_lock &net->sctp.addr_wq_lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 rtnl_mutex rcu_read_lock &net->sctp.local_addr_lock &net->sctp.addr_wq_lock fill_pool_map-wait-type-override pool_lock irq_context: 0 rtnl_mutex &hsr->list_lock irq_context: 0 rtnl_mutex pin_fs_lock irq_context: 0 rtnl_mutex &sb->s_type->i_mutex_key#3 irq_context: 0 rtnl_mutex &sb->s_type->i_mutex_key#3 &sb->s_type->i_lock_key#7 irq_context: 0 rtnl_mutex &sb->s_type->i_mutex_key#3 rename_lock.seqcount irq_context: 0 rtnl_mutex &sb->s_type->i_mutex_key#3 fs_reclaim irq_context: 0 rtnl_mutex &sb->s_type->i_mutex_key#3 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 rtnl_mutex &sb->s_type->i_mutex_key#3 pool_lock#2 irq_context: 0 rtnl_mutex &sb->s_type->i_mutex_key#3 &dentry->d_lock irq_context: 0 rtnl_mutex &sb->s_type->i_mutex_key#3 rcu_read_lock rename_lock.seqcount irq_context: 0 rtnl_mutex &sb->s_type->i_mutex_key#3 &dentry->d_lock &wq irq_context: 0 rtnl_mutex &sb->s_type->i_mutex_key#3 mmu_notifier_invalidate_range_start irq_context: 0 rtnl_mutex &sb->s_type->i_mutex_key#3 &s->s_inode_list_lock irq_context: 0 rtnl_mutex &sb->s_type->i_mutex_key#3 tk_core.seq.seqcount irq_context: 0 rtnl_mutex &sb->s_type->i_mutex_key#3 &sb->s_type->i_lock_key#7 &dentry->d_lock irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_state.exp_mutex &obj_hash[i].lock pool_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss &rcu_state.expedited_wq &p->pi_lock irq_context: 0 rtnl_mutex uevent_sock_mutex &rq->__lock 
&per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 tomoyo_ss rcu_read_lock &rcu_state.expedited_wq irq_context: 0 tomoyo_ss rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 tomoyo_ss rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 tomoyo_ss rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#3 tomoyo_ss pgd_lock irq_context: 0 sb_writers#3 tomoyo_ss rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#3 tomoyo_ss key irq_context: 0 sb_writers#3 tomoyo_ss pcpu_lock irq_context: 0 sk_lock-AF_PACKET &rnp->exp_wq[3] irq_context: 0 sb_writers#3 tomoyo_ss percpu_counters_lock irq_context: 0 sb_writers#3 tomoyo_ss &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond0#11 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond0#11 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &wg->device_update_lock rcu_state.exp_mutex &cfs_rq->removed.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &wg->device_update_lock rcu_state.exp_mutex pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &wg->device_update_lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 rtnl_mutex team->team_lock_key#11 irq_context: 0 rtnl_mutex team->team_lock_key#11 fs_reclaim irq_context: 0 rtnl_mutex team->team_lock_key#11 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 rtnl_mutex team->team_lock_key#11 &c->lock irq_context: 0 rtnl_mutex team->team_lock_key#11 netpoll_srcu irq_context: 0 rtnl_mutex team->team_lock_key#11 net_rwsem irq_context: 0 rtnl_mutex team->team_lock_key#11 net_rwsem &list->lock#2 irq_context: 0 rtnl_mutex team->team_lock_key#11 &tn->lock irq_context: 0 rtnl_mutex team->team_lock_key#11 _xmit_ETHER irq_context: 0 rtnl_mutex team->team_lock_key#11 &dir->lock#2 irq_context: 0 rtnl_mutex team->team_lock_key#11 &n->list_lock irq_context: 0 rtnl_mutex team->team_lock_key#11 &n->list_lock &c->lock irq_context: 0 rtnl_mutex team->team_lock_key#11 input_pool.lock irq_context: 0 rtnl_mutex team->team_lock_key#11 rcu_read_lock &ndev->lock irq_context: 0 rtnl_mutex team->team_lock_key#11 &obj_hash[i].lock irq_context: 0 rtnl_mutex team->team_lock_key#11 nl_table_lock irq_context: 0 rtnl_mutex team->team_lock_key#11 nl_table_wait.lock irq_context: 0 rtnl_mutex team->team_lock_key#11 rcu_read_lock &pool->lock/1 irq_context: 0 rtnl_mutex team->team_lock_key#11 rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 rtnl_mutex team->team_lock_key#11 rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 rtnl_mutex team->team_lock_key#11 rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 rtnl_mutex team->team_lock_key#11 rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex team->team_lock_key#11 &rq->__lock irq_context: 0 rtnl_mutex team->team_lock_key#11 &in_dev->mc_tomb_lock irq_context: 0 rtnl_mutex team->team_lock_key#11 &im->lock irq_context: 0 rtnl_mutex remove_cache_srcu rcu_read_lock &____s->seqcount irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock &tbl->lock pool_lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock &tbl->lock 
batched_entropy_u32.lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock &tbl->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex gdp_mutex fs_reclaim irq_context: 0 rtnl_mutex gdp_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 rtnl_mutex gdp_mutex pool_lock#2 irq_context: 0 rtnl_mutex gdp_mutex &c->lock irq_context: 0 rtnl_mutex gdp_mutex &____s->seqcount irq_context: 0 rtnl_mutex gdp_mutex lock irq_context: 0 rtnl_mutex gdp_mutex lock kernfs_idr_lock irq_context: 0 rtnl_mutex gdp_mutex &root->kernfs_rwsem irq_context: 0 rtnl_mutex gdp_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 rtnl_mutex gdp_mutex kobj_ns_type_lock irq_context: 0 rtnl_mutex &k->k_lock irq_context: 0 hrtimer_bases.lock fill_pool_map-wait-type-override &pcp->lock &zone->lock &____s->seqcount irq_context: 0 hrtimer_bases.lock fill_pool_map-wait-type-override rcu_read_lock pool_lock#2 irq_context: 0 hrtimer_bases.lock fill_pool_map-wait-type-override &obj_hash[i].lock irq_context: 0 rtnl_mutex &sb->s_type->i_mutex_key#3 rcu_read_lock &dentry->d_lock irq_context: 0 rtnl_mutex &sb->s_type->i_mutex_key#3 (console_sem).lock irq_context: 0 rtnl_mutex &sb->s_type->i_mutex_key#3 console_lock console_srcu console_owner_lock irq_context: 0 rtnl_mutex &sb->s_type->i_mutex_key#3 console_lock console_srcu console_owner irq_context: 0 rtnl_mutex &sb->s_type->i_mutex_key#3 console_lock console_srcu console_owner &port_lock_key irq_context: 0 rtnl_mutex &sb->s_type->i_mutex_key#3 console_lock console_srcu console_owner console_owner_lock irq_context: 0 rtnl_mutex rcu_read_lock mount_lock irq_context: 0 rtnl_mutex rcu_read_lock mount_lock mount_lock.seqcount irq_context: 0 rtnl_mutex mount_lock irq_context: 0 rtnl_mutex mount_lock mount_lock.seqcount irq_context: 0 rtnl_mutex team->team_lock_key#9 _xmit_ETHER irq_context: 0 rtnl_mutex team->team_lock_key#9 &dir->lock#2 irq_context: 0 rtnl_mutex team->team_lock_key#9 input_pool.lock irq_context: 0 rtnl_mutex team->team_lock_key#9 rcu_read_lock &ndev->lock irq_context: 0 rtnl_mutex team->team_lock_key#9 &c->lock irq_context: 0 rtnl_mutex team->team_lock_key#9 &obj_hash[i].lock irq_context: 0 rtnl_mutex team->team_lock_key#9 nl_table_lock irq_context: 0 rtnl_mutex team->team_lock_key#9 nl_table_wait.lock irq_context: 0 rtnl_mutex team->team_lock_key#9 rcu_read_lock &pool->lock/1 irq_context: 0 rtnl_mutex team->team_lock_key#9 rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 rtnl_mutex team->team_lock_key#9 rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 rtnl_mutex team->team_lock_key#9 &in_dev->mc_tomb_lock irq_context: 0 rtnl_mutex team->team_lock_key#9 &im->lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#3 irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#3 fs_reclaim irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#3 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#3 &c->lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#3 &pcp->lock &zone->lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#3 &pcp->lock &zone->lock &____s->seqcount irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#3 &____s->seqcount irq_context: 0 
(wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#3 rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#3 &obj_hash[i].lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#3 nl_table_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#3 nl_table_wait.lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#3 net_rwsem irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#3 net_rwsem &list->lock#2 irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#3 &tn->lock irq_context: 0 rtnl_mutex team->team_lock_key#10 net_rwsem &list->lock#2 irq_context: 0 rtnl_mutex team->team_lock_key#10 &tn->lock irq_context: 0 rtnl_mutex team->team_lock_key#10 _xmit_ETHER irq_context: 0 rtnl_mutex team->team_lock_key#10 &dir->lock#2 irq_context: 0 rtnl_mutex team->team_lock_key#10 input_pool.lock irq_context: 0 rtnl_mutex team->team_lock_key#10 &c->lock irq_context: 0 rtnl_mutex team->team_lock_key#10 rcu_read_lock &ndev->lock irq_context: 0 rtnl_mutex team->team_lock_key#10 &obj_hash[i].lock irq_context: 0 rtnl_mutex team->team_lock_key#10 nl_table_lock irq_context: 0 rtnl_mutex team->team_lock_key#10 nl_table_wait.lock irq_context: 0 rtnl_mutex team->team_lock_key#10 rcu_read_lock &pool->lock/1 irq_context: 0 rtnl_mutex team->team_lock_key#10 rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) &rcu_state.expedited_wq irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem rcu_read_lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem rcu_read_lock &obj_hash[i].lock irq_context: 0 rtnl_mutex dev_hotplug_mutex &rq->__lock irq_context: 0 rtnl_mutex dev_hotplug_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 jbd2_handle &meta_group_info[i]->alloc_sem &bgl->locks[i].lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#8 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex quarantine_lock irq_context: 0 rtnl_mutex team->team_lock_key#10 rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 rtnl_mutex team->team_lock_key#10 rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 rtnl_mutex team->team_lock_key#10 rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex team->team_lock_key#10 &rq->__lock irq_context: 0 
rtnl_mutex team->team_lock_key#10 &in_dev->mc_tomb_lock irq_context: 0 rtnl_mutex team->team_lock_key#10 &im->lock irq_context: 0 sb_writers#4 tomoyo_ss remove_cache_srcu rcu_read_lock &rcu_state.expedited_wq irq_context: 0 sb_writers#4 tomoyo_ss remove_cache_srcu rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 sb_writers#4 tomoyo_ss remove_cache_srcu rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_state.exp_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_state.exp_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex team->team_lock_key#11 _xmit_ETHER &c->lock irq_context: 0 rtnl_mutex team->team_lock_key#11 cbs_list_lock irq_context: 0 rtnl_mutex team->team_lock_key#11 &ndev->lock irq_context: 0 rtnl_mutex team->team_lock_key#11 sysfs_symlink_target_lock irq_context: 0 rtnl_mutex team->team_lock_key#11 lock irq_context: 0 rtnl_mutex team->team_lock_key#11 lock kernfs_idr_lock irq_context: 0 rtnl_mutex team->team_lock_key#11 &root->kernfs_rwsem irq_context: 0 rtnl_mutex team->team_lock_key#11 &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 rtnl_mutex team->team_lock_key#11 lweventlist_lock irq_context: 0 rtnl_mutex team->team_lock_key#11 lweventlist_lock &dir->lock#2 irq_context: 0 rtnl_mutex team->team_lock_key#11 (console_sem).lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &n->list_lock &c->lock irq_context: softirq &(&forw_packet_aggr->delayed_work)->timer irq_context: softirq &(&forw_packet_aggr->delayed_work)->timer rcu_read_lock &pool->lock/1 irq_context: softirq &(&forw_packet_aggr->delayed_work)->timer rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: softirq &(&forw_packet_aggr->delayed_work)->timer rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->orig_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex rcu_read_lock &rcu_state.expedited_wq irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex batched_entropy_u8.lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex &bat_priv->forw_bat_list_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex pool_lock#2 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex &obj_hash[i].lock irq_context: 0 
(wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex &bat_priv->forw_bat_list_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex &bat_priv->forw_bat_list_lock &base->lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex &bat_priv->forw_bat_list_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex rcu_node_0 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex &rcu_state.expedited_wq irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex &rq->__lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &bat_priv->forw_bat_list_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) pool_lock#2 irq_context: 0 rtnl_mutex kernfs_idr_lock &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->nc.work)->work) rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &idev->mc_lock &vlan_netdev_addr_lock_key irq_context: 0 rtnl_mutex &idev->mc_lock &vlan_netdev_addr_lock_key pool_lock#2 irq_context: 0 rtnl_mutex team->team_lock_key#10 _xmit_ETHER &c->lock irq_context: 0 rtnl_mutex &root->kernfs_rwsem kernfs_idr_lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 rtnl_mutex &root->kernfs_rwsem kernfs_idr_lock fill_pool_map-wait-type-override pool_lock irq_context: softirq (&app->join_timer) irq_context: softirq (&app->join_timer) &app->lock irq_context: softirq (&app->join_timer) &list->lock#14 irq_context: softirq (&app->join_timer) &app->lock batched_entropy_u32.lock irq_context: softirq (&app->join_timer) &app->lock &obj_hash[i].lock irq_context: softirq (&app->join_timer) &app->lock &base->lock irq_context: softirq (&app->join_timer) &app->lock &base->lock &obj_hash[i].lock irq_context: softirq (&app->join_timer)#2 irq_context: softirq (&app->join_timer)#2 &app->lock#2 irq_context: softirq (&app->join_timer)#2 &list->lock#15 irq_context: softirq (&app->join_timer)#2 batched_entropy_u32.lock irq_context: softirq (&app->join_timer)#2 &obj_hash[i].lock irq_context: softirq (&app->join_timer)#2 &base->lock irq_context: softirq (&app->join_timer)#2 &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&rew->rew_work) rcu_state.exp_wake_mutex &rq->__lock irq_context: 0 rtnl_mutex &sb->s_type->i_mutex_key#3 &rq->__lock irq_context: 0 rtnl_mutex &idev->mc_lock &macvlan_netdev_addr_lock_key irq_context: 0 rtnl_mutex &idev->mc_lock &macvlan_netdev_addr_lock_key pool_lock#2 irq_context: 0 
rtnl_mutex remove_cache_srcu rcu_read_lock rcu_node_0 irq_context: 0 rtnl_mutex remove_cache_srcu rcu_read_lock &rq->__lock irq_context: softirq &(&bat_priv->nc.work)->timer rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override pool_lock#2 irq_context: softirq &(&bat_priv->nc.work)->timer rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override pool_lock irq_context: 0 rtnl_mutex &idev->mc_lock &dev_addr_list_lock_key#3 irq_context: 0 rtnl_mutex &idev->mc_lock &dev_addr_list_lock_key#3 pool_lock#2 irq_context: 0 rtnl_mutex &xa->xa_lock#13 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->orig_work)->work) rcu_node_0 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->orig_work)->work) &rcu_state.expedited_wq irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->orig_work)->work) &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->orig_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &dev_addr_list_lock_key#3/1 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh &obj_hash[i].lock pool_lock irq_context: 0 rtnl_mutex _xmit_ETHER &pcp->lock &zone->lock irq_context: 0 rtnl_mutex _xmit_ETHER &pcp->lock &zone->lock &____s->seqcount irq_context: 0 rtnl_mutex rcu_read_lock &tap_major->minor_lock irq_context: 0 rtnl_mutex rcu_read_lock &tap_major->minor_lock pool_lock#2 irq_context: 0 rtnl_mutex req_lock irq_context: 0 rtnl_mutex &x->wait#11 irq_context: 0 rtnl_mutex subsys mutex#81 irq_context: 0 rtnl_mutex subsys mutex#81 &k->k_lock irq_context: 0 rtnl_mutex &root->kernfs_rwsem quarantine_lock irq_context: 0 rtnl_mutex rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override &c->lock irq_context: 0 rtnl_mutex rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 rtnl_mutex lock kernfs_idr_lock rcu_read_lock pool_lock#2 irq_context: 0 rtnl_mutex lock kernfs_idr_lock &obj_hash[i].lock irq_context: 0 rtnl_mutex net_rwsem &rq->__lock irq_context: 0 rtnl_mutex &idev->mc_lock &vlan_netdev_addr_lock_key &c->lock irq_context: 0 rtnl_mutex &idev->mc_lock &vlan_netdev_addr_lock_key &____s->seqcount irq_context: 0 kn->active#51 fs_reclaim irq_context: 0 kn->active#51 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#51 &kernfs_locks->open_file_mutex[count] irq_context: 0 kn->active#51 &kernfs_locks->open_file_mutex[count] fs_reclaim irq_context: 0 kn->active#51 &kernfs_locks->open_file_mutex[count] fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#8 &of->mutex kn->active#51 nsim_bus_dev_list_lock irq_context: 0 sb_writers#8 &type->i_mutex_dir_key#4 &sem->wait_lock irq_context: 0 sb_writers#8 &type->i_mutex_dir_key#4 &p->pi_lock irq_context: 0 sb_writers#8 &type->i_mutex_dir_key#4 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 sb_writers#8 &type->i_mutex_dir_key#4 &p->pi_lock &rq->__lock irq_context: 0 sb_writers#8 &type->i_mutex_dir_key#4 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 kn->active#52 &rq->__lock irq_context: 0 kn->active#52 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 kn->active#52 fs_reclaim irq_context: 0 kn->active#52 fs_reclaim &rq->__lock irq_context: 0 kn->active#52 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#52 &c->lock irq_context: 0 kn->active#52 &____s->seqcount 
irq_context: 0 kn->active#52 &kernfs_locks->open_file_mutex[count] irq_context: 0 kn->active#52 &kernfs_locks->open_file_mutex[count] fs_reclaim irq_context: 0 kn->active#52 &kernfs_locks->open_file_mutex[count] fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock fs_reclaim irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock pool_lock#2 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock nsim_bus_dev_ids.xa_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &x->wait#9 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &obj_hash[i].lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &____s->seqcount irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &k->list_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock lock kernfs_idr_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &root->kernfs_rwsem irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock bus_type_sem irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock sysfs_symlink_target_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &k->k_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &root->kernfs_rwsem irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &root->kernfs_rwsem &sem->wait_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &c->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->power.lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock dpm_list_mtx irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock uevent_sock_mutex irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock uevent_sock_mutex fs_reclaim irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock uevent_sock_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock uevent_sock_mutex pool_lock#2 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock uevent_sock_mutex nl_table_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock uevent_sock_mutex rlock-AF_NETLINK irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait 
&ep->lock &ep->wq &p->pi_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock uevent_sock_mutex nl_table_wait.lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock uevent_sock_mutex &obj_hash[i].lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &k->list_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &k->k_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex device_links_srcu irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &dev->power.lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex fwnode_link_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex device_links_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex sysfs_symlink_target_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex fs_reclaim irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex pool_lock#2 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex lock kernfs_idr_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &root->kernfs_rwsem irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &c->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &____s->seqcount irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex devlinks.xa_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &obj_hash[i].lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#7 rtnl_mutex batched_entropy_u32.lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#7 rtnl_mutex &tbl->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#7 rtnl_mutex sysctl_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#7 rtnl_mutex failover_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#7 rtnl_mutex pcpu_alloc_mutex irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#7 rtnl_mutex pcpu_alloc_mutex pcpu_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#7 rtnl_mutex proc_subdir_lock irq_context: 0 sb_writers#8 
&of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#7 rtnl_mutex proc_inum_ida.xa_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#7 rtnl_mutex proc_subdir_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#7 rtnl_mutex &idev->mc_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#7 rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#7 rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#7 rtnl_mutex &idev->mc_lock &obj_hash[i].lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#7 rtnl_mutex &idev->mc_lock _xmit_ETHER irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#7 rtnl_mutex &pnettable->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#7 rtnl_mutex smc_ib_devices.mutex irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#7 rtnl_mutex &vn->sock_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#7 rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#7 &xa->xa_lock#14 &c->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#7 &xa->xa_lock#14 &n->list_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#7 &xa->xa_lock#14 &n->list_lock &c->lock irq_context: 0 rtnl_mutex uevent_sock_mutex uevent_sock_mutex.wait_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#7 rtnl_mutex &sb->s_type->i_mutex_key#3 &c->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#7 rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#7 rtnl_mutex &idev->mc_lock &n->list_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#7 rtnl_mutex &idev->mc_lock &n->list_lock &c->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#7 rtnl_mutex &idev->mc_lock &rq->__lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#7 rtnl_mutex &sb->s_type->i_mutex_key#3 &n->list_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#7 rtnl_mutex &sb->s_type->i_mutex_key#3 &n->list_lock &c->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#7 rtnl_mutex &rq->__lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#7 rtnl_mutex &obj_hash[i].lock pool_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#7 pool_lock#2 
irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#7 &sb->s_type->i_mutex_key#3 &rq->__lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#7 rtnl_mutex pool_lock#2 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#7 rtnl_mutex &idev->mc_lock &obj_hash[i].lock pool_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &____s->seqcount#2 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex uevent_sock_mutex rcu_read_lock &rcu_state.gp_wq irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex uevent_sock_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex uevent_sock_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex uevent_sock_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock nlk_cb_mutex-GENERIC &devlink->lock_key#7 irq_context: 0 cb_lock nlk_cb_mutex-GENERIC &devlink->lock_key#7 &devlink_port->type_lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#7 &nsim_trap_data->trap_lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#7 &nsim_trap_data->trap_lock &c->lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#7 &nsim_trap_data->trap_lock crngs.lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#7 &nsim_trap_data->trap_lock &nsim_dev->fa_cookie_lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#7 &nsim_trap_data->trap_lock &obj_hash[i].lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &wg->device_update_lock &____s->seqcount#2 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->nc.work)->work) rcu_read_lock &rcu_state.gp_wq irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->nc.work)->work) rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->nc.work)->work) rcu_read_lock &rcu_state.gp_wq &p->pi_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->nc.work)->work) rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->nc.work)->work) rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->nc.work)->work) rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &tb->tb6_lock rcu_read_lock &n->list_lock irq_context: 0 (wq_completion)ipv6_addrconf 
(work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &tb->tb6_lock rcu_read_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&(&hdev->cmd_timer)->work) irq_context: 0 (wq_completion)hci1#2 (work_completion)(&(&hdev->cmd_timer)->work) (console_sem).lock irq_context: 0 rtnl_mutex dev_addr_sem rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&(&hdev->cmd_timer)->work) console_lock console_srcu console_owner_lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&(&hdev->cmd_timer)->work) console_lock console_srcu console_owner irq_context: 0 (wq_completion)hci1#2 (work_completion)(&(&hdev->cmd_timer)->work) console_lock console_srcu console_owner &port_lock_key irq_context: 0 (wq_completion)hci1#2 (work_completion)(&(&hdev->cmd_timer)->work) console_lock console_srcu console_owner console_owner_lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&(&hdev->cmd_timer)->work) rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)hci1#2 (work_completion)(&(&hdev->cmd_timer)->work) rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&(&hdev->cmd_timer)->work) rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&(&hdev->cmd_timer)->work) rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&(&hdev->cmd_timer)->work) rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)hci1#2 (work_completion)(&(&hdev->cmd_timer)->work) &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#7 &nsim_trap_data->trap_lock &n->list_lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#7 &nsim_trap_data->trap_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock batched_entropy_u8.lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock kfence_freelist_lock irq_context: softirq (&app->join_timer)#2 batched_entropy_u32.lock crngs.lock irq_context: 0 (wq_completion)events (work_completion)(&data->fib_event_work) irq_context: 0 (wq_completion)events (work_completion)(&data->fib_event_work) &data->fib_event_queue_lock irq_context: 0 (wq_completion)events (work_completion)(&data->fib_event_work) &data->fib_lock irq_context: 0 (wq_completion)events (work_completion)(&data->fib_event_work) &data->fib_lock fs_reclaim irq_context: 0 (wq_completion)events (work_completion)(&data->fib_event_work) &data->fib_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events (work_completion)(&data->fib_event_work) &data->fib_lock pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&data->fib_event_work) &data->fib_lock rcu_read_lock rhashtable_bucket irq_context: 0 (wq_completion)events (work_completion)(&data->fib_event_work) &data->fib_lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&data->fib_event_work) &data->fib_lock &base->lock irq_context: 0 (wq_completion)events (work_completion)(&data->fib_event_work) &data->fib_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&data->fib_event_work) 
&data->fib_lock &pool->lock irq_context: 0 (wq_completion)events (work_completion)(&data->fib_event_work) &data->fib_lock &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&data->fib_event_work) &data->fib_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &dev_addr_list_lock_key/1 rcu_read_lock _xmit_ETHER pool_lock#2 irq_context: 0 rtnl_mutex &dev_addr_list_lock_key/1 &c->lock irq_context: softirq &(&bat_priv->mcast.work)->timer rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->mcast.work)->work) rcu_read_lock &____s->seqcount irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#7 &nsim_trap_data->trap_lock &____s->seqcount#2 irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#7 &nsim_trap_data->trap_lock &____s->seqcount irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#7 &nsim_trap_data->trap_lock quarantine_lock irq_context: 0 rtnl_mutex dev_addr_sem team->team_lock_key#7 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#8 rtnl_mutex batched_entropy_u8.lock irq_context: 0 rtnl_mutex &idev->mc_lock &pcp->lock &zone->lock irq_context: 0 rtnl_mutex &idev->mc_lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#8 rtnl_mutex kfence_freelist_lock irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem &net->sctp.local_addr_lock &net->sctp.addr_wq_lock &n->list_lock irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem &net->sctp.local_addr_lock &net->sctp.addr_wq_lock &n->list_lock &c->lock irq_context: 0 rtnl_mutex dev_addr_sem remove_cache_srcu pool_lock#2 irq_context: 0 rtnl_mutex dev_addr_sem remove_cache_srcu &cfs_rq->removed.lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#7 &n->list_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#7 &n->list_lock &c->lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#7 lweventlist_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#7 lweventlist_lock &dir->lock#2 irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#7 rcu_read_lock &pool->lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#7 rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: softirq rcu_read_lock &br->multicast_lock &n->list_lock irq_context: softirq rcu_read_lock &br->multicast_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_read_lock &rcu_state.gp_wq &p->pi_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_read_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem vmap_area_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem purge_vmap_area_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem purge_vmap_area_lock &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem purge_vmap_area_lock pool_lock#2 
irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_read_lock sysctl_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_read_lock &sb->s_type->i_lock_key#23 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &sb->s_type->i_lock_key#23 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &sb->s_type->i_lock_key#23 &dentry->d_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rename_lock.seqcount irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &dentry->d_lock &sb->s_type->i_lock_key#23 &dentry->d_lock &lru->node[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &dentry->d_lock &lru->node[i].lock irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem pcpu_alloc_mutex fs_reclaim irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem pcpu_alloc_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem pcpu_alloc_mutex &____s->seqcount irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem pcpu_alloc_mutex pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem nf_conntrack_mutex irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem nf_conntrack_mutex &nf_conntrack_locks[i] irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem nf_conntrack_mutex &____s->seqcount#7 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem nf_conntrack_mutex &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem nf_conntrack_mutex &nf_conntrack_locks[i]/1 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem nf_conntrack_mutex rcu_read_lock &nf_nat_locks[i] irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem nf_conntrack_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem nf_conntrack_mutex pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem nf_conntrack_mutex &c->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem nf_conntrack_mutex &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem uevent_sock_mutex &n->list_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem uevent_sock_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &dev_addr_list_lock_key#2/1 rcu_read_lock _xmit_ETHER &n->list_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &dev_addr_list_lock_key#2/1 rcu_read_lock _xmit_ETHER &n->list_lock &c->lock irq_context: 0 tasklist_lock &sighand->siglock &____s->seqcount#2 irq_context: 0 tasklist_lock &sighand->siglock rcu_read_lock pool_lock#2 irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem &anon_vma->rwsem fill_pool_map-wait-type-override &n->list_lock irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem &anon_vma->rwsem fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex lweventlist_lock rcu_read_lock &pool->lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex lweventlist_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&(&hdev->cmd_timer)->work) console_owner_lock irq_context: 0 (wq_completion)hci1#2 
(work_completion)(&(&hdev->cmd_timer)->work) console_owner irq_context: 0 (wq_completion)wg-kex-wg0#13 irq_context: 0 (wq_completion)wg-kex-wg0#13 (work_completion)(&peer->transmit_handshake_work) irq_context: 0 (wq_completion)wg-kex-wg0#13 (work_completion)(&peer->transmit_handshake_work) tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg0#13 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock irq_context: 0 (wq_completion)wg-kex-wg0#13 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock irq_context: 0 (wq_completion)wg-kex-wg0#13 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock crngs.lock irq_context: 0 (wq_completion)wg-kex-wg0#13 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock tk_core.seq.seqcount irq_context: 0 (wq_completion)events (work_completion)(&data->fib_event_work) &data->fib_lock (&timer.timer) irq_context: 0 (wq_completion)wg-kex-wg0#13 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg0#13 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock rcu_read_lock_bh batched_entropy_u32.lock irq_context: 0 (wq_completion)wg-kex-wg0#13 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock rcu_read_lock_bh &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg0#13 (work_completion)(&peer->transmit_handshake_work) &cookie->lock irq_context: 0 (wq_completion)wg-kex-wg0#13 (work_completion)(&peer->transmit_handshake_work) &cookie->lock irq_context: 0 (wq_completion)wg-kex-wg0#13 (work_completion)(&peer->transmit_handshake_work) rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg0#13 (work_completion)(&peer->transmit_handshake_work) rcu_read_lock_bh &base->lock irq_context: 0 (wq_completion)wg-kex-wg0#13 (work_completion)(&peer->transmit_handshake_work) rcu_read_lock_bh &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg0#13 (work_completion)(&peer->transmit_handshake_work) &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg0#13 (work_completion)(&peer->transmit_handshake_work) &c->lock irq_context: 0 (wq_completion)wg-kex-wg0#13 (work_completion)(&peer->transmit_handshake_work) &n->list_lock irq_context: 0 (wq_completion)wg-kex-wg0#13 (work_completion)(&peer->transmit_handshake_work) &n->list_lock &c->lock irq_context: 0 (wq_completion)wg-kex-wg0#13 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock irq_context: 0 (wq_completion)wg-kex-wg0#13 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)wg-kex-wg0#13 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#7 irq_context: 0 (wq_completion)wg-kex-wg0#13 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &nf_conntrack_locks[i] irq_context: 0 (wq_completion)wg-kex-wg0#13 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 irq_context: 0 (wq_completion)wg-kex-wg0#13 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &nf_conntrack_locks[i] 
&nf_conntrack_locks[i]/1 batched_entropy_u8.lock irq_context: 0 (wq_completion)wg-kex-wg0#13 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &nf_conntrack_locks[i]/1 irq_context: 0 (wq_completion)wg-kex-wg0#13 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg0#13 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)wg-kex-wg0#13 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)wg-kex-wg0#13 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &n->lock irq_context: 0 (wq_completion)wg-kex-wg0#13 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)wg-kex-wg0#13 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg0#13 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: 0 (wq_completion)wg-kex-wg0#13 (work_completion)(&peer->transmit_handshake_work) batched_entropy_u8.lock irq_context: 0 (wq_completion)wg-kex-wg0#13 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 (wq_completion)wg-kex-wg0#13 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh crngs.lock irq_context: 0 (wq_completion)wg-kex-wg0#13 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh batched_entropy_u32.lock irq_context: 0 (wq_completion)wg-kex-wg0#13 (work_completion)(&peer->transmit_handshake_work) &rq->__lock irq_context: 0 (wq_completion)wg-kex-wg1#13 irq_context: 0 (wq_completion)wg-kex-wg1#13 (work_completion)(&peer->transmit_handshake_work) irq_context: 0 (wq_completion)wg-kex-wg1#13 (work_completion)(&peer->transmit_handshake_work) tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg1#13 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock irq_context: 0 (wq_completion)wg-kex-wg1#13 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock irq_context: 0 (wq_completion)wg-kex-wg1#13 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock crngs.lock irq_context: 0 (wq_completion)wg-kex-wg1#13 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg1#13 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg1#13 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock rcu_read_lock_bh batched_entropy_u32.lock irq_context: 0 (wq_completion)wg-kex-wg1#13 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock rcu_read_lock_bh &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg1#13 (work_completion)(&peer->transmit_handshake_work) &cookie->lock irq_context: 0 (wq_completion)wg-kex-wg1#13 
(work_completion)(&peer->transmit_handshake_work) &cookie->lock irq_context: 0 (wq_completion)wg-kex-wg1#13 (work_completion)(&peer->transmit_handshake_work) rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg1#13 (work_completion)(&peer->transmit_handshake_work) rcu_read_lock_bh &base->lock irq_context: 0 (wq_completion)wg-kex-wg1#13 (work_completion)(&peer->transmit_handshake_work) rcu_read_lock_bh &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg1#13 (work_completion)(&peer->transmit_handshake_work) &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg1#13 (work_completion)(&peer->transmit_handshake_work) &c->lock irq_context: 0 (wq_completion)wg-kex-wg1#13 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock irq_context: 0 (wq_completion)wg-kex-wg1#13 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#7 irq_context: softirq &(&nsim_dev->trap_data->trap_report_dw)->timer irq_context: softirq &(&nsim_dev->trap_data->trap_report_dw)->timer rcu_read_lock &pool->lock irq_context: softirq &(&nsim_dev->trap_data->trap_report_dw)->timer rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: softirq &(&nsim_dev->trap_data->trap_report_dw)->timer rcu_read_lock &pool->lock &p->pi_lock irq_context: softirq &(&nsim_dev->trap_data->trap_report_dw)->timer rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: softirq &(&nsim_dev->trap_data->trap_report_dw)->timer rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) rcu_read_lock &pool->lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) rcu_read_lock &pool->lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) rcu_read_lock &pool->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#51 nsim_bus_dev_list_lock nsim_bus_dev_list_lock.wait_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#51 nsim_bus_dev_list_lock &rq->__lock irq_context: 0 sb_writers#8 &of->mutex kn->active#51 nsim_bus_dev_list_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#8 &of->mutex kn->active#51 nsim_bus_dev_list_lock rcu_read_lock &rq->__lock irq_context: 0 sb_writers#8 &of->mutex kn->active#51 nsim_bus_dev_list_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq (&app->join_timer) &app->lock batched_entropy_u32.lock crngs.lock irq_context: 0 rtnl_mutex uevent_sock_mutex &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &rq->__lock irq_context: 0 (wq_completion)wg-kex-wg1#13 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &nf_conntrack_locks[i] irq_context: 0 (wq_completion)wg-kex-wg1#13 
(work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 irq_context: 0 (wq_completion)wg-kex-wg1#13 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 batched_entropy_u8.lock irq_context: 0 (wq_completion)wg-kex-wg1#13 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &nf_conntrack_locks[i]/1 irq_context: 0 (wq_completion)wg-kex-wg1#13 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)wg-kex-wg1#13 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg1#13 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: 0 (wq_completion)wg-kex-wg1#13 (work_completion)(&peer->transmit_handshake_work) batched_entropy_u8.lock irq_context: 0 (wq_completion)wg-kex-wg0#14 irq_context: 0 (wq_completion)wg-kex-wg0#14 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) irq_context: 0 (wq_completion)wg-kex-wg0#14 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &r->consumer_lock#2 irq_context: 0 (wq_completion)wg-kex-wg0#14 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock irq_context: 0 (wq_completion)wg-kex-wg0#14 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock irq_context: 0 (wq_completion)wg-kex-wg0#14 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock tk_core.seq.seqcount irq_context: softirq &(&forw_packet_aggr->delayed_work)->timer rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: softirq &(&forw_packet_aggr->delayed_work)->timer rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq (&tun->flow_gc_timer) irq_context: softirq (&tun->flow_gc_timer) &tun->lock irq_context: softirq 
&(&hwstats->traffic_dw)->timer irq_context: softirq &(&hwstats->traffic_dw)->timer rcu_read_lock &pool->lock irq_context: softirq &(&hwstats->traffic_dw)->timer rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: softirq &(&hwstats->traffic_dw)->timer rcu_read_lock &pool->lock &p->pi_lock irq_context: softirq &(&hwstats->traffic_dw)->timer rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: softirq &(&hwstats->traffic_dw)->timer rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&(&hwstats->traffic_dw)->work) irq_context: 0 (wq_completion)events (work_completion)(&(&hwstats->traffic_dw)->work) &hwstats->hwsdev_list_lock irq_context: 0 (wq_completion)events (work_completion)(&(&hwstats->traffic_dw)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&(&hwstats->traffic_dw)->work) &base->lock irq_context: 0 (wq_completion)events (work_completion)(&(&hwstats->traffic_dw)->work) &base->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex gdp_mutex lock kernfs_idr_lock pool_lock#2 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)wg-kex-wg0#14 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock irq_context: 0 (wq_completion)wg-kex-wg0#14 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg0#14 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock irq_context: 0 (wq_completion)wg-kex-wg0#14 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg0#14 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock crngs.lock irq_context: 0 (wq_completion)wg-kex-wg0#14 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); 
(typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg0#14 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock rcu_read_lock_bh batched_entropy_u32.lock irq_context: 0 (wq_completion)wg-kex-wg0#14 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock rcu_read_lock_bh &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg0#14 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &cookie->lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) rcu_read_lock &pool->lock fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) rcu_read_lock &pool->lock fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex deferred_probe_mutex irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex rcu_read_lock &pool->lock/1 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex uevent_sock_mutex irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex uevent_sock_mutex &rq->__lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex uevent_sock_mutex fs_reclaim irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex uevent_sock_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex uevent_sock_mutex pool_lock#2 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex uevent_sock_mutex nl_table_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex uevent_sock_mutex rlock-AF_NETLINK irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq 
irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex uevent_sock_mutex rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex uevent_sock_mutex rcu_read_lock &rq->__lock irq_context: 0 rtnl_mutex uevent_sock_mutex rcu_read_lock &rq->__lock irq_context: 0 rtnl_mutex uevent_sock_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex uevent_sock_mutex nl_table_wait.lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex uevent_sock_mutex &obj_hash[i].lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex uevent_sock_mutex.wait_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &p->pi_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &obj_hash[i].lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &base->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex probe_waitqueue.lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock subsys mutex#82 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock.wait_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 &p->pi_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 &p->pi_lock &rq->__lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#8 &of->mutex kn->active#52 &rq->__lock irq_context: 0 sb_writers#8 &of->mutex kn->active#51 nsim_bus_dev_list_lock.wait_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#51 &p->pi_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#51 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 sb_writers#8 &of->mutex kn->active#51 &p->pi_lock &rq->__lock irq_context: 0 sb_writers#8 &of->mutex kn->active#51 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock uevent_sock_mutex &c->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock uevent_sock_mutex &pcp->lock &zone->lock irq_context: 
0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock uevent_sock_mutex &pcp->lock &zone->lock &____s->seqcount irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock uevent_sock_mutex &____s->seqcount irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock uevent_sock_mutex rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock uevent_sock_mutex rcu_read_lock &rq->__lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &root->kernfs_rwsem &sem->wait_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &root->kernfs_rwsem &rq->__lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &root->kernfs_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-kex-wg0#14 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &cookie->lock irq_context: 0 (wq_completion)wg-kex-wg0#14 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex devlinks.xa_lock pool_lock#2 irq_context: 0 (wq_completion)wg-kex-wg0#18 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock irq_context: 0 (wq_completion)wg-kex-wg0#18 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#7 irq_context: 0 (wq_completion)wg-kex-wg0#18 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)wg-kex-wg0#18 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg0#18 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); 
(void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: 0 (wq_completion)wg-kex-wg1#18 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg1#18 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock irq_context: 0 (wq_completion)wg-kex-wg1#18 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg1#18 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock irq_context: 0 (wq_completion)wg-kex-wg1#18 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg1#18 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock crngs.lock irq_context: 0 (wq_completion)wg-kex-wg1#18 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg1#18 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock 
rcu_read_lock_bh batched_entropy_u32.lock irq_context: 0 (wq_completion)wg-kex-wg1#18 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock rcu_read_lock_bh &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg1#18 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &cookie->lock irq_context: 0 (wq_completion)wg-kex-wg1#18 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg1#18 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &cookie->lock irq_context: 0 (wq_completion)wg-kex-wg1#18 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock irq_context: 0 (wq_completion)wg-kex-wg1#18 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock fs_reclaim irq_context: 0 (wq_completion)wg-kex-wg1#18 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)wg-kex-wg1#18 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg1#18 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock 
rcu_read_lock_bh &peer->keypairs.keypair_update_lock irq_context: 0 (wq_completion)wg-kex-wg1#18 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock rcu_read_lock_bh &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg1#18 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg1#18 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh &base->lock irq_context: 0 (wq_completion)wg-kex-wg1#18 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg1#18 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock irq_context: 0 (wq_completion)wg-kex-wg1#18 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 (wq_completion)wg-kex-wg1#18 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)wg-kex-wg1#18 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &ul->lock irq_context: 0 (wq_completion)wg-kex-wg1#18 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); 
(typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#7 irq_context: 0 (wq_completion)wg-kex-wg1#18 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)wg-kex-wg1#18 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg1#18 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: 0 (wq_completion)wg-kex-wg2#17 irq_context: 0 (wq_completion)wg-kex-wg2#17 (work_completion)(&peer->transmit_handshake_work) irq_context: 0 (wq_completion)wg-kex-wg2#17 (work_completion)(&peer->transmit_handshake_work) tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg2#17 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock irq_context: 0 (wq_completion)wg-kex-wg2#17 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock irq_context: 0 (wq_completion)wg-kex-wg2#17 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock crngs.lock irq_context: 0 (wq_completion)wg-kex-wg2#17 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg2#17 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg2#17 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock rcu_read_lock_bh batched_entropy_u32.lock irq_context: 0 (wq_completion)wg-kex-wg2#17 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock rcu_read_lock_bh &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg2#17 (work_completion)(&peer->transmit_handshake_work) &cookie->lock irq_context: 0 (wq_completion)wg-kex-wg2#17 (work_completion)(&peer->transmit_handshake_work) &cookie->lock irq_context: 0 (wq_completion)wg-kex-wg2#17 (work_completion)(&peer->transmit_handshake_work) rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg2#17 (work_completion)(&peer->transmit_handshake_work) rcu_read_lock_bh &base->lock irq_context: 0 (wq_completion)wg-kex-wg2#17 (work_completion)(&peer->transmit_handshake_work) rcu_read_lock_bh &base->lock &obj_hash[i].lock irq_context: 0 kn->active#51 &c->lock irq_context: 0 rtnl_mutex devnet_rename_sem irq_context: 0 rtnl_mutex devnet_rename_sem 
(console_sem).lock irq_context: 0 rtnl_mutex devnet_rename_sem console_lock console_srcu console_owner_lock irq_context: 0 rtnl_mutex devnet_rename_sem console_lock console_srcu console_owner irq_context: 0 rtnl_mutex devnet_rename_sem console_lock console_srcu console_owner &port_lock_key irq_context: 0 rtnl_mutex devnet_rename_sem console_lock console_srcu console_owner console_owner_lock irq_context: 0 rtnl_mutex devnet_rename_sem &rq->__lock irq_context: 0 rtnl_mutex devnet_rename_sem fs_reclaim irq_context: 0 rtnl_mutex devnet_rename_sem fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 rtnl_mutex devnet_rename_sem pool_lock#2 irq_context: 0 rtnl_mutex devnet_rename_sem &k->list_lock irq_context: 0 rtnl_mutex devnet_rename_sem &root->kernfs_rwsem irq_context: 0 rtnl_mutex devnet_rename_sem &root->kernfs_rwsem irq_context: 0 rtnl_mutex devnet_rename_sem &root->kernfs_rwsem fs_reclaim irq_context: 0 rtnl_mutex devnet_rename_sem &root->kernfs_rwsem fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 rtnl_mutex devnet_rename_sem &root->kernfs_rwsem pool_lock#2 irq_context: 0 rtnl_mutex devnet_rename_sem &root->kernfs_rwsem kernfs_rename_lock irq_context: 0 rtnl_mutex devnet_rename_sem &root->kernfs_rwsem &obj_hash[i].lock irq_context: 0 rtnl_mutex devnet_rename_sem kernfs_rename_lock irq_context: 0 rtnl_mutex devnet_rename_sem uevent_sock_mutex irq_context: 0 rtnl_mutex devnet_rename_sem uevent_sock_mutex fs_reclaim irq_context: 0 rtnl_mutex devnet_rename_sem uevent_sock_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 rtnl_mutex devnet_rename_sem uevent_sock_mutex &c->lock irq_context: 0 rtnl_mutex devnet_rename_sem uevent_sock_mutex pool_lock#2 irq_context: 0 rtnl_mutex devnet_rename_sem uevent_sock_mutex nl_table_lock irq_context: 0 rtnl_mutex devnet_rename_sem uevent_sock_mutex &obj_hash[i].lock irq_context: 0 rtnl_mutex devnet_rename_sem uevent_sock_mutex nl_table_wait.lock irq_context: 0 rtnl_mutex devnet_rename_sem &obj_hash[i].lock irq_context: 0 rtnl_mutex (work_completion)(&(&devlink_port->type_warn_dw)->work) irq_context: 0 rtnl_mutex &devlink_port->type_lock irq_context: 0 rtnl_mutex sysctl_lock &obj_hash[i].lock irq_context: 0 rtnl_mutex sysctl_lock pool_lock#2 irq_context: 0 rtnl_mutex sysctl_lock krc.lock irq_context: 0 rtnl_mutex &nft_net->commit_mutex irq_context: 0 rtnl_mutex &ent->pde_unload_lock irq_context: 0 (wq_completion)wg-kex-wg2#17 (work_completion)(&peer->transmit_handshake_work) &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg2#17 (work_completion)(&peer->transmit_handshake_work) &c->lock irq_context: 0 (wq_completion)wg-kex-wg2#17 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock irq_context: 0 (wq_completion)wg-kex-wg2#17 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)wg-kex-wg2#17 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#7 irq_context: 0 (wq_completion)wg-kex-wg2#17 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &ul->lock irq_context: 0 (wq_completion)wg-kex-wg2#17 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)wg-kex-wg2#17 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh batched_entropy_u32.lock irq_context: 0 
(wq_completion)wg-kex-wg2#17 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg2#17 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: 0 (wq_completion)wg-kex-wg2#17 (work_completion)(&peer->transmit_handshake_work) batched_entropy_u8.lock irq_context: 0 cb_lock genl_mutex uevent_sock_mutex &____s->seqcount#2 irq_context: 0 cb_lock genl_mutex uevent_sock_mutex &____s->seqcount irq_context: 0 cb_lock genl_mutex cpu_hotplug_lock wq_pool_mutex fill_pool_map-wait-type-override &c->lock irq_context: 0 cb_lock genl_mutex cpu_hotplug_lock wq_pool_mutex fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 cb_lock genl_mutex cpu_hotplug_lock wq_pool_mutex fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)wg-kex-wg0#18 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &____s->seqcount#2 irq_context: 0 (wq_completion)wg-kex-wg0#18 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &____s->seqcount irq_context: 0 (wq_completion)wg-kex-wg1#18 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg1#18 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh batched_entropy_u32.lock irq_context: 0 (wq_completion)wg-kex-wg2#18 irq_context: 0 (wq_completion)events (work_completion)(&data->fib_event_work) &data->fib_lock &pool->lock &p->pi_lock irq_context: 0 (wq_completion)events (work_completion)(&data->fib_event_work) &data->fib_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&data->fib_event_work) &data->fib_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &tb->tb6_lock rcu_read_lock &data->fib_event_queue_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &tb->tb6_lock rcu_read_lock rcu_read_lock &pool->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &tb->tb6_lock rcu_read_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) 
rtnl_mutex &tb->tb6_lock rcu_read_lock rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 pool_lock#2 irq_context: 0 rtnl_mutex &idev->mc_lock &macvlan_netdev_addr_lock_key &c->lock irq_context: 0 rtnl_mutex &idev->mc_lock &macvlan_netdev_addr_lock_key &____s->seqcount irq_context: 0 (wq_completion)wg-kex-wg2#18 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) irq_context: 0 (wq_completion)wg-kex-wg2#18 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &r->consumer_lock#2 irq_context: 0 (wq_completion)wg-kex-wg2#18 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock irq_context: 0 (wq_completion)wg-kex-wg2#18 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock irq_context: 0 (wq_completion)wg-kex-wg2#18 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock irq_context: 0 (wq_completion)wg-kex-wg2#18 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock irq_context: 0 (wq_completion)wg-kex-wg2#18 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock irq_context: 0 (wq_completion)wg-kex-wg2#18 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); 
(void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock fs_reclaim irq_context: 0 (wq_completion)wg-kex-wg2#18 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)wg-kex-wg2#18 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg2#18 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock rcu_read_lock_bh &peer->keypairs.keypair_update_lock irq_context: 0 (wq_completion)wg-kex-wg2#18 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock rcu_read_lock_bh &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg2#18 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg2#18 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh &base->lock irq_context: 0 (wq_completion)wg-kex-wg2#18 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg2#18 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg2#18 (work_completion)(&({ do { const void 
*__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &base->lock irq_context: 0 (wq_completion)wg-kex-wg2#18 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg2#18 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg2#18 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &list->lock#17 irq_context: 0 (wq_completion)wg-kex-wg2#18 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh &r->producer_lock#2 irq_context: 0 (wq_completion)wg-kex-wg2#18 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh rcu_read_lock &pool->lock irq_context: 0 (wq_completion)wg-kex-wg2#18 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg2#18 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 (wq_completion)wg-crypt-wg2#9 irq_context: 0 (wq_completion)wg-crypt-wg2#9 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) irq_context: 0 (wq_completion)wg-crypt-wg2#9 (work_completion)(&({ do { const void 
*__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &r->consumer_lock#2 irq_context: 0 (wq_completion)wg-crypt-wg2#9 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock irq_context: 0 (wq_completion)wg-crypt-wg2#9 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg2#9 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 (wq_completion)wg-crypt-wg2#9 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)wg-crypt-wg2#9 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-crypt-wg2#9 (work_completion)(&peer->transmit_packet_work) irq_context: 0 (wq_completion)wg-crypt-wg2#9 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg2#9 (work_completion)(&peer->transmit_packet_work) &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg2#9 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock irq_context: 0 (wq_completion)wg-crypt-wg2#9 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#7 irq_context: 0 (wq_completion)wg-crypt-wg2#9 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)wg-crypt-wg2#9 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)wg-crypt-wg2#9 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &ul->lock irq_context: 0 (wq_completion)wg-crypt-wg2#9 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh 
rcu_read_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-crypt-wg2#9 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: 0 (wq_completion)wg-crypt-wg2#9 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-crypt-wg0#9 irq_context: 0 (wq_completion)wg-crypt-wg0#9 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) irq_context: 0 (wq_completion)wg-crypt-wg0#9 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &r->consumer_lock#2 irq_context: 0 (wq_completion)wg-crypt-wg0#9 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-crypt-wg1#9 irq_context: 0 (wq_completion)wg-crypt-wg1#9 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) irq_context: 0 (wq_completion)wg-crypt-wg1#9 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &r->consumer_lock#2 irq_context: 0 (wq_completion)wg-crypt-wg1#9 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-crypt-wg0#9 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock irq_context: 0 (wq_completion)wg-crypt-wg0#9 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg0#9 (work_completion)(&({ do { const void 
*__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 (wq_completion)wg-crypt-wg0#9 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)wg-crypt-wg0#9 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-crypt-wg1#9 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock irq_context: 0 (wq_completion)wg-crypt-wg1#9 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg1#9 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 (wq_completion)wg-crypt-wg0#9 (work_completion)(&peer->transmit_packet_work) irq_context: 0 (wq_completion)wg-crypt-wg0#9 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg0#9 (work_completion)(&peer->transmit_packet_work) &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg0#9 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock irq_context: 0 (wq_completion)wg-crypt-wg0#9 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#7 irq_context: 0 (wq_completion)wg-crypt-wg0#9 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)wg-crypt-wg0#9 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-crypt-wg0#9 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: 0 (wq_completion)wg-crypt-wg1#9 
(work_completion)(&peer->transmit_packet_work) irq_context: 0 (wq_completion)wg-crypt-wg1#9 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg1#9 (work_completion)(&peer->transmit_packet_work) &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg1#9 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock irq_context: 0 (wq_completion)wg-crypt-wg1#9 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#7 irq_context: 0 (wq_completion)wg-crypt-wg1#9 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)wg-crypt-wg1#9 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-crypt-wg1#9 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: 0 (wq_completion)wg-crypt-wg2#9 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) tk_core.seq.seqcount irq_context: 0 (wq_completion)phy23 irq_context: 0 (wq_completion)phy23 (work_completion)(&local->reconfig_filter) irq_context: 0 (wq_completion)phy23 (work_completion)(&local->reconfig_filter) &local->filter_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx remove_cache_srcu pool_lock#2 irq_context: 0 kn->active#5 &pcp->lock &zone->lock &____s->seqcount irq_context: 0 (wq_completion)events wireless_nlevent_work net_rwsem &c->lock irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem &anon_vma->rwsem fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 (wq_completion)phy24 irq_context: 0 (wq_completion)phy24 (work_completion)(&local->reconfig_filter) irq_context: 0 (wq_completion)phy24 (work_completion)(&local->reconfig_filter) &local->filter_lock irq_context: 0 (wq_completion)cfg80211 (work_completion)(&rdev->event_work) &rdev->wiphy.mtx &wdev->mtx &cfs_rq->removed.lock irq_context: 0 &type->i_mutex_dir_key#2 namespace_sem &c->lock irq_context: 0 kn->active#54 &____s->seqcount#2 irq_context: 0 kn->active#54 &____s->seqcount irq_context: 0 sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem.waiters.lock &p->pi_lock irq_context: 0 sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem.waiters.lock &p->pi_lock &rq->__lock irq_context: 0 sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem.waiters.lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 kn->active#56 per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 kn->active#56 &kernfs_locks->open_file_mutex[count] &____s->seqcount#2 irq_context: 0 kn->active#56 &kernfs_locks->open_file_mutex[count] &____s->seqcount irq_context: 0 kn->active#57 &c->lock irq_context: 0 sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem &rq->__lock irq_context: 0 sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events 
(work_completion)(&data->fib_event_work) &data->fib_lock &c->lock irq_context: 0 rtnl_mutex devnet_rename_sem &c->lock irq_context: 0 rtnl_mutex sysctl_lock &obj_hash[i].lock pool_lock irq_context: 0 rtnl_mutex sysctl_lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 rtnl_mutex sysctl_lock fill_pool_map-wait-type-override pool_lock irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock &lruvec->lru_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 (wq_completion)gid-cache-wq (work_completion)(&ndev_work->work) devices_rwsem rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock &im->lock batched_entropy_u8.lock crngs.lock irq_context: 0 (wq_completion)events (shepherd).work fill_pool_map-wait-type-override pool_lock irq_context: 0 sk_lock-AF_INET krc.lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 sk_lock-AF_INET krc.lock fill_pool_map-wait-type-override pool_lock irq_context: 0 sk_lock-AF_INET krc.lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET krc.lock &base->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &ndev->lock &ifa->lock batched_entropy_u32.lock crngs.lock irq_context: 0 sk_lock-AF_INET krc.lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&tbl->gc_work)->work) &tbl->lock nl_table_wait.lock &p->pi_lock irq_context: 0 rtnl_mutex devnet_rename_sem &n->list_lock irq_context: 0 rtnl_mutex devnet_rename_sem &n->list_lock &c->lock irq_context: 0 (wq_completion)events (work_completion)(&data->fib_event_work) &data->fib_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&tbl->gc_work)->work) &tbl->lock nl_table_wait.lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&tbl->gc_work)->work) &tbl->lock nl_table_wait.lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)wg-kex-wg0#14 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock fs_reclaim irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-kex-wg0#14 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); 
(typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-kex-wg0#14 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg0#14 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock rcu_read_lock_bh &peer->keypairs.keypair_update_lock irq_context: 0 (wq_completion)wg-kex-wg0#14 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock rcu_read_lock_bh &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg0#14 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg0#14 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh &base->lock irq_context: 0 (wq_completion)wg-kex-wg0#14 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.expedited_wq irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)wg-kex-wg0#14 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex 
&idev->mc_lock rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#9 &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#9 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 &obj_hash[i].lock pool_lock irq_context: softirq &(&forw_packet_aggr->delayed_work)->timer rcu_read_lock &pool->lock/1 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 rtnl_mutex rcu_read_lock fill_pool_map-wait-type-override &c->lock irq_context: 0 rtnl_mutex rcu_read_lock fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &tb->tb6_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 cb_lock nlk_cb_mutex-GENERIC rcu_read_lock rcu_node_0 irq_context: 0 cb_lock nlk_cb_mutex-GENERIC rcu_read_lock &rq->__lock irq_context: 0 cb_lock nlk_cb_mutex-GENERIC &devlink->lock_key#8 irq_context: 0 cb_lock nlk_cb_mutex-GENERIC &devlink->lock_key#8 &devlink_port->type_lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#8 &nsim_trap_data->trap_lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#8 &nsim_trap_data->trap_lock &c->lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#8 &nsim_trap_data->trap_lock crngs.lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#8 &nsim_trap_data->trap_lock &nsim_dev->fa_cookie_lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#8 &nsim_trap_data->trap_lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#8 &nsim_trap_data->trap_lock &n->list_lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#8 &nsim_trap_data->trap_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#8 &nsim_trap_data->trap_lock &____s->seqcount#2 irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#8 &nsim_trap_data->trap_lock &____s->seqcount irq_context: 0 cb_lock genl_mutex rtnl_mutex 
&wg->device_update_lock &data->lock irq_context: 0 rtnl_mutex rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override &n->list_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &rq->__lock irq_context: 0 rtnl_mutex rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock &data->lock irq_context: 0 rtnl_mutex &ndev->lock &pcp->lock &zone->lock irq_context: 0 rtnl_mutex &ndev->lock &pcp->lock &zone->lock &____s->seqcount irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh noop_qdisc.q.lock irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &data->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh noop_qdisc.q.lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &data->lock irq_context: 0 (wq_completion)events deferred_process_work &rq->__lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock &data->lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &br->lock &br->hash_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &tbl->lock krc.lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &tbl->lock krc.lock &base->lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &tbl->lock krc.lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#8 irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#8 fs_reclaim irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#8 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#8 &c->lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#8 &____s->seqcount#2 irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#8 &____s->seqcount irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#8 rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#8 &obj_hash[i].lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#8 nl_table_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#8 nl_table_wait.lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#8 
net_rwsem irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#8 net_rwsem &list->lock#2 irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#8 &tn->lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#8 &n->list_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &wg->device_update_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &wg->device_update_lock &wg->static_identity.lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &wg->device_update_lock &wg->static_identity.lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &wg->device_update_lock fs_reclaim irq_context: 0 cb_lock genl_mutex rtnl_mutex &wg->device_update_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 cb_lock genl_mutex rtnl_mutex &wg->device_update_lock &____s->seqcount irq_context: 0 cb_lock genl_mutex rtnl_mutex &wg->device_update_lock pool_lock#2 irq_context: 0 cb_lock genl_mutex rtnl_mutex &wg->device_update_lock &c->lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &wg->device_update_lock pcpu_alloc_mutex irq_context: 0 cb_lock genl_mutex rtnl_mutex &wg->device_update_lock pcpu_alloc_mutex pcpu_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &wg->device_update_lock &handshake->lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &wg->device_update_lock &obj_hash[i].lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &wg->device_update_lock tk_core.seq.seqcount irq_context: 0 cb_lock genl_mutex rtnl_mutex &wg->device_update_lock &table->lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &wg->device_update_lock &peer->endpoint_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &wg->device_update_lock &obj_hash[i].lock pool_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &wg->device_update_lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 cb_lock genl_mutex rtnl_mutex &wg->device_update_lock fill_pool_map-wait-type-override &c->lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &wg->device_update_lock fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#8 &n->list_lock &c->lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &wg->device_update_lock fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock &data->lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &wg->device_update_lock rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock nsim_bus_dev_list_lock.wait_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock nlk_cb_mutex-GENERIC rcu_read_lock &rcu_state.gp_wq irq_context: 0 cb_lock nlk_cb_mutex-GENERIC rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 cb_lock nlk_cb_mutex-GENERIC rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 cb_lock nlk_cb_mutex-GENERIC rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock genl_mutex rtnl_mutex &wg->device_update_lock &rq->__lock irq_context: 0 cb_lock genl_mutex rtnl_mutex rtnl_mutex.wait_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rq->__lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 
cb_lock genl_mutex rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 cb_lock genl_mutex rtnl_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex devnet_rename_sem &____s->seqcount irq_context: 0 rtnl_mutex devnet_rename_sem uevent_sock_mutex &n->list_lock irq_context: 0 rtnl_mutex devnet_rename_sem uevent_sock_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 &data->lock irq_context: 0 cb_lock genl_mutex rtnl_mutex.wait_lock irq_context: 0 cb_lock genl_mutex &p->pi_lock irq_context: 0 cb_lock genl_mutex &p->pi_lock &cfs_rq->removed.lock irq_context: 0 cb_lock genl_mutex &p->pi_lock &rq->__lock irq_context: 0 cb_lock genl_mutex &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem rcu_read_lock &data->fib_event_queue_lock irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem rcu_read_lock &____s->seqcount irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem rcu_read_lock rcu_read_lock pool_lock#2 irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem rcu_read_lock &obj_hash[i].lock irq_context: 0 rtnl_mutex rcu_read_lock &data->fib_event_queue_lock irq_context: 0 rtnl_mutex &tb->tb6_lock rcu_read_lock &data->fib_event_queue_lock irq_context: 0 rtnl_mutex _xmit_SIT irq_context: 0 rtnl_mutex &tb->tb6_lock rcu_read_lock &c->lock irq_context: 0 rtnl_mutex &tb->tb6_lock rcu_read_lock &____s->seqcount irq_context: 0 rtnl_mutex dev_addr_sem team->team_lock_key#8 irq_context: 0 rtnl_mutex &dev_addr_list_lock_key#2/1 rcu_read_lock _xmit_ETHER &c->lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &br->lock &____s->seqcount#2 irq_context: softirq rcu_read_lock &br->hash_lock &____s->seqcount#2 irq_context: softirq rcu_read_lock &br->hash_lock &____s->seqcount irq_context: 0 rtnl_mutex rcu_read_lock &net->sctp.local_addr_lock &net->sctp.addr_wq_lock &____s->seqcount irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &tb->tb6_lock rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &tb->tb6_lock rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &tb->tb6_lock rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &tb->tb6_lock quarantine_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock remove_cache_srcu irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock remove_cache_srcu quarantine_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock remove_cache_srcu &c->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock remove_cache_srcu &n->list_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)ipv6_addrconf 
(work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock remove_cache_srcu &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &bridge_netdev_addr_lock_key/1 irq_context: softirq rcu_read_lock &br->multicast_lock &____s->seqcount#2 irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock &data->lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#8 &rq->__lock irq_context: 0 rtnl_mutex dev_addr_sem &br->lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#8 lweventlist_lock irq_context: 0 rtnl_mutex dev_addr_sem &br->lock &br->hash_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#8 lweventlist_lock &dir->lock#2 irq_context: 0 rtnl_mutex dev_addr_sem &br->lock &br->hash_lock pool_lock#2 irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#8 rcu_read_lock &pool->lock irq_context: 0 rtnl_mutex dev_addr_sem &br->lock &br->hash_lock rcu_read_lock rhashtable_bucket irq_context: 0 rtnl_mutex dev_addr_sem &br->lock &br->hash_lock nl_table_lock irq_context: 0 rtnl_mutex dev_addr_sem &br->lock &br->hash_lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#8 rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex dev_addr_sem &br->lock &br->hash_lock nl_table_wait.lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &hsr->seqnr_lock &data->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 &data->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &pcp->lock &zone->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &pcp->lock &zone->lock &____s->seqcount irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh &hsr->seqnr_lock &data->lock irq_context: 0 rtnl_mutex &bridge_netdev_addr_lock_key/1 irq_context: 0 rtnl_mutex &br->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex &br->lock &base->lock irq_context: 0 rtnl_mutex &br->lock &base->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex &bridge_netdev_addr_lock_key/1 pool_lock#2 irq_context: 0 rtnl_mutex &idev->mc_lock &bridge_netdev_addr_lock_key/1 irq_context: 0 rtnl_mutex &idev->mc_lock &bridge_netdev_addr_lock_key/1 pool_lock#2 irq_context: softirq (&app->periodic_timer) irq_context: softirq (&brmctx->ip6_own_query.timer) irq_context: softirq (&app->periodic_timer) &app->lock irq_context: softirq (&app->periodic_timer) &app->lock &obj_hash[i].lock irq_context: softirq (&app->periodic_timer) &app->lock &base->lock irq_context: softirq (&app->periodic_timer) &app->lock &base->lock &obj_hash[i].lock irq_context: softirq (&brmctx->ip6_own_query.timer) &br->multicast_lock irq_context: softirq (&brmctx->ip4_own_query.timer) irq_context: softirq (&brmctx->ip4_own_query.timer) &br->multicast_lock irq_context: 0 rtnl_mutex &idev->mc_lock &rq->__lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) 
&hard_iface->bat_iv.ogm_buff_mutex &c->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&(&hdev->cmd_timer)->work) irq_context: 0 (wq_completion)hci4#2 (work_completion)(&(&hdev->cmd_timer)->work) (console_sem).lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&(&hdev->cmd_timer)->work) console_lock console_srcu console_owner_lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&(&hdev->cmd_timer)->work) console_lock console_srcu console_owner irq_context: 0 (wq_completion)hci4#2 (work_completion)(&(&hdev->cmd_timer)->work) console_lock console_srcu console_owner &port_lock_key irq_context: 0 (wq_completion)hci4#2 (work_completion)(&(&hdev->cmd_timer)->work) console_lock console_srcu console_owner console_owner_lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&(&hdev->cmd_timer)->work) rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)hci4#2 (work_completion)(&(&hdev->cmd_timer)->work) rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&(&hdev->cmd_timer)->work) rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&(&hdev->cmd_timer)->work) rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&(&hdev->cmd_timer)->work) rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &macvlan_netdev_addr_lock_key/1 &n->list_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &macvlan_netdev_addr_lock_key/1 &n->list_lock &c->lock irq_context: 0 (wq_completion)wg-kex-wg0#15 irq_context: 0 (wq_completion)wg-kex-wg0#15 (work_completion)(&peer->transmit_handshake_work) irq_context: 0 (wq_completion)wg-kex-wg0#15 (work_completion)(&peer->transmit_handshake_work) tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg0#15 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock irq_context: 0 (wq_completion)wg-kex-wg0#15 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock irq_context: 0 (wq_completion)wg-kex-wg0#15 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock crngs.lock irq_context: 0 (wq_completion)wg-kex-wg0#15 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg0#15 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg0#15 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock rcu_read_lock_bh batched_entropy_u32.lock irq_context: 0 (wq_completion)wg-kex-wg0#15 
(work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock rcu_read_lock_bh &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg0#15 (work_completion)(&peer->transmit_handshake_work) &cookie->lock irq_context: 0 (wq_completion)wg-kex-wg0#15 (work_completion)(&peer->transmit_handshake_work) &cookie->lock irq_context: 0 (wq_completion)wg-kex-wg0#15 (work_completion)(&peer->transmit_handshake_work) rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg0#15 (work_completion)(&peer->transmit_handshake_work) rcu_read_lock_bh &base->lock irq_context: 0 (wq_completion)wg-kex-wg0#15 (work_completion)(&peer->transmit_handshake_work) rcu_read_lock_bh &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg0#15 (work_completion)(&peer->transmit_handshake_work) &rq->__lock irq_context: 0 (wq_completion)wg-kex-wg0#15 (work_completion)(&peer->transmit_handshake_work) &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg0#15 (work_completion)(&peer->transmit_handshake_work) batched_entropy_u8.lock irq_context: 0 (wq_completion)wg-kex-wg0#15 (work_completion)(&peer->transmit_handshake_work) kfence_freelist_lock irq_context: 0 (wq_completion)wg-kex-wg0#15 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock irq_context: 0 (wq_completion)wg-kex-wg0#15 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)wg-kex-wg0#15 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh crngs.lock irq_context: 0 (wq_completion)wg-kex-wg0#15 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh batched_entropy_u32.lock irq_context: 0 (wq_completion)wg-kex-wg0#15 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#7 irq_context: 0 (wq_completion)wg-kex-wg0#15 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &nf_conntrack_locks[i] irq_context: 0 (wq_completion)wg-kex-wg0#15 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 irq_context: 0 (wq_completion)wg-kex-wg0#15 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 batched_entropy_u8.lock irq_context: 0 (wq_completion)wg-kex-wg0#15 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg0#15 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)wg-kex-wg0#15 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &n->lock irq_context: 0 (wq_completion)wg-kex-wg0#15 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)wg-kex-wg0#15 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg0#15 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: 0 (wq_completion)wg-kex-wg0#15 
(work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)wg-kex-wg1#15 irq_context: 0 (wq_completion)wg-kex-wg1#15 (work_completion)(&peer->transmit_handshake_work) irq_context: 0 (wq_completion)wg-kex-wg1#15 (work_completion)(&peer->transmit_handshake_work) tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg1#15 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock irq_context: 0 (wq_completion)wg-kex-wg1#15 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock irq_context: 0 (wq_completion)wg-kex-wg1#15 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock crngs.lock irq_context: 0 (wq_completion)wg-kex-wg1#15 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg1#15 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg1#15 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock rcu_read_lock_bh batched_entropy_u32.lock irq_context: 0 (wq_completion)wg-kex-wg1#15 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock rcu_read_lock_bh &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg1#15 (work_completion)(&peer->transmit_handshake_work) &cookie->lock irq_context: 0 (wq_completion)wg-kex-wg1#15 (work_completion)(&peer->transmit_handshake_work) &cookie->lock irq_context: 0 (wq_completion)wg-kex-wg1#15 (work_completion)(&peer->transmit_handshake_work) rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg1#15 (work_completion)(&peer->transmit_handshake_work) rcu_read_lock_bh &base->lock irq_context: 0 (wq_completion)wg-kex-wg1#15 (work_completion)(&peer->transmit_handshake_work) rcu_read_lock_bh &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg1#15 (work_completion)(&peer->transmit_handshake_work) &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg1#15 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock irq_context: 0 (wq_completion)wg-kex-wg1#15 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)wg-kex-wg1#15 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)wg-kex-wg1#15 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &ul->lock irq_context: 0 (wq_completion)wg-kex-wg1#15 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#7 irq_context: 0 (wq_completion)wg-kex-wg1#15 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &nf_conntrack_locks[i] irq_context: 0 (wq_completion)wg-kex-wg1#15 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 irq_context: 0 (wq_completion)wg-kex-wg1#15 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 batched_entropy_u8.lock 
irq_context: 0 (wq_completion)wg-kex-wg1#15 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &nf_conntrack_locks[i]/1 irq_context: 0 (wq_completion)wg-kex-wg1#15 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)wg-kex-wg1#15 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg1#15 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: 0 (wq_completion)wg-kex-wg1#15 (work_completion)(&peer->transmit_handshake_work) &rq->__lock irq_context: 0 (wq_completion)wg-kex-wg1#15 (work_completion)(&peer->transmit_handshake_work) batched_entropy_u8.lock irq_context: 0 (wq_completion)wg-kex-wg0#16 irq_context: 0 (wq_completion)wg-kex-wg0#16 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) irq_context: 0 (wq_completion)wg-kex-wg0#16 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &r->consumer_lock#2 irq_context: 0 (wq_completion)wg-kex-wg0#16 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock irq_context: 0 (wq_completion)wg-kex-wg0#16 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock irq_context: 0 (wq_completion)wg-kex-wg0#16 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg0#16 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock irq_context: 0 (wq_completion)wg-kex-wg0#16 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) 
*)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg0#16 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock irq_context: 0 (wq_completion)wg-kex-wg0#16 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg0#16 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock crngs.lock irq_context: 0 (wq_completion)wg-kex-wg0#16 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg0#16 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock rcu_read_lock_bh batched_entropy_u32.lock irq_context: 0 (wq_completion)wg-kex-wg0#16 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock rcu_read_lock_bh &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg0#16 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &cookie->lock irq_context: 0 (wq_completion)wg-kex-wg0#16 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &cookie->lock irq_context: 0 (wq_completion)wg-kex-wg0#16 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ 
unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock irq_context: 0 (wq_completion)wg-kex-wg0#16 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock fs_reclaim irq_context: 0 (wq_completion)wg-kex-wg0#16 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)wg-kex-wg0#16 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock remove_cache_srcu irq_context: 0 (wq_completion)wg-kex-wg0#16 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock remove_cache_srcu quarantine_lock irq_context: 0 (wq_completion)wg-kex-wg0#16 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock remove_cache_srcu &c->lock irq_context: 0 (wq_completion)wg-kex-wg0#16 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock remove_cache_srcu &n->list_lock irq_context: 0 (wq_completion)wg-kex-wg0#16 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)wg-kex-wg0#16 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock remove_cache_srcu &obj_hash[i].lock irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem rcu_read_lock rcu_read_lock &pool->lock 
irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem rcu_read_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem rcu_read_lock rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: softirq (&in_dev->mr_ifc_timer) irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock &in_dev->mc_tomb_lock irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock &im->lock irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock &im->lock pool_lock#2 irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock &im->lock rcu_read_lock pool_lock#2 irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock &im->lock rcu_read_lock &dir->lock#2 irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock &im->lock rcu_read_lock &ul->lock irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock &____s->seqcount#7 irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock pool_lock#2 irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock &nf_conntrack_locks[i] irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock &nf_conntrack_locks[i] batched_entropy_u8.lock irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock &obj_hash[i].lock irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock &dir->lock#2 irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock &tbl->lock irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock &n->lock irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock &____s->seqcount#9 irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock rcu_read_lock_bh noop_qdisc.q.lock irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock rcu_read_lock_bh pool_lock#2 irq_context: softirq (&in_dev->mr_ifc_timer) batched_entropy_u32.lock irq_context: softirq (&in_dev->mr_ifc_timer) &obj_hash[i].lock irq_context: softirq (&in_dev->mr_ifc_timer) &base->lock irq_context: softirq (&in_dev->mr_ifc_timer) &base->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex _xmit_TUNNEL irq_context: 0 rtnl_mutex _xmit_IPGRE irq_context: 0 (wq_completion)wg-kex-wg0#16 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock remove_cache_srcu &pcp->lock &zone->lock irq_context: 0 (wq_completion)wg-kex-wg0#16 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock remove_cache_srcu &pcp->lock &zone->lock &____s->seqcount irq_context: 0 (wq_completion)wg-kex-wg0#14 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock irq_context: 0 (wq_completion)wg-kex-wg0#14 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); 
(void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)wg-kex-wg0#16 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock tk_core.seq.seqcount irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-kex-wg0#16 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock rcu_read_lock_bh &peer->keypairs.keypair_update_lock irq_context: 0 (wq_completion)wg-kex-wg0#16 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock rcu_read_lock_bh &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg0#16 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg0#16 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh &base->lock irq_context: 0 (wq_completion)wg-kex-wg0#16 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg0#16 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg0#16 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while 
(0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock irq_context: 0 (wq_completion)wg-kex-wg0#16 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#7 irq_context: 0 (wq_completion)wg-kex-wg0#16 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)wg-kex-wg0#16 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg0#16 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: 0 (wq_completion)wg-kex-wg1#16 irq_context: 0 rtnl_mutex &tb->tb6_lock &pcp->lock &zone->lock irq_context: 0 rtnl_mutex &tb->tb6_lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &tb->tb6_lock nl_table_wait.lock &p->pi_lock irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#9 &nsim_trap_data->trap_lock batched_entropy_u8.lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#9 &nsim_trap_data->trap_lock kfence_freelist_lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#9 &nsim_trap_data->trap_lock &meta->lock irq_context: 0 sb_writers#8 kn->active#5 &kernfs_locks->open_file_mutex[count] remove_cache_srcu pool_lock#2 irq_context: 0 (wq_completion)wg-crypt-wg0#9 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh &base->lock irq_context: 0 
(wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex nl_table_wait.lock &p->pi_lock irq_context: 0 rtnl_mutex team->team_lock_key#11 console_lock console_srcu console_owner_lock irq_context: 0 rtnl_mutex team->team_lock_key#11 console_lock console_srcu console_owner irq_context: 0 rtnl_mutex team->team_lock_key#11 console_lock console_srcu console_owner &port_lock_key irq_context: 0 rtnl_mutex team->team_lock_key#11 console_lock console_srcu console_owner console_owner_lock irq_context: 0 rtnl_mutex team->team_lock_key#11 _xmit_ETHER &n->list_lock irq_context: 0 sb_writers#4 tomoyo_ss remove_cache_srcu rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ret->b_state_lock bit_wait_table + i irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ret->b_state_lock bit_wait_table + i &p->pi_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ret->b_state_lock bit_wait_table + i &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ret->b_state_lock bit_wait_table + i &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex team->team_lock_key#10 cbs_list_lock irq_context: 0 rtnl_mutex team->team_lock_key#10 &ndev->lock irq_context: 0 rtnl_mutex team->team_lock_key#10 sysfs_symlink_target_lock irq_context: 0 rtnl_mutex team->team_lock_key#10 lock irq_context: 0 rtnl_mutex team->team_lock_key#10 lock kernfs_idr_lock irq_context: 0 (wq_completion)bond0#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond0#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond0#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond0#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond0#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 rtnl_mutex team->team_lock_key#9 cbs_list_lock irq_context: 0 rtnl_mutex team->team_lock_key#9 &ndev->lock irq_context: 0 rtnl_mutex team->team_lock_key#9 &n->list_lock irq_context: 0 rtnl_mutex team->team_lock_key#9 &n->list_lock &c->lock irq_context: 0 rtnl_mutex team->team_lock_key#9 sysfs_symlink_target_lock irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)wg-kex-wg1#16 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) irq_context: 0 (wq_completion)wg-kex-wg1#16 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &r->consumer_lock#2 irq_context: 0 (wq_completion)wg-kex-wg1#16 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) 
*)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-kex-wg1#16 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock batched_entropy_u32.lock crngs.lock irq_context: 0 rtnl_mutex &ndev->lock &c->lock irq_context: 0 rtnl_mutex &ndev->lock &ifa->lock batched_entropy_u32.lock irq_context: 0 rtnl_mutex &ndev->lock &ifa->lock crngs.lock irq_context: 0 rtnl_mutex &ndev->lock &ifa->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex &ndev->lock &ifa->lock &base->lock irq_context: 0 rtnl_mutex &ndev->lock &ifa->lock &base->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex rcu_read_lock &tb->tb6_lock rt6_exception_lock irq_context: softirq &(&conn->info_timer)->timer irq_context: softirq &(&conn->info_timer)->timer rcu_read_lock &pool->lock irq_context: softirq &(&conn->info_timer)->timer rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &wg->device_update_lock fill_pool_map-wait-type-override &rq->__lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &wg->device_update_lock fill_pool_map-wait-type-override &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock genl_mutex &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&(&conn->info_timer)->work) irq_context: 0 (wq_completion)events (work_completion)(&(&conn->info_timer)->work) &conn->chan_lock irq_context: 0 (wq_completion)wg-kex-wg1#16 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock irq_context: 0 (wq_completion)wg-kex-wg1#16 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock irq_context: 0 (wq_completion)wg-kex-wg1#16 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock irq_context: 0 (wq_completion)wg-kex-wg1#16 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock fs_reclaim irq_context: 0 (wq_completion)wg-kex-wg1#16 
(work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)wg-kex-wg1#16 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock &c->lock irq_context: 0 (wq_completion)wg-kex-wg1#16 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock &n->list_lock irq_context: 0 (wq_completion)wg-kex-wg1#16 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock &n->list_lock &c->lock irq_context: 0 (wq_completion)wg-kex-wg1#16 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg1#16 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock rcu_read_lock_bh &peer->keypairs.keypair_update_lock irq_context: 0 (wq_completion)wg-kex-wg1#16 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock rcu_read_lock_bh &table->lock#2 irq_context: 0 cb_lock &rq->__lock irq_context: 0 cb_lock batched_entropy_u8.lock irq_context: 0 cb_lock kfence_freelist_lock irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)wg-kex-wg1#16 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 rtnl_mutex _xmit_TUNNEL6 irq_context: softirq &(&br->gc_work)->timer irq_context: softirq &(&br->gc_work)->timer rcu_read_lock &pool->lock 
irq_context: softirq &(&br->gc_work)->timer rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: softirq &(&br->gc_work)->timer rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_TUNNEL6#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_TUNNEL6#2 rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_TUNNEL6#2 &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_TUNNEL6#2 pool_lock#2 irq_context: 0 (wq_completion)events_long (work_completion)(&(&br->gc_work)->work) irq_context: 0 (wq_completion)events_long (work_completion)(&(&br->gc_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)events_long (work_completion)(&(&br->gc_work)->work) &base->lock irq_context: 0 (wq_completion)events_long (work_completion)(&(&br->gc_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh noop_qdisc.q.lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock pool_lock#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock batched_entropy_u32.lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock &obj_hash[i].lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock &obj_hash[i].lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock pool_lock#2 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex uevent_sock_mutex &c->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex uevent_sock_mutex &____s->seqcount irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 crngs.lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 fs_reclaim irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 devlinks.xa_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 
nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 &obj_hash[i].lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 nl_table_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 nl_table_wait.lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 &xa->xa_lock#14 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 &c->lock irq_context: 0 rtnl_mutex rcu_read_lock &____s->seqcount irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 &____s->seqcount irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 pool_lock#2 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 &rq->__lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 pcpu_alloc_mutex irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 pcpu_alloc_mutex pcpu_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 &base->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 pin_fs_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 &sb->s_type->i_mutex_key#3 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 &sb->s_type->i_mutex_key#3 rename_lock.seqcount irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 &sb->s_type->i_mutex_key#3 fs_reclaim irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 &sb->s_type->i_mutex_key#3 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 &sb->s_type->i_mutex_key#3 &c->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 &sb->s_type->i_mutex_key#3 &dentry->d_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 &sb->s_type->i_mutex_key#3 rcu_read_lock rename_lock.seqcount irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 &sb->s_type->i_mutex_key#3 &dentry->d_lock &wq irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 &sb->s_type->i_mutex_key#3 mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 &sb->s_type->i_mutex_key#3 &sb->s_type->i_lock_key#7 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 &sb->s_type->i_mutex_key#3 &s->s_inode_list_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 &sb->s_type->i_mutex_key#3 tk_core.seq.seqcount irq_context: 0 
sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 &sb->s_type->i_mutex_key#3 &sb->s_type->i_lock_key#7 &dentry->d_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 &sb->s_type->i_mutex_key#3 &n->list_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 &sb->s_type->i_mutex_key#3 &n->list_lock &c->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 &sb->s_type->i_mutex_key#3 &____s->seqcount irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 &sb->s_type->i_mutex_key#3 pool_lock#2 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 &sb->s_type->i_mutex_key#3 &rq->__lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock pool_lock#2 irq_context: 0 (wq_completion)wg-kex-wg1#16 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh &base->lock irq_context: 0 (wq_completion)wg-kex-wg1#16 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh &base->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex &tb->tb6_lock rcu_read_lock rcu_read_lock &pool->lock irq_context: 0 rtnl_mutex &tb->tb6_lock rcu_read_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex &tb->tb6_lock rcu_read_lock rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 rtnl_mutex &tb->tb6_lock rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 rtnl_mutex &tb->tb6_lock rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 rtnl_mutex &tb->tb6_lock rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 batched_entropy_u32.lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex rtnl_mutex.wait_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex &rq->__lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex 
&devlink->lock_key#4 rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex devnet_rename_sem uevent_sock_mutex &____s->seqcount irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock &____s->seqcount irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem rcu_read_lock &pool->lock/1 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 rtnl_mutex dev_addr_sem &br->lock &br->hash_lock &c->lock irq_context: 0 rtnl_mutex dev_addr_sem &br->lock &br->hash_lock &____s->seqcount irq_context: 0 rtnl_mutex &bridge_netdev_addr_lock_key/1 &c->lock irq_context: 0 rtnl_mutex &bridge_netdev_addr_lock_key/1 &____s->seqcount irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex &(&net->nexthop.notifier_chain)->rwsem irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex.wait_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 &p->pi_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 &p->pi_lock &rq->__lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem rcu_read_lock &n->list_lock irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem rcu_read_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &tb->tb6_lock rcu_read_lock &____s->seqcount irq_context: 0 (wq_completion)events (work_completion)(&data->fib_event_work) &data->fib_lock &____s->seqcount irq_context: 0 rtnl_mutex devnet_rename_sem console_owner_lock irq_context: 0 rtnl_mutex devnet_rename_sem console_owner irq_context: 0 (wq_completion)wg-kex-wg0#20 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock crngs.lock irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 batched_entropy_u8.lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &bridge_netdev_addr_lock_key/1 &c->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &bridge_netdev_addr_lock_key/1 &____s->seqcount irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &bridge_netdev_addr_lock_key/1 pool_lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &ifa->lock rcu_read_lock &pool->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &ifa->lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 sb_writers#8 &of->mutex 
kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rcu_read_lock &data->fib_event_queue_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rcu_read_lock rcu_read_lock &pool->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rcu_read_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rcu_read_lock &tb->tb6_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rcu_read_lock &tb->tb6_lock &net->ipv6.fib6_walker_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rcu_read_lock &tb->tb6_lock &data->fib_event_queue_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rcu_read_lock &obj_hash[i].lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 &(&fn_net->fib_chain)->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 &devlink_port->type_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 stack_depot_init_mutex irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex bpf_devs_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex bpf_devs_lock fs_reclaim irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex bpf_devs_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex bpf_devs_lock rcu_read_lock rhashtable_bucket irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex bpf_devs_lock rcu_read_lock rcu_read_lock &pool->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex bpf_devs_lock rcu_read_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex bpf_devs_lock rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex pin_fs_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex &sb->s_type->i_mutex_key#3 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 
rtnl_mutex &sb->s_type->i_mutex_key#3 rename_lock.seqcount irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex &sb->s_type->i_mutex_key#3 fs_reclaim irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex &sb->s_type->i_mutex_key#3 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex &sb->s_type->i_mutex_key#3 &dentry->d_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex &sb->s_type->i_mutex_key#3 rcu_read_lock rename_lock.seqcount irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex &sb->s_type->i_mutex_key#3 &dentry->d_lock &wq irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex &sb->s_type->i_mutex_key#3 mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex &sb->s_type->i_mutex_key#3 &sb->s_type->i_lock_key#7 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex &sb->s_type->i_mutex_key#3 &s->s_inode_list_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex &sb->s_type->i_mutex_key#3 tk_core.seq.seqcount irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex &sb->s_type->i_mutex_key#3 &sb->s_type->i_lock_key#7 &dentry->d_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex &sb->s_type->i_mutex_key#3 &c->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex &____s->seqcount irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex &obj_hash[i].lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex fs_reclaim irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex &base->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex (work_completion)(&(&devlink_port->type_warn_dw)->work) irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex &devlink_port->type_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex &c->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex &pcp->lock &zone->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex &pcp->lock &zone->lock 
&____s->seqcount irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex nl_table_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex nl_table_wait.lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex net_rwsem irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex &tn->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex &x->wait#9 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex &k->list_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex gdp_mutex irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex gdp_mutex &k->list_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex gdp_mutex fs_reclaim irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex gdp_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex gdp_mutex lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex gdp_mutex lock kernfs_idr_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex gdp_mutex &root->kernfs_rwsem irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex gdp_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex gdp_mutex kobj_ns_type_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex lock kernfs_idr_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex &root->kernfs_rwsem irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex bus_type_sem irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex sysfs_symlink_target_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex &root->kernfs_rwsem irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex &dev->power.lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 
rtnl_mutex dpm_list_mtx irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex uevent_sock_mutex irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex uevent_sock_mutex fs_reclaim irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex uevent_sock_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex uevent_sock_mutex nl_table_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex uevent_sock_mutex &obj_hash[i].lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex uevent_sock_mutex nl_table_wait.lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex uevent_sock_mutex nl_table_wait.lock &p->pi_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex uevent_sock_mutex nl_table_wait.lock &p->pi_lock &cfs_rq->removed.lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex uevent_sock_mutex nl_table_wait.lock &p->pi_lock &rq->__lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex uevent_sock_mutex nl_table_wait.lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex uevent_sock_mutex &rq->__lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex &k->k_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex subsys mutex#17 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex subsys mutex#17 &k->k_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex &dir->lock#2 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex dev_hotplug_mutex irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex dev_hotplug_mutex &dev->power.lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex dev_base_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex input_pool.lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex batched_entropy_u8.lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex kfence_freelist_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex &meta->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex rcu_read_lock &pool->lock/1 
irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex batched_entropy_u32.lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex &tbl->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex sysctl_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex failover_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex pcpu_alloc_mutex irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex pcpu_alloc_mutex pcpu_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex pool_lock#2 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex proc_subdir_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex proc_inum_ida.xa_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex proc_subdir_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex &idev->mc_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex &idev->mc_lock &obj_hash[i].lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex &idev->mc_lock _xmit_ETHER irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex &pnettable->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex smc_ib_devices.mutex irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex &vn->sock_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 &sb->s_type->i_mutex_key#3 
remove_cache_srcu irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 &sb->s_type->i_mutex_key#3 remove_cache_srcu quarantine_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex &sb->s_type->i_mutex_key#3 &n->list_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex &sb->s_type->i_mutex_key#3 &n->list_lock &c->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex &n->list_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex &n->list_lock &c->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex uevent_sock_mutex &c->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex uevent_sock_mutex &____s->seqcount irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex uevent_sock_mutex pool_lock#2 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex batched_entropy_u32.lock crngs.lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex &obj_hash[i].lock pool_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)events (work_completion)(&data->fib_event_work) &data->fib_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&data->fib_event_work) &data->fib_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &ndev->lock &ifa->lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &ndev->lock &ifa->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)wg-kex-wg1#16 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg1#16 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg1#16 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ 
unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &list->lock#17 irq_context: 0 (wq_completion)wg-kex-wg1#16 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh &r->producer_lock#2 irq_context: 0 (wq_completion)wg-kex-wg1#16 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh rcu_read_lock &pool->lock irq_context: 0 (wq_completion)wg-kex-wg1#16 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg1#16 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 (wq_completion)wg-crypt-wg1#8 irq_context: 0 rtnl_mutex &tb->tb6_lock rcu_read_lock &n->list_lock irq_context: 0 rtnl_mutex &tb->tb6_lock rcu_read_lock &n->list_lock &c->lock irq_context: 0 rtnl_mutex rcu_read_lock rcu_read_lock &pool->lock/1 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 rtnl_mutex rcu_read_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)events (work_completion)(&data->fib_event_work) &data->fib_lock rcu_read_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 rtnl_mutex rcu_read_lock &rcu_state.gp_wq irq_context: 0 rtnl_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 rtnl_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 rtnl_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &ndev->lock &ifa->lock batched_entropy_u32.lock crngs.lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &ndev->lock &ifa->lock rcu_read_lock &pool->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &ndev->lock &ifa->lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 &obj_hash[i].lock pool_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 &sb->s_type->i_mutex_key#3 &pcp->lock &zone->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock 
&dev->mutex &devlink->lock_key#4 &sb->s_type->i_mutex_key#3 &pcp->lock &zone->lock &____s->seqcount irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem nf_nat_proto_mutex nf_hook_mutex &c->lock irq_context: 0 rtnl_mutex rcu_read_lock &n->list_lock irq_context: 0 rtnl_mutex rcu_read_lock &n->list_lock &c->lock irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem fill_pool_map-wait-type-override pool_lock irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem rcu_read_lock rcu_node_0 irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem rcu_read_lock &rq->__lock irq_context: 0 rtnl_mutex dev_addr_sem &obj_hash[i].lock pool_lock irq_context: 0 rtnl_mutex dev_addr_sem rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 rtnl_mutex dev_addr_sem rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override pool_lock irq_context: 0 rtnl_mutex dev_addr_sem rcu_read_lock &bond->stats_lock/1 irq_context: 0 rtnl_mutex &dev_addr_list_lock_key/1 irq_context: 0 rtnl_mutex &dev_addr_list_lock_key/1 rcu_read_lock _xmit_ETHER irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock genl_mutex quarantine_lock irq_context: 0 cb_lock remove_cache_srcu irq_context: 0 cb_lock remove_cache_srcu quarantine_lock irq_context: 0 cb_lock remove_cache_srcu &c->lock irq_context: 0 cb_lock remove_cache_srcu &n->list_lock irq_context: 0 cb_lock remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 cb_lock remove_cache_srcu &obj_hash[i].lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &n->list_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &n->list_lock &c->lock irq_context: 0 cb_lock nlk_cb_mutex-GENERIC &devlink->lock_key#4 irq_context: 0 cb_lock nlk_cb_mutex-GENERIC &devlink->lock_key#4 &devlink_port->type_lock irq_context: 0 (wq_completion)wg-kex-wg0#21 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh batched_entropy_u32.lock irq_context: 0 (wq_completion)wg-kex-wg0#21 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#7 irq_context: 0 (wq_completion)wg-kex-wg0#21 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 (wq_completion)wg-kex-wg0#21 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &nf_conntrack_locks[i] irq_context: 0 (wq_completion)wg-kex-wg0#21 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 irq_context: 0 (wq_completion)wg-kex-wg0#21 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 batched_entropy_u8.lock irq_context: 0 
(wq_completion)wg-kex-wg0#21 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &nf_conntrack_locks[i]/1 irq_context: 0 (wq_completion)wg-kex-wg0#21 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg0#21 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)wg-kex-wg0#21 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &n->lock irq_context: 0 (wq_completion)wg-kex-wg0#21 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &dev_addr_list_lock_key/1 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &dev_addr_list_lock_key/1 rcu_read_lock _xmit_ETHER irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &dev_addr_list_lock_key/1 rcu_read_lock _xmit_ETHER pool_lock#2 irq_context: 0 (wq_completion)wg-kex-wg0#21 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg0#21 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &dev_addr_list_lock_key/1 rcu_read_lock _xmit_ETHER &c->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &dev_addr_list_lock_key/1 rcu_read_lock _xmit_ETHER &____s->seqcount irq_context: 0 (wq_completion)wg-kex-wg0#21 (work_completion)(&peer->transmit_handshake_work) batched_entropy_u8.lock irq_context: 0 (wq_completion)wg-kex-wg1#21 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 pool_lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)wg-kex-wg1#21 (work_completion)(&peer->transmit_handshake_work) irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)wg-kex-wg1#21 (work_completion)(&peer->transmit_handshake_work) tk_core.seq.seqcount irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh fill_pool_map-wait-type-override 
pool_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 &obj_hash[i].lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 pool_lock#2 irq_context: 0 (wq_completion)wg-kex-wg1#21 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock irq_context: 0 (wq_completion)wg-kex-wg1#21 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock irq_context: 0 (wq_completion)wg-kex-wg1#21 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock crngs.lock irq_context: 0 (wq_completion)wg-kex-wg1#21 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg1#21 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg1#21 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock rcu_read_lock_bh batched_entropy_u32.lock irq_context: 0 (wq_completion)wg-kex-wg1#21 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock rcu_read_lock_bh &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg1#21 (work_completion)(&peer->transmit_handshake_work) &cookie->lock irq_context: 0 (wq_completion)wg-kex-wg1#21 (work_completion)(&peer->transmit_handshake_work) &cookie->lock irq_context: 0 (wq_completion)wg-kex-wg1#21 (work_completion)(&peer->transmit_handshake_work) rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg1#21 (work_completion)(&peer->transmit_handshake_work) rcu_read_lock_bh &base->lock irq_context: 0 (wq_completion)wg-kex-wg1#21 (work_completion)(&peer->transmit_handshake_work) rcu_read_lock_bh &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg1#21 (work_completion)(&peer->transmit_handshake_work) &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg1#21 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock irq_context: 0 (wq_completion)wg-kex-wg1#21 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#7 irq_context: 0 (wq_completion)wg-kex-wg1#21 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &nf_conntrack_locks[i] irq_context: 0 (wq_completion)wg-kex-wg1#21 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 irq_context: 0 (wq_completion)wg-kex-wg1#21 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 batched_entropy_u8.lock irq_context: 0 (wq_completion)wg-kex-wg1#21 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &nf_conntrack_locks[i]/1 irq_context: 0 (wq_completion)wg-kex-wg1#21 
(work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)wg-kex-wg1#21 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg1#21 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: 0 (wq_completion)wg-kex-wg1#21 (work_completion)(&peer->transmit_handshake_work) batched_entropy_u8.lock irq_context: 0 (wq_completion)wg-kex-wg0#22 irq_context: 0 (wq_completion)wg-kex-wg0#22 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) irq_context: 0 (wq_completion)wg-kex-wg1#21 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)wg-kex-wg0#22 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &r->consumer_lock#2 irq_context: 0 (wq_completion)wg-kex-wg0#22 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock irq_context: 0 (wq_completion)wg-kex-wg0#22 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock irq_context: 0 (wq_completion)wg-kex-wg0#22 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg0#22 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock irq_context: 0 (wq_completion)wg-kex-wg0#22 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock 
tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg0#22 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock irq_context: 0 (wq_completion)wg-kex-wg0#22 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg0#22 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock crngs.lock irq_context: 0 (wq_completion)wg-kex-wg0#22 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg0#22 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock rcu_read_lock_bh batched_entropy_u32.lock irq_context: 0 (wq_completion)wg-kex-wg0#22 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock rcu_read_lock_bh &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg0#22 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &cookie->lock irq_context: 0 (wq_completion)wg-kex-wg0#22 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &cookie->lock irq_context: 0 (wq_completion)wg-kex-wg0#22 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + 
(((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock irq_context: 0 (wq_completion)wg-kex-wg0#22 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock fs_reclaim irq_context: 0 (wq_completion)wg-kex-wg0#22 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)wg-kex-wg0#22 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock &c->lock irq_context: 0 (wq_completion)wg-kex-wg0#22 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock &____s->seqcount#2 irq_context: 0 (wq_completion)wg-kex-wg0#22 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock &n->list_lock irq_context: softirq &(&conn->info_timer)->timer rcu_read_lock &pool->lock &p->pi_lock irq_context: softirq &(&conn->info_timer)->timer rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: softirq &(&conn->info_timer)->timer rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-kex-wg0#22 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock &n->list_lock &c->lock irq_context: 0 (wq_completion)wg-kex-wg0#22 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock &rq->__lock irq_context: 0 (wq_completion)wg-kex-wg0#22 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock tk_core.seq.seqcount irq_context: 0 
(wq_completion)wg-kex-wg0#22 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock rcu_read_lock_bh &peer->keypairs.keypair_update_lock irq_context: 0 (wq_completion)wg-kex-wg0#22 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock rcu_read_lock_bh &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg0#22 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg0#22 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh &base->lock irq_context: 0 (wq_completion)wg-kex-wg0#22 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg0#22 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg0#22 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock irq_context: 0 (wq_completion)wg-kex-wg0#22 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)wg-kex-wg0#22 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); 
})->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)wg-kex-wg0#22 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#7 irq_context: 0 (wq_completion)wg-kex-wg0#22 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)wg-kex-wg0#22 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg0#22 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: 0 (wq_completion)wg-kex-wg0#22 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &rq->__lock irq_context: 0 (wq_completion)wg-kex-wg1#22 irq_context: 0 (wq_completion)wg-kex-wg1#22 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) irq_context: 0 (wq_completion)wg-kex-wg1#22 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &r->consumer_lock#2 irq_context: 0 (wq_completion)wg-kex-wg1#22 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock irq_context: 0 (wq_completion)wg-kex-wg1#22 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ 
unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock irq_context: 0 (wq_completion)wg-kex-wg1#22 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock irq_context: 0 (wq_completion)wg-kex-wg1#22 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock irq_context: 0 (wq_completion)wg-kex-wg1#22 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock irq_context: 0 (wq_completion)wg-kex-wg1#22 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock fs_reclaim irq_context: 0 (wq_completion)wg-kex-wg1#22 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)wg-kex-wg1#22 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock &c->lock irq_context: 0 (wq_completion)wg-kex-wg1#22 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg1#22 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock rcu_read_lock_bh &peer->keypairs.keypair_update_lock irq_context: 0 (wq_completion)wg-kex-wg1#22 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); 
(void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock rcu_read_lock_bh &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg1#22 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg1#22 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh &base->lock irq_context: 0 (wq_completion)wg-kex-wg1#22 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg1#22 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg1#22 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &base->lock irq_context: 0 (wq_completion)wg-kex-wg1#22 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg1#22 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg1#22 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &list->lock#17 irq_context: 0 (wq_completion)wg-kex-wg1#22 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : 
"=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh &r->producer_lock#2 irq_context: 0 (wq_completion)wg-kex-wg1#22 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh rcu_read_lock &pool->lock irq_context: 0 (wq_completion)wg-kex-wg1#22 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg1#22 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 (wq_completion)wg-kex-wg1#22 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)wg-kex-wg1#22 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-kex-wg1#22 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &rq->__lock irq_context: 0 (wq_completion)wg-crypt-wg1#11 irq_context: 0 (wq_completion)wg-crypt-wg1#11 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) irq_context: 0 (wq_completion)wg-crypt-wg1#11 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &r->consumer_lock#2 irq_context: 0 (wq_completion)wg-crypt-wg1#11 (work_completion)(&({ do { const void 
*__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock irq_context: 0 (wq_completion)wg-crypt-wg1#11 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg1#11 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 (wq_completion)wg-crypt-wg1#11 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)wg-crypt-wg1#11 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-crypt-wg1#11 (work_completion)(&peer->transmit_packet_work) irq_context: 0 (wq_completion)wg-crypt-wg1#11 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg1#11 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh &base->lock irq_context: 0 (wq_completion)wg-crypt-wg1#11 (work_completion)(&peer->transmit_packet_work) &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg1#11 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock irq_context: 0 (wq_completion)wg-crypt-wg1#11 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#7 irq_context: 0 (wq_completion)wg-crypt-wg1#11 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)wg-crypt-wg1#11 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-crypt-wg1#11 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: 0 (wq_completion)wg-crypt-wg1#11 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-crypt-wg0#11 irq_context: 0 (wq_completion)wg-crypt-wg0#11 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } 
while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) irq_context: 0 (wq_completion)wg-crypt-wg0#11 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &r->consumer_lock#2 irq_context: 0 (wq_completion)wg-crypt-wg0#11 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-crypt-wg0#11 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock irq_context: 0 (wq_completion)wg-crypt-wg0#11 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg0#11 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 (wq_completion)wg-crypt-wg0#11 (work_completion)(&peer->transmit_packet_work) irq_context: 0 (wq_completion)wg-crypt-wg0#11 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg0#11 (work_completion)(&peer->transmit_packet_work) &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg0#11 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock irq_context: 0 (wq_completion)wg-crypt-wg0#11 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#7 irq_context: 0 (wq_completion)wg-crypt-wg0#11 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)wg-crypt-wg0#11 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-crypt-wg0#11 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: 0 (wq_completion)wg-crypt-wg1#11 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); 
(typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg2#21 irq_context: 0 (wq_completion)wg-kex-wg2#21 (work_completion)(&peer->transmit_handshake_work) irq_context: 0 (wq_completion)wg-kex-wg2#21 (work_completion)(&peer->transmit_handshake_work) tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg2#21 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock irq_context: 0 (wq_completion)wg-kex-wg2#21 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock irq_context: 0 (wq_completion)wg-kex-wg2#21 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock crngs.lock irq_context: 0 (wq_completion)wg-kex-wg2#21 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg2#21 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg2#21 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock rcu_read_lock_bh batched_entropy_u32.lock irq_context: 0 (wq_completion)wg-kex-wg2#21 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock rcu_read_lock_bh &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg2#21 (work_completion)(&peer->transmit_handshake_work) &cookie->lock irq_context: 0 (wq_completion)wg-kex-wg2#21 (work_completion)(&peer->transmit_handshake_work) &cookie->lock irq_context: 0 (wq_completion)wg-kex-wg2#21 (work_completion)(&peer->transmit_handshake_work) rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg2#21 (work_completion)(&peer->transmit_handshake_work) rcu_read_lock_bh &base->lock irq_context: 0 (wq_completion)wg-kex-wg2#21 (work_completion)(&peer->transmit_handshake_work) rcu_read_lock_bh &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg2#21 (work_completion)(&peer->transmit_handshake_work) &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg2#21 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock irq_context: 0 (wq_completion)wg-kex-wg2#21 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#7 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock pcpu_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&data->fib_event_work) &data->fib_lock &n->list_lock irq_context: 0 (wq_completion)events (work_completion)(&data->fib_event_work) &data->fib_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex batched_entropy_u8.lock crngs.lock irq_context: 0 (wq_completion)wg-kex-wg2#21 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)wg-kex-wg2#21 
(work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg2#21 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: 0 (wq_completion)wg-kex-wg2#21 (work_completion)(&peer->transmit_handshake_work) batched_entropy_u8.lock irq_context: 0 (wq_completion)wg-kex-wg2#21 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)wg-kex-wg2#21 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &ul->lock irq_context: 0 (wq_completion)wg-kex-wg2#21 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg2#21 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh batched_entropy_u32.lock irq_context: 0 (wq_completion)wg-kex-wg0#22 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock rcu_read_lock_bh batched_entropy_u32.lock crngs.lock irq_context: 0 (wq_completion)wg-kex-wg0#22 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock &____s->seqcount irq_context: 0 rtnl_mutex &ndev->lock &tb->tb6_lock &pcp->lock &zone->lock irq_context: 0 rtnl_mutex &ndev->lock &tb->tb6_lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 (wq_completion)wg-kex-wg0#22 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &c->lock irq_context: 0 (wq_completion)wg-kex-wg0#22 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &____s->seqcount#2 irq_context: 0 (wq_completion)wg-kex-wg0#22 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &____s->seqcount irq_context: 0 (wq_completion)wg-kex-wg1#22 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); 
})->work) &wg->static_identity.lock &handshake->lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg1#22 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg2#22 irq_context: 0 (wq_completion)wg-kex-wg1#22 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock crngs.lock irq_context: 0 (wq_completion)wg-kex-wg2#22 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) irq_context: 0 (wq_completion)wg-kex-wg2#22 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &r->consumer_lock#2 irq_context: 0 (wq_completion)wg-kex-wg2#22 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock irq_context: 0 (wq_completion)wg-kex-wg2#22 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)wg-kex-wg1#22 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg1#22 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock rcu_read_lock_bh batched_entropy_u32.lock irq_context: 0 (wq_completion)wg-kex-wg1#22 (work_completion)(&({ do { const void *__vpp_verify = 
(typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock rcu_read_lock_bh &table->lock#2 irq_context: 0 (wq_completion)events (work_completion)(&data->fib_event_work) &data->fib_lock &meta->lock irq_context: 0 (wq_completion)events (work_completion)(&data->fib_event_work) &data->fib_lock kfence_freelist_lock irq_context: 0 (wq_completion)wg-kex-wg1#22 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &cookie->lock irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock &nf_conntrack_locks[i]/1 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex quarantine_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock remove_cache_srcu irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock remove_cache_srcu quarantine_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock remove_cache_srcu &c->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock remove_cache_srcu &n->list_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock remove_cache_srcu &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg0#20 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg0#20 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock rcu_read_lock_bh batched_entropy_u32.lock irq_context: 0 (wq_completion)wg-kex-wg0#20 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock rcu_read_lock_bh &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg0#20 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &cookie->lock irq_context: 0 (wq_completion)wg-kex-wg0#20 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void 
*)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &cookie->lock irq_context: 0 (wq_completion)wg-kex-wg0#20 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock irq_context: 0 (wq_completion)wg-kex-wg0#20 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock fs_reclaim irq_context: 0 (wq_completion)wg-kex-wg0#20 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)wg-kex-wg0#20 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg0#20 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock rcu_read_lock_bh &peer->keypairs.keypair_update_lock irq_context: 0 (wq_completion)wg-kex-wg0#20 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock rcu_read_lock_bh &table->lock#2 irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->tx_work) tk_core.seq.seqcount irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->tx_work) &list->lock#11 irq_context: 0 (wq_completion)wg-kex-wg2#22 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock irq_context: 0 (wq_completion)wg-kex-wg1#22 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + 
(((__per_cpu_offset[(cpu)])))); }); })->work) &cookie->lock irq_context: 0 (wq_completion)wg-kex-wg2#22 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock irq_context: 0 (wq_completion)wg-kex-wg2#22 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock irq_context: 0 (wq_completion)wg-kex-wg2#22 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock fs_reclaim irq_context: 0 (wq_completion)wg-kex-wg1#22 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock irq_context: 0 (wq_completion)wg-kex-wg2#22 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)wg-kex-wg1#22 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 (wq_completion)wg-kex-wg2#22 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg1#22 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#4 &nsim_trap_data->trap_lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#4 &nsim_trap_data->trap_lock &c->lock 
irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#4 &nsim_trap_data->trap_lock crngs.lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#4 &nsim_trap_data->trap_lock &nsim_dev->fa_cookie_lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#4 &nsim_trap_data->trap_lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#4 &nsim_trap_data->trap_lock &____s->seqcount irq_context: 0 (wq_completion)wg-kex-wg0#20 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg0#20 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh &base->lock irq_context: 0 (wq_completion)wg-kex-wg0#20 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg0#20 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg0#20 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock irq_context: 0 (wq_completion)wg-kex-wg0#20 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg0#20 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh pool_lock#2 irq_context: 0 (wq_completion)wg-kex-wg0#20 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 
0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#7 irq_context: 0 (wq_completion)wg-kex-wg0#20 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)wg-kex-wg0#20 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg0#20 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: 0 (wq_completion)wg-kex-wg2#22 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock rcu_read_lock_bh &peer->keypairs.keypair_update_lock irq_context: 0 (wq_completion)wg-kex-wg1#22 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &ul->lock irq_context: 0 (wq_completion)wg-kex-wg1#20 irq_context: 0 (wq_completion)wg-kex-wg1#20 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) irq_context: 0 (wq_completion)wg-kex-wg1#20 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &r->consumer_lock#2 irq_context: 0 (wq_completion)wg-kex-wg1#20 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) 
*)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock irq_context: 0 (wq_completion)wg-kex-wg1#20 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock irq_context: 0 (wq_completion)wg-kex-wg1#20 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &rq->__lock irq_context: 0 (wq_completion)wg-kex-wg1#20 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock irq_context: 0 (wq_completion)wg-kex-wg1#20 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->req_wait_q irq_context: 0 (wq_completion)wg-kex-wg1#20 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock irq_context: 0 (wq_completion)wg-kex-wg1#20 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock fs_reclaim irq_context: 0 (wq_completion)wg-kex-wg1#20 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)wg-kex-wg1#20 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock tk_core.seq.seqcount irq_context: 0 rtnl_mutex &dev_addr_list_lock_key#2/1 irq_context: 0 (wq_completion)wg-kex-wg1#20 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } 
while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock rcu_read_lock_bh &peer->keypairs.keypair_update_lock irq_context: 0 (wq_completion)wg-kex-wg1#20 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock rcu_read_lock_bh &table->lock#2 irq_context: 0 rtnl_mutex &dev_addr_list_lock_key#2/1 rcu_read_lock _xmit_ETHER irq_context: 0 (wq_completion)wg-kex-wg1#20 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg1#20 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh &base->lock irq_context: 0 (wq_completion)wg-kex-wg1#20 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg1#20 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg1#20 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg1#20 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &list->lock#17 irq_context: 0 (wq_completion)wg-kex-wg1#20 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh &r->producer_lock#2 irq_context: 0 (wq_completion)wg-kex-wg1#20 (work_completion)(&({ do { const void 
*__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh rcu_read_lock &pool->lock irq_context: 0 (wq_completion)wg-kex-wg1#20 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg1#20 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 (wq_completion)wg-kex-wg1#20 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 rtnl_mutex &ndev->lock fill_pool_map-wait-type-override &c->lock irq_context: 0 rtnl_mutex &ndev->lock fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)wg-kex-wg1#20 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-kex-wg1#20 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &rq->__lock irq_context: 0 (wq_completion)wg-crypt-wg1#10 irq_context: 0 (wq_completion)wg-crypt-wg1#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) irq_context: 0 (wq_completion)wg-crypt-wg1#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &r->consumer_lock#2 irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &idev->mc_lock &obj_hash[i].lock 
irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &idev->mc_lock &base->lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &idev->mc_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &dir->lock#2 irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex pcpu_alloc_mutex irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex pcpu_alloc_mutex pcpu_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &tb->tb6_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &tb->tb6_lock pool_lock#2 irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &tb->tb6_lock nl_table_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &tb->tb6_lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &tb->tb6_lock nl_table_wait.lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex (inet6addr_validator_chain).rwsem irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_read_lock &net->ipv6.addrconf_hash_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_read_lock &c->lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_read_lock rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_read_lock rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_read_lock rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_read_lock rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_read_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_read_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_read_lock &net->sctp.local_addr_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_read_lock &net->sctp.local_addr_lock &net->sctp.addr_wq_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_read_lock &net->sctp.local_addr_lock &net->sctp.addr_wq_lock pool_lock#2 irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex pcpu_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &tb->tb6_lock &pcp->lock &zone->lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &tb->tb6_lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &tb->tb6_lock &____s->seqcount irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &tb->tb6_lock rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &ifa->lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_read_lock &pool->lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_read_lock &pool->lock 
&obj_hash[i].lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &ndev->lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &ndev->lock &ifa->lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &ndev->lock &ifa->lock batched_entropy_u32.lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &ndev->lock &ifa->lock crngs.lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &ndev->lock &ifa->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &ndev->lock &ifa->lock &base->lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &ndev->lock &ifa->lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &dev_addr_list_lock_key#2/1 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &dev_addr_list_lock_key#2/1 rcu_read_lock _xmit_ETHER irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &dev_addr_list_lock_key#2/1 rcu_read_lock _xmit_ETHER pool_lock#2 irq_context: 0 (wq_completion)wg-crypt-wg1#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock irq_context: 0 (wq_completion)wg-crypt-wg1#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg1#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 (wq_completion)wg-crypt-wg1#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)wg-crypt-wg1#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &p->pi_lock &rq->__lock 
&per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-crypt-wg1#10 (work_completion)(&peer->transmit_packet_work) irq_context: 0 (wq_completion)wg-crypt-wg1#10 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg1#10 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh &base->lock irq_context: 0 (wq_completion)wg-crypt-wg1#10 (work_completion)(&peer->transmit_packet_work) &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg1#10 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock irq_context: 0 (wq_completion)wg-crypt-wg1#10 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#7 irq_context: 0 (wq_completion)wg-crypt-wg1#10 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)wg-crypt-wg1#10 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-crypt-wg1#10 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: 0 (wq_completion)wg-crypt-wg1#10 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh tk_core.seq.seqcount irq_context: softirq &(&br->gc_work)->timer rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: softirq &(&br->gc_work)->timer rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq &(&bat_priv->orig_work)->timer rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&(&hdev->cmd_timer)->work) console_owner_lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&(&hdev->cmd_timer)->work) console_owner irq_context: 0 rtnl_mutex rcu_state.exp_mutex rcu_read_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&(&hdev->cmd_timer)->work) rcu_node_0 irq_context: 0 (wq_completion)hci3#2 (work_completion)(&(&hdev->cmd_timer)->work) &rcu_state.expedited_wq irq_context: 0 (wq_completion)hci3#2 (work_completion)(&(&hdev->cmd_timer)->work) &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&(&hdev->cmd_timer)->work) &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&(&hdev->cmd_timer)->work) &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &list->lock#5 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock batched_entropy_u32.lock crngs.lock irq_context: 0 (wq_completion)wg-kex-wg0#14 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh 
rcu_read_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)wg-crypt-wg0#9 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh &base->lock &obj_hash[i].lock irq_context: 0 br_ioctl_mutex rtnl_mutex krc.lock irq_context: 0 (wq_completion)events_power_efficient (check_lifetime_work).work fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)wg-crypt-wg0#10 irq_context: 0 (wq_completion)wg-crypt-wg0#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) irq_context: 0 (wq_completion)wg-crypt-wg0#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &r->consumer_lock#2 irq_context: 0 (wq_completion)wg-crypt-wg0#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-crypt-wg0#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock irq_context: 0 (wq_completion)wg-crypt-wg0#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg0#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 (wq_completion)wg-crypt-wg0#10 (work_completion)(&peer->transmit_packet_work) irq_context: 0 (wq_completion)wg-crypt-wg0#10 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg0#10 (work_completion)(&peer->transmit_packet_work) &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg0#10 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock irq_context: 0 (wq_completion)wg-crypt-wg0#10 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#7 irq_context: 0 (wq_completion)wg-crypt-wg0#10 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)wg-crypt-wg0#10 
(work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-crypt-wg0#10 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: 0 (wq_completion)wg-crypt-wg1#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg2#19 irq_context: 0 (wq_completion)wg-kex-wg2#19 (work_completion)(&peer->transmit_handshake_work) irq_context: 0 (wq_completion)wg-kex-wg2#19 (work_completion)(&peer->transmit_handshake_work) tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg2#19 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock irq_context: 0 (wq_completion)wg-kex-wg2#19 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock irq_context: 0 (wq_completion)wg-kex-wg2#19 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock crngs.lock irq_context: 0 (wq_completion)wg-kex-wg2#19 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg2#19 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg2#19 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock rcu_read_lock_bh batched_entropy_u32.lock irq_context: 0 (wq_completion)wg-kex-wg2#19 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock rcu_read_lock_bh &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg2#19 (work_completion)(&peer->transmit_handshake_work) &cookie->lock irq_context: 0 (wq_completion)wg-kex-wg2#19 (work_completion)(&peer->transmit_handshake_work) &cookie->lock irq_context: 0 (wq_completion)wg-kex-wg2#19 (work_completion)(&peer->transmit_handshake_work) rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg2#19 (work_completion)(&peer->transmit_handshake_work) rcu_read_lock_bh &base->lock irq_context: 0 (wq_completion)wg-kex-wg2#19 (work_completion)(&peer->transmit_handshake_work) rcu_read_lock_bh &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg2#19 (work_completion)(&peer->transmit_handshake_work) &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg2#19 (work_completion)(&peer->transmit_handshake_work) &c->lock irq_context: 0 (wq_completion)wg-kex-wg2#19 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock irq_context: 0 (wq_completion)wg-kex-wg2#19 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)wg-kex-wg2#19 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh batched_entropy_u32.lock irq_context: 0 (wq_completion)wg-kex-wg2#19 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#7 irq_context: 0 (wq_completion)wg-kex-wg2#19 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock 
rcu_read_lock_bh rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)wg-kex-wg2#19 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg2#19 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: 0 (wq_completion)wg-kex-wg2#19 (work_completion)(&peer->transmit_handshake_work) &rq->__lock irq_context: 0 (wq_completion)wg-kex-wg1#20 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg1#20 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg1#20 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock crngs.lock irq_context: 0 (wq_completion)wg-kex-wg1#20 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg1#20 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock rcu_read_lock_bh batched_entropy_u32.lock irq_context: 0 (wq_completion)wg-kex-wg1#20 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock rcu_read_lock_bh &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg1#20 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &cookie->lock irq_context: 0 (wq_completion)wg-kex-wg1#20 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 
0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &cookie->lock irq_context: 0 (wq_completion)wg-kex-wg1#20 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock &c->lock irq_context: 0 (wq_completion)wg-kex-wg1#20 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock irq_context: 0 (wq_completion)wg-kex-wg1#20 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#7 irq_context: 0 (wq_completion)wg-kex-wg1#20 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)wg-kex-wg1#20 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg1#20 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: 0 (wq_completion)wg-kex-wg2#19 (work_completion)(&peer->transmit_handshake_work) batched_entropy_u8.lock irq_context: 0 (wq_completion)wg-kex-wg2#20 irq_context: 0 (wq_completion)wg-kex-wg2#20 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) irq_context: 0 (wq_completion)wg-kex-wg2#20 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); 
(typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &r->consumer_lock#2 irq_context: 0 (wq_completion)wg-kex-wg2#20 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock irq_context: 0 (wq_completion)wg-kex-wg2#20 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock irq_context: 0 (wq_completion)wg-kex-wg2#19 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)wg-kex-wg2#20 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock irq_context: 0 (wq_completion)wg-kex-wg2#20 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock irq_context: 0 (wq_completion)wg-kex-wg2#20 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock irq_context: 0 (wq_completion)wg-kex-wg2#20 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock fs_reclaim irq_context: 0 (wq_completion)wg-kex-wg2#20 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)wg-kex-wg2#20 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg2#20 (work_completion)(&({ do { const void *__vpp_verify = 
(typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock rcu_read_lock_bh &peer->keypairs.keypair_update_lock irq_context: 0 (wq_completion)wg-kex-wg2#20 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock rcu_read_lock_bh &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg2#20 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg2#20 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh &base->lock irq_context: 0 (wq_completion)wg-kex-wg2#20 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg0#20 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock &c->lock irq_context: 0 (wq_completion)wg-kex-wg2#20 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg0#20 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock &____s->seqcount#2 irq_context: 0 (wq_completion)wg-kex-wg2#20 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &base->lock irq_context: 0 (wq_completion)wg-kex-wg0#20 (work_completion)(&({ do { const void *__vpp_verify = 
(typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock &____s->seqcount irq_context: 0 (wq_completion)wg-kex-wg2#20 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg2#20 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg0#20 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &c->lock irq_context: 0 (wq_completion)wg-kex-wg2#20 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &list->lock#17 irq_context: 0 (wq_completion)wg-kex-wg2#20 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh &r->producer_lock#2 irq_context: 0 (wq_completion)wg-kex-wg2#20 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh rcu_read_lock &pool->lock irq_context: 0 (wq_completion)wg-kex-wg2#20 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg0#20 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &rq->__lock irq_context: 0 (wq_completion)wg-kex-wg2#20 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ 
unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 (wq_completion)wg-kex-wg2#20 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)wg-kex-wg2#20 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-crypt-wg2#10 irq_context: 0 (wq_completion)wg-crypt-wg2#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) irq_context: 0 (wq_completion)wg-crypt-wg2#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &r->consumer_lock#2 irq_context: 0 (wq_completion)wg-crypt-wg2#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock irq_context: 0 (wq_completion)wg-crypt-wg2#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg2#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 (wq_completion)wg-crypt-wg2#10 (work_completion)(&peer->transmit_packet_work) irq_context: 0 (wq_completion)wg-crypt-wg2#10 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg2#10 (work_completion)(&peer->transmit_packet_work) &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg2#10 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock 
irq_context: 0 (wq_completion)wg-crypt-wg2#10 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#7 irq_context: 0 (wq_completion)wg-crypt-wg2#10 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)wg-crypt-wg2#10 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-crypt-wg2#10 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: 0 (wq_completion)wg-crypt-wg2#10 (work_completion)(&peer->transmit_packet_work) &rq->__lock irq_context: 0 (wq_completion)wg-crypt-wg2#10 (work_completion)(&peer->transmit_packet_work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-kex-wg2#22 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock rcu_read_lock_bh &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg1#22 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg2#22 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg1#22 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#7 irq_context: 0 (wq_completion)wg-kex-wg2#22 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh &base->lock irq_context: 0 (wq_completion)wg-kex-wg1#22 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#4 
&nsim_trap_data->trap_lock batched_entropy_u8.lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#4 &nsim_trap_data->trap_lock kfence_freelist_lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#4 &nsim_trap_data->trap_lock &meta->lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#4 &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#4 &nsim_trap_data->trap_lock &pcp->lock &zone->lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#4 &nsim_trap_data->trap_lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 cb_lock genl_mutex &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &tb->tb6_lock &c->lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &br->lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &br->lock pool_lock#2 irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &br->lock &dir->lock#2 irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &br->lock deferred_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &br->lock rcu_read_lock &pool->lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &br->lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &br->lock (console_sem).lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &br->lock console_lock console_srcu console_owner_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &br->lock console_lock console_srcu console_owner irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &br->lock console_lock console_srcu console_owner &port_lock_key irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &br->lock console_lock console_srcu console_owner console_owner_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &br->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &br->lock nl_table_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &br->lock nl_table_wait.lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &br->lock &base->lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &br->lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &br->lock &br->multicast_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &br->lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &br->lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &br->lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &br->lock lweventlist_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &br->lock lweventlist_lock pool_lock#2 
irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &br->lock lweventlist_lock &dir->lock#2 irq_context: 0 (wq_completion)wg-crypt-wg2#10 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh tk_core.seq.seqcount irq_context: softirq (&pmctx->ip6_own_query.timer) irq_context: softirq (&pmctx->ip6_own_query.timer) &br->multicast_lock irq_context: softirq (&pmctx->ip4_own_query.timer) irq_context: softirq (&pmctx->ip4_own_query.timer) &br->multicast_lock irq_context: softirq &(&idev->mc_ifc_work)->timer rcu_read_lock &pool->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)events deferred_process_work rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)events deferred_process_work rtnl_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-crypt-wg2#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-crypt-wg1#10 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh batched_entropy_u32.lock irq_context: 0 rtnl_mutex devnet_rename_sem &root->kernfs_rwsem &c->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_node_0 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock &rcu_state.expedited_wq irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock &rq->__lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq rcu_read_lock &br->hash_lock irq_context: softirq rcu_read_lock &br->hash_lock pool_lock#2 irq_context: softirq rcu_read_lock &br->hash_lock rcu_read_lock rhashtable_bucket irq_context: softirq rcu_read_lock &br->hash_lock rcu_read_lock rcu_read_lock &pool->lock irq_context: softirq rcu_read_lock &br->hash_lock rcu_read_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: softirq rcu_read_lock &br->hash_lock rcu_read_lock rcu_read_lock &pool->lock pool_lock#2 irq_context: softirq rcu_read_lock &br->hash_lock &c->lock irq_context: softirq rcu_read_lock &br->hash_lock nl_table_lock irq_context: softirq rcu_read_lock &br->hash_lock &obj_hash[i].lock irq_context: softirq rcu_read_lock &br->hash_lock nl_table_wait.lock irq_context: softirq rcu_read_lock &br->multicast_lock irq_context: softirq rcu_read_lock &br->multicast_lock pool_lock#2 irq_context: softirq rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: softirq rcu_read_lock &br->multicast_lock rcu_read_lock rhashtable_bucket irq_context: softirq rcu_read_lock &br->multicast_lock &dir->lock#2 irq_context: softirq rcu_read_lock &br->multicast_lock deferred_lock 
irq_context: softirq rcu_read_lock &br->multicast_lock rcu_read_lock &pool->lock irq_context: softirq rcu_read_lock &br->multicast_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: softirq rcu_read_lock &br->multicast_lock &c->lock irq_context: softirq rcu_read_lock &br->multicast_lock nl_table_lock irq_context: softirq rcu_read_lock &br->multicast_lock nl_table_wait.lock irq_context: softirq rcu_read_lock &br->multicast_lock &base->lock irq_context: softirq rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &ndev->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem rcu_node_0 irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &br->lock &c->lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &br->lock &____s->seqcount irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &br->lock &n->list_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &br->lock &n->list_lock &c->lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#10 &nsim_trap_data->trap_lock &pcp->lock &zone->lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#10 &nsim_trap_data->trap_lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 cb_lock genl_mutex rfkill_global_mutex uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &sig->cred_guard_mutex tomoyo_ss &fs->lock irq_context: 0 &sig->cred_guard_mutex tomoyo_ss &fs->lock &dentry->d_lock irq_context: 0 (wq_completion)phy25 irq_context: 0 (wq_completion)phy25 (work_completion)(&local->reconfig_filter) irq_context: 0 (wq_completion)phy25 (work_completion)(&local->reconfig_filter) &local->filter_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx gdp_mutex &n->list_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx gdp_mutex &n->list_lock &c->lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx gdp_mutex &rq->__lock irq_context: 0 cb_lock genl_mutex genl_mutex.wait_lock irq_context: 0 cb_lock genl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &____s->seqcount irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock pool_lock#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock rcu_read_lock rhashtable_bucket 
irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &dir->lock#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock deferred_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock rcu_read_lock &pool->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock nl_table_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock nl_table_wait.lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &c->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &n->list_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &list->lock#5 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh pool_lock#2 irq_context: 0 (wq_completion)wg-kex-wg2#22 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg1#22 (work_completion)(&({ do { const void *__vpp_verify = 
(typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg2#22 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &obj_hash[i].lock irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem batched_entropy_u8.lock irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem kfence_freelist_lock irq_context: 0 rtnl_mutex &ndev->lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)wg-crypt-wg1#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) irq_context: 0 (wq_completion)wg-crypt-wg1#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &r->consumer_lock#2 irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem &net->sctp.local_addr_lock &net->sctp.addr_wq_lock &____s->seqcount irq_context: 0 cb_lock genl_mutex.wait_lock irq_context: 0 cb_lock &p->pi_lock irq_context: 0 cb_lock &p->pi_lock &rq->__lock irq_context: 0 cb_lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock nlk_cb_mutex-GENERIC &pcp->lock &zone->lock irq_context: 0 cb_lock nlk_cb_mutex-GENERIC &pcp->lock &zone->lock &____s->seqcount irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &tb->tb6_lock rcu_read_lock rcu_read_lock &pool->lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &tb->tb6_lock rcu_read_lock rcu_read_lock &pool->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 rtnl_mutex team->team_lock_key#9 lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &dev_addr_list_lock_key/1 &c->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &dev_addr_list_lock_key/1 &____s->seqcount irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)ipv6_addrconf 
(work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &list->lock#5 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh pool_lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock _xmit_ETHER &c->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock _xmit_ETHER &____s->seqcount irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem remove_cache_srcu irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem remove_cache_srcu quarantine_lock irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem remove_cache_srcu &c->lock irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem remove_cache_srcu &n->list_lock irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem remove_cache_srcu &obj_hash[i].lock irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock &im->lock &c->lock irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock &im->lock &____s->seqcount irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock &____s->seqcount#10 irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &list->lock#5 irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh pool_lock#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock &pcp->lock &zone->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock &pcp->lock &zone->lock &____s->seqcount irq_context: softirq rcu_read_lock &br->multicast_lock &obj_hash[i].lock pool_lock irq_context: softirq rcu_read_lock &br->multicast_lock fill_pool_map-wait-type-override pool_lock#2 irq_context: softirq rcu_read_lock &br->multicast_lock fill_pool_map-wait-type-override pool_lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &list->lock#5 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock &n->list_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock &n->list_lock &c->lock irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock &im->lock &n->list_lock irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock &im->lock &n->list_lock &c->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) 
&idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx batched_entropy_u8.lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx kfence_freelist_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &meta->lock irq_context: 0 (wq_completion)phy26 irq_context: 0 (wq_completion)phy26 (work_completion)(&local->reconfig_filter) irq_context: 0 (wq_completion)phy26 (work_completion)(&local->reconfig_filter) &local->filter_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_read_lock rcu_read_lock &pool->lock/1 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->req_wait_q &p->pi_lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->req_wait_q &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->req_wait_q &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)hci1 (work_completion)(&hdev->power_on) &hdev->req_lock (&timer.timer) irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) lock#6 &kcov->lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)wg-kex-wg1#22 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: 0 (wq_completion)wg-kex-wg2#22 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &base->lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex remove_cache_srcu irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex remove_cache_srcu quarantine_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex remove_cache_srcu &c->lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex remove_cache_srcu &n->list_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex remove_cache_srcu &obj_hash[i].lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex remove_cache_srcu rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex remove_cache_srcu rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex lweventlist_lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex lweventlist_lock &base->lock irq_context: 0 (wq_completion)events 
(linkwatch_work).work rtnl_mutex lweventlist_lock &base->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex _xmit_ETHER/1 irq_context: 0 (wq_completion)events (work_completion)(&data->fib_event_work) &data->fib_lock batched_entropy_u8.lock irq_context: 0 rtnl_mutex rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 rtnl_mutex rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock &rq->__lock irq_context: 0 rtnl_mutex devnet_rename_sem quarantine_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock &rcu_state.expedited_wq irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock remove_cache_srcu irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock remove_cache_srcu quarantine_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock remove_cache_srcu &c->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock remove_cache_srcu &n->list_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock remove_cache_srcu &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &list->lock#5 irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem rcu_read_lock rcu_node_0 irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&data->fib_event_work) &data->fib_lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&data->fib_event_work) &data->fib_lock fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)events (work_completion)(&data->fib_event_work) &data->fib_lock fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&data->fib_event_work) &data->fib_lock fill_pool_map-wait-type-override pool_lock irq_context: softirq (&in_dev->mr_ifc_timer) batched_entropy_u32.lock crngs.lock irq_context: 0 rtnl_mutex rcu_read_lock &obj_hash[i].lock pool_lock irq_context: 0 rtnl_mutex rcu_read_lock rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 rtnl_mutex rcu_read_lock rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override &c->lock irq_context: 0 rtnl_mutex rcu_read_lock rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 rtnl_mutex rcu_read_lock rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &ndev->lock &ifa->lock 
batched_entropy_u32.lock crngs.lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex remove_cache_srcu &rq->__lock irq_context: 0 sb_writers#10 &c->lock irq_context: 0 (wq_completion)events (linkwatch_work).work &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 cb_lock genl_mutex remove_cache_srcu irq_context: 0 cb_lock genl_mutex remove_cache_srcu quarantine_lock irq_context: 0 cb_lock genl_mutex remove_cache_srcu &c->lock irq_context: 0 cb_lock genl_mutex remove_cache_srcu &n->list_lock irq_context: 0 cb_lock genl_mutex remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 cb_lock genl_mutex remove_cache_srcu &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#4 &nsim_trap_data->trap_lock &n->list_lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#4 &nsim_trap_data->trap_lock &n->list_lock &c->lock irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock &im->lock rcu_read_lock &c->lock irq_context: softirq (&hsr->announce_timer) irq_context: softirq (&hsr->announce_timer) rcu_read_lock pool_lock#2 irq_context: softirq (&hsr->announce_timer) rcu_read_lock &hsr->seqnr_lock irq_context: softirq (&hsr->announce_timer) rcu_read_lock &hsr->seqnr_lock rcu_read_lock pool_lock#2 irq_context: softirq (&hsr->announce_timer) rcu_read_lock &hsr->seqnr_lock rcu_read_lock &hsr->list_lock irq_context: softirq (&hsr->announce_timer) rcu_read_lock &hsr->seqnr_lock rcu_read_lock &new_node->seq_out_lock irq_context: softirq (&hsr->announce_timer) rcu_read_lock &hsr->seqnr_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: softirq (&hsr->announce_timer) rcu_read_lock &hsr->seqnr_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &list->lock#5 irq_context: softirq (&hsr->announce_timer) rcu_read_lock &hsr->seqnr_lock &obj_hash[i].lock irq_context: softirq (&hsr->announce_timer) rcu_read_lock &hsr->seqnr_lock pool_lock#2 irq_context: softirq (&hsr->announce_timer) rcu_read_lock &obj_hash[i].lock irq_context: softirq (&hsr->announce_timer) rcu_read_lock &base->lock irq_context: softirq (&hsr->announce_timer) rcu_read_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &pcp->lock &zone->lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &pcp->lock &zone->lock &____s->seqcount irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &meta->lock irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override &c->lock irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override pool_lock irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem pcpu_alloc_mutex &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ret->b_state_lock bit_wait_table + i &p->pi_lock &cfs_rq->removed.lock irq_context: 0 rtnl_mutex team->team_lock_key#9 lock kernfs_idr_lock irq_context: 0 rtnl_mutex team->team_lock_key#9 &root->kernfs_rwsem irq_context: 0 rtnl_mutex team->team_lock_key#9 &root->kernfs_rwsem &root->kernfs_iattr_rwsem 
irq_context: 0 rtnl_mutex team->team_lock_key#9 lweventlist_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_read_lock &n->list_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_read_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock _xmit_ETHER/1 irq_context: 0 (wq_completion)events (work_completion)(&(&krcp->monitor_work)->work) rcu_callback quarantine_lock irq_context: softirq rcu_read_lock &br->hash_lock rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: softirq rcu_read_lock rcu_read_lock &ndev->lock irq_context: softirq rcu_read_lock &br->multicast_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: softirq rcu_read_lock &br->multicast_lock &____s->seqcount irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &hsr->seqnr_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &hsr->seqnr_lock rcu_read_lock &new_node->seq_out_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &hsr->seqnr_lock rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &hsr->seqnr_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &hsr->seqnr_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &list->lock#5 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &hsr->seqnr_lock &obj_hash[i].lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &hsr->seqnr_lock pool_lock#2 irq_context: 0 rtnl_mutex &tb->tb6_lock quarantine_lock irq_context: softirq (&hsr->announce_timer) rcu_read_lock &c->lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_read_lock &net->sctp.local_addr_lock &net->sctp.addr_wq_lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_read_lock &net->sctp.local_addr_lock &net->sctp.addr_wq_lock &base->lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_read_lock &net->sctp.local_addr_lock &net->sctp.addr_wq_lock &base->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex &ndev->lock &tb->tb6_lock quarantine_lock irq_context: 0 (wq_completion)gid-cache-wq (work_completion)(&ndev_work->work) &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &dev_addr_list_lock_key#2/1 rcu_read_lock _xmit_ETHER &c->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &dev_addr_list_lock_key#2/1 rcu_read_lock _xmit_ETHER &____s->seqcount irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &idev->mc_lock rcu_read_lock &pool->lock irq_context: 0 (wq_completion)events 
(linkwatch_work).work rtnl_mutex &idev->mc_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &ndev->lock &ifa->lock rcu_read_lock &pool->lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &ndev->lock &ifa->lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &ndev->lock &ifa->lock rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &tb->tb6_lock &n->list_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &tb->tb6_lock &n->list_lock &c->lock irq_context: 0 rtnl_mutex pgd_lock irq_context: 0 rtnl_mutex key irq_context: 0 rtnl_mutex percpu_counters_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)gid-cache-wq (work_completion)(&work->work) &meta->lock irq_context: 0 (wq_completion)gid-cache-wq (work_completion)(&work->work) kfence_freelist_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_TUNNEL6#2 &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh fill_pool_map-wait-type-override pool_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ret->b_state_lock bit_wait_table + i &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &sb->s_type->i_mutex_key#10 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 jbd2_handle &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 jbd2_handle &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)gid-cache-wq (work_completion)(&work->work) &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#4 &nsim_trap_data->trap_lock quarantine_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex &____s->seqcount irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &hsr->seqnr_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &hsr->seqnr_lock rcu_read_lock &____s->seqcount irq_context: 0 sb_writers#10 &n->list_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock &n->list_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock 
&n->list_lock &c->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)events (work_completion)(&data->fib_event_work) &data->fib_lock rcu_read_lock rcu_read_lock &pool->lock irq_context: 0 (wq_completion)events (work_completion)(&data->fib_event_work) &data->fib_lock rcu_read_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&data->fib_event_work) &data->fib_lock rcu_read_lock rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&ht->run_work) &ht->mutex batched_entropy_u8.lock irq_context: 0 (wq_completion)events (work_completion)(&ht->run_work) &ht->mutex kfence_freelist_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &hsr->seqnr_lock rcu_read_lock &hsr->list_lock irq_context: 0 &mm->mmap_lock remove_cache_srcu rcu_read_lock &rcu_state.gp_wq &p->pi_lock &cfs_rq->removed.lock irq_context: 0 rtnl_mutex &ndev->lock &n->list_lock irq_context: 0 rtnl_mutex &ndev->lock &n->list_lock &c->lock irq_context: 0 &sig->cred_guard_mutex tomoyo_ss rcu_read_lock rcu_read_lock &cfs_rq->removed.lock irq_context: 0 &sig->cred_guard_mutex tomoyo_ss rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_read_lock &net->sctp.local_addr_lock &net->sctp.addr_wq_lock &c->lock irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh &hsr->seqnr_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh &hsr->seqnr_lock rcu_read_lock &new_node->seq_out_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh &hsr->seqnr_lock rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh &hsr->seqnr_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh &hsr->seqnr_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &list->lock#5 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh &hsr->seqnr_lock &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh &hsr->seqnr_lock pool_lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh &hsr->seqnr_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh &hsr->seqnr_lock rcu_read_lock &____s->seqcount irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock 
rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock pool_lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock fill_pool_map-wait-type-override pool_lock irq_context: 0 rtnl_mutex &nn->netlink_tap_lock irq_context: 0 rcu_read_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 rcu_read_lock rcu_read_lock_bh pool_lock#2 irq_context: 0 rtnl_mutex (inetaddr_validator_chain).rwsem &rq->__lock irq_context: 0 rtnl_mutex dev_addr_sem team->team_lock_key#3 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 pool_lock#2 irq_context: 0 rtnl_mutex dev_addr_sem &tbl->lock krc.lock &obj_hash[i].lock irq_context: 0 rtnl_mutex dev_addr_sem &tbl->lock krc.lock &base->lock irq_context: 0 rtnl_mutex dev_addr_sem &tbl->lock krc.lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &obj_hash[i].lock pool_lock irq_context: 0 rtnl_mutex team->team_lock_key#11 _xmit_ETHER &n->list_lock &c->lock irq_context: 0 rtnl_mutex dev_addr_sem rcu_read_lock pool_lock#2 irq_context: 0 rtnl_mutex dev_addr_sem rcu_read_lock &bat_priv->tt.changes_list_lock irq_context: 0 rtnl_mutex dev_addr_sem rcu_read_lock &bat_priv->tt.changes_list_lock &obj_hash[i].lock irq_context: 0 rtnl_mutex dev_addr_sem rcu_read_lock &bat_priv->tt.changes_list_lock pool_lock#2 irq_context: 0 rtnl_mutex dev_addr_sem rcu_read_lock key#16 irq_context: 0 rtnl_mutex dev_addr_sem rcu_read_lock &obj_hash[i].lock irq_context: 0 rtnl_mutex &batadv_netdev_addr_lock_key/1 irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex j1939_netdev_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_read_lock &net->sctp.local_addr_lock &net->sctp.addr_wq_lock &n->list_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_read_lock &net->sctp.local_addr_lock &net->sctp.addr_wq_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex 
&idev->mc_lock &batadv_netdev_addr_lock_key/1 irq_context: 0 rtnl_mutex rcu_read_lock &net->sctp.local_addr_lock &net->sctp.addr_wq_lock &n->list_lock irq_context: 0 rtnl_mutex rcu_read_lock &net->sctp.local_addr_lock &net->sctp.addr_wq_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock &pcp->lock &zone->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 rtnl_mutex dev_addr_sem pgd_lock irq_context: 0 rtnl_mutex dev_addr_sem key irq_context: 0 rtnl_mutex dev_addr_sem pcpu_lock irq_context: 0 rtnl_mutex dev_addr_sem percpu_counters_lock irq_context: 0 rtnl_mutex dev_addr_sem &cfs_rq->removed.lock irq_context: softirq (&hsr->prune_timer) irq_context: softirq (&hsr->prune_timer) &hsr->list_lock irq_context: softirq (&hsr->prune_timer) &obj_hash[i].lock irq_context: softirq (&hsr->prune_timer) &base->lock irq_context: softirq (&hsr->prune_timer) &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#10 &n->list_lock &c->lock irq_context: 0 kn->active#55 &____s->seqcount#2 irq_context: softirq (&hsr->announce_timer) rcu_read_lock &hsr->seqnr_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock quarantine_lock irq_context: softirq (&pool->mayday_timer) &pool->lock irq_context: softirq (&pool->mayday_timer) &pool->lock wq_mayday_lock irq_context: softirq (&pool->mayday_timer) &pool->lock wq_mayday_lock &p->pi_lock irq_context: softirq (&pool->mayday_timer) &pool->lock wq_mayday_lock &p->pi_lock &rq->__lock irq_context: softirq (&pool->mayday_timer) &pool->lock wq_mayday_lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 wq_pool_attach_mutex &p->pi_lock irq_context: 0 wq_pool_attach_mutex &p->pi_lock &rq->__lock irq_context: 0 rtnl_mutex dev_addr_sem rcu_node_0 irq_context: 0 wq_pool_attach_mutex &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 wq_pool_attach_mutex &stopper->lock irq_context: 0 wq_pool_attach_mutex &stop_pi_lock irq_context: 0 wq_pool_attach_mutex &stop_pi_lock &rq->__lock irq_context: 0 wq_pool_attach_mutex &stop_pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 wq_pool_attach_mutex &rq->__lock irq_context: 0 &x->wait#7 irq_context: 0 wq_pool_attach_mutex &x->wait#7 irq_context: 0 &pool->lock wq_mayday_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock &tbl->lock batched_entropy_u32.lock crngs.lock irq_context: softirq rcu_read_lock &n->list_lock irq_context: softirq rcu_read_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)wg-crypt-wg1#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_read_lock &rcu_state.gp_wq irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 (wq_completion)events 
(linkwatch_work).work rtnl_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock rcu_read_lock &pool->lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock rcu_read_lock &pool->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock batched_entropy_u8.lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock kfence_freelist_lock irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem &anon_vma->rwsem fill_pool_map-wait-type-override &c->lock irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem &anon_vma->rwsem fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#11 irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#11 fs_reclaim irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#11 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#11 rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#11 &obj_hash[i].lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_read_lock &net->sctp.local_addr_lock &net->sctp.addr_wq_lock &____s->seqcount irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock _xmit_ETHER &n->list_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock _xmit_ETHER &n->list_lock &c->lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) fill_pool_map-wait-type-override rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) fill_pool_map-wait-type-override &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock 
rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &list->lock#5 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh pool_lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#3 lweventlist_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#3 lweventlist_lock &dir->lock#2 irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#3 rcu_read_lock &pool->lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#3 rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: softirq &(&bat_priv->tt.work)->timer irq_context: softirq &(&bat_priv->tt.work)->timer rcu_read_lock &pool->lock/1 irq_context: softirq &(&bat_priv->tt.work)->timer rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: softirq &(&bat_priv->tt.work)->timer rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock &____s->seqcount irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock &obj_hash[i].lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock 
rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock pool_lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &batadv_netdev_addr_lock_key/1 &c->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &list->lock#5 irq_context: 0 rtnl_mutex team->team_lock_key#10 &root->kernfs_rwsem irq_context: 0 (wq_completion)events (work_completion)(&ht->run_work) &ht->mutex remove_cache_srcu irq_context: 0 (wq_completion)events (work_completion)(&ht->run_work) &ht->mutex remove_cache_srcu quarantine_lock irq_context: 0 (wq_completion)events (work_completion)(&ht->run_work) &ht->mutex remove_cache_srcu &c->lock irq_context: 0 (wq_completion)events (work_completion)(&ht->run_work) &ht->mutex remove_cache_srcu &n->list_lock irq_context: 0 (wq_completion)events (work_completion)(&ht->run_work) &ht->mutex remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&ht->run_work) &ht->mutex remove_cache_srcu &obj_hash[i].lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->tt.work)->work) irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->tt.work)->work) key#16 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->tt.work)->work) key#20 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->tt.work)->work) &rq->__lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->tt.work)->work) &bat_priv->tt.req_list_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->tt.work)->work) &bat_priv->tt.roam_list_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->tt.work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->tt.work)->work) &base->lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->tt.work)->work) &base->lock &obj_hash[i].lock irq_context: 0 &mm->mmap_lock &anon_vma->rwsem pgd_lock irq_context: 0 &mm->mmap_lock &anon_vma->rwsem rcu_read_lock pool_lock#2 irq_context: 0 &mm->mmap_lock &anon_vma->rwsem key irq_context: 0 &mm->mmap_lock &anon_vma->rwsem pcpu_lock irq_context: 0 &mm->mmap_lock &anon_vma->rwsem percpu_counters_lock irq_context: 0 &mm->mmap_lock &anon_vma->rwsem &rq->__lock &cfs_rq->removed.lock irq_context: softirq &(&bat_priv->tt.work)->timer rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: softirq &(&bat_priv->tt.work)->timer rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &list->lock#5 irq_context: 0 rtnl_mutex dev_addr_sem quarantine_lock irq_context: 0 
rtnl_mutex dev_addr_sem remove_cache_srcu irq_context: 0 rtnl_mutex dev_addr_sem remove_cache_srcu quarantine_lock irq_context: 0 rtnl_mutex dev_addr_sem remove_cache_srcu &n->list_lock irq_context: 0 rtnl_mutex dev_addr_sem remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 rtnl_mutex dev_addr_sem remove_cache_srcu &obj_hash[i].lock irq_context: 0 rtnl_mutex dev_addr_sem remove_cache_srcu &c->lock irq_context: 0 rtnl_mutex dev_addr_sem remove_cache_srcu rcu_read_lock rcu_node_0 irq_context: 0 rtnl_mutex dev_addr_sem remove_cache_srcu rcu_read_lock &rq->__lock irq_context: 0 rtnl_mutex dev_addr_sem remove_cache_srcu rcu_read_lock &rcu_state.gp_wq irq_context: 0 rtnl_mutex dev_addr_sem remove_cache_srcu rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 rtnl_mutex dev_addr_sem remove_cache_srcu rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 rtnl_mutex dev_addr_sem remove_cache_srcu rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex remove_cache_srcu &pcp->lock &zone->lock irq_context: 0 rtnl_mutex remove_cache_srcu &pcp->lock &zone->lock &____s->seqcount irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->tt.work)->work) &cfs_rq->removed.lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) tasklist_lock &sighand->siglock &(&sig->stats_lock)->lock &____s->seqcount#5 pidmap_lock &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) tasklist_lock &sighand->siglock &(&sig->stats_lock)->lock &____s->seqcount#5 pidmap_lock pool_lock#2 irq_context: 0 rtnl_mutex dev_addr_sem remove_cache_srcu &rq->__lock irq_context: 0 (wq_completion)wg-kex-wg2#22 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg2#22 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg2#22 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &list->lock#17 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 &obj_hash[i].lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 pool_lock#2 irq_context: 0 
(wq_completion)bat_events (work_completion)(&(&bat_priv->tt.work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_read_lock &____s->seqcount irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &dev_addr_list_lock_key#2/1 &c->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &____s->seqcount irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock &c->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh pool_lock#2 irq_context: 0 &sig->cred_guard_mutex remove_cache_srcu &pcp->lock &zone->lock irq_context: 0 &sig->cred_guard_mutex remove_cache_srcu &pcp->lock &zone->lock &____s->seqcount irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock remove_cache_srcu &rq->__lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh &hsr->seqnr_lock rcu_read_lock &n->list_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh &hsr->seqnr_lock rcu_read_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &tb->tb6_lock &n->list_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &tb->tb6_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_node_0 irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem fill_pool_map-wait-type-override 
&c->lock irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)wg-kex-wg2#22 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh &r->producer_lock#2 irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex pcpu_alloc_mutex.wait_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &list->lock#5 irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) tasklist_lock &c->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock _xmit_ETHER/1 &c->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock _xmit_ETHER/1 &____s->seqcount irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 rtnl_mutex team->team_lock_key#10 &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 rtnl_mutex team->team_lock_key#10 &root->kernfs_rwsem &root->kernfs_iattr_rwsem &rq->__lock irq_context: 0 rtnl_mutex team->team_lock_key#10 remove_cache_srcu irq_context: 0 rtnl_mutex team->team_lock_key#10 remove_cache_srcu quarantine_lock irq_context: 0 rtnl_mutex team->team_lock_key#10 remove_cache_srcu &c->lock irq_context: 0 rtnl_mutex team->team_lock_key#10 remove_cache_srcu &n->list_lock irq_context: 0 rtnl_mutex &vlan_netdev_addr_lock_key/1 irq_context: 0 rtnl_mutex &vlan_netdev_addr_lock_key/1 _xmit_ETHER irq_context: 0 rtnl_mutex &vlan_netdev_addr_lock_key/1 pool_lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &ndev->lock &ifa->lock rcu_read_lock &pool->lock pool_lock#2 irq_context: softirq (&hsr->announce_timer) rcu_read_lock &n->list_lock irq_context: softirq (&hsr->announce_timer) rcu_read_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_read_lock &obj_hash[i].lock pool_lock irq_context: 0 &sig->cred_guard_mutex remove_cache_srcu &rq->__lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->nc.work)->work) &cfs_rq->removed.lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->nc.work)->work) pool_lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &pool->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &hsr->seqnr_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh 
&hsr->seqnr_lock rcu_read_lock &new_node->seq_out_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &hsr->seqnr_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &hsr->seqnr_lock rcu_read_lock &n->list_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &hsr->seqnr_lock rcu_read_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &hsr->seqnr_lock rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &hsr->seqnr_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &hsr->seqnr_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &list->lock#5 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &hsr->seqnr_lock &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &hsr->seqnr_lock pool_lock#2 irq_context: 0 rtnl_mutex dev_addr_sem _xmit_ETHER irq_context: 0 rtnl_mutex dev_addr_sem _xmit_ETHER pool_lock#2 irq_context: 0 rtnl_mutex dev_addr_sem _xmit_ETHER (console_sem).lock irq_context: 0 rtnl_mutex dev_addr_sem _xmit_ETHER console_owner_lock irq_context: 0 rtnl_mutex dev_addr_sem _xmit_ETHER console_owner irq_context: 0 rtnl_mutex dev_addr_sem _xmit_ETHER console_lock console_srcu console_owner_lock irq_context: 0 rtnl_mutex dev_addr_sem _xmit_ETHER console_lock console_srcu console_owner irq_context: 0 rtnl_mutex dev_addr_sem _xmit_ETHER console_lock console_srcu console_owner &port_lock_key irq_context: 0 rtnl_mutex dev_addr_sem _xmit_ETHER console_lock console_srcu console_owner console_owner_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock remove_cache_srcu irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock remove_cache_srcu quarantine_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock remove_cache_srcu &c->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock remove_cache_srcu &n->list_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock remove_cache_srcu &obj_hash[i].lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)mld 
(work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &hsr->seqnr_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &hsr->seqnr_lock rcu_read_lock &new_node->seq_out_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &hsr->seqnr_lock rcu_read_lock &____s->seqcount irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &hsr->seqnr_lock rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &hsr->seqnr_lock rcu_read_lock rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &hsr->seqnr_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &hsr->seqnr_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &hsr->seqnr_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &list->lock#5 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &hsr->seqnr_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &hsr->seqnr_lock &obj_hash[i].lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &hsr->seqnr_lock pool_lock#2 irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem pgd_lock irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem key irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem pcpu_lock irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem percpu_counters_lock irq_context: 0 rtnl_mutex &macvlan_netdev_addr_lock_key/1 irq_context: 0 rtnl_mutex _xmit_ETHER (console_sem).lock irq_context: 0 rtnl_mutex _xmit_ETHER console_lock console_srcu console_owner_lock irq_context: 0 rtnl_mutex _xmit_ETHER console_lock console_srcu console_owner irq_context: 0 rtnl_mutex _xmit_ETHER console_lock console_srcu console_owner &port_lock_key irq_context: 0 rtnl_mutex _xmit_ETHER console_lock console_srcu console_owner console_owner_lock irq_context: softirq (&ndev->rs_timer) irq_context: softirq (&ndev->rs_timer) &ndev->lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock &ndev->lock irq_context: softirq (&ndev->rs_timer) pool_lock#2 irq_context: softirq (&ndev->rs_timer) &dir->lock#2 irq_context: softirq (&ndev->rs_timer) &ul->lock#2 irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#12 
irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &ndev->lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh pool_lock#2 irq_context: softirq (&ndev->rs_timer) &ndev->lock batched_entropy_u32.lock irq_context: softirq (&ndev->rs_timer) &ndev->lock &obj_hash[i].lock irq_context: softirq (&ndev->rs_timer) &ndev->lock &base->lock irq_context: softirq (&ndev->rs_timer) &ndev->lock &base->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex &macvlan_netdev_addr_lock_key/1 _xmit_ETHER irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &rq->__lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &vlan_netdev_addr_lock_key/1 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &vlan_netdev_addr_lock_key/1 _xmit_ETHER irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &vlan_netdev_addr_lock_key/1 _xmit_ETHER pool_lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &macvlan_netdev_addr_lock_key/1 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &macvlan_netdev_addr_lock_key/1 _xmit_ETHER irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &macvlan_netdev_addr_lock_key/1 _xmit_ETHER pool_lock#2 irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem &ipvlan->addrs_lock irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem &ipvlan->addrs_lock pool_lock#2 irq_context: 0 rtnl_mutex rcu_read_lock &ipvlan->addrs_lock irq_context: 0 rtnl_mutex rcu_read_lock &ipvlan->addrs_lock batched_entropy_u8.lock irq_context: 0 rtnl_mutex rcu_read_lock &ipvlan->addrs_lock kfence_freelist_lock irq_context: softirq (&hsr->announce_timer) rcu_read_lock &hsr->seqnr_lock rcu_read_lock &____s->seqcount irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock fill_pool_map-wait-type-override pool_lock irq_context: softirq rcu_read_lock &list->lock#16 irq_context: softirq rcu_read_lock rcu_read_lock &pool->lock/1 irq_context: softirq rcu_read_lock rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: softirq rcu_read_lock rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: softirq rcu_read_lock rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: softirq rcu_read_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: softirq rcu_read_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex rcu_read_lock &ipvlan->addrs_lock pool_lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &dev_addr_list_lock_key#2/1 rcu_read_lock _xmit_ETHER 
&pcp->lock &zone->lock irq_context: softirq (&dsp_spl_tl) dsp_lock fill_pool_map-wait-type-override pool_lock#2 irq_context: softirq (&dsp_spl_tl) dsp_lock fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh pool_lock#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh rcu_read_lock &list->lock#5 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &dev_addr_list_lock_key#3/1 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock &n->list_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh quarantine_lock irq_context: 0 (wq_completion)wg-kex-wg2#22 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh rcu_read_lock &pool->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&port->bc_work) irq_context: 0 (wq_completion)events_unbound (work_completion)(&port->bc_work) &list->lock#16 irq_context: 0 (wq_completion)events_unbound (work_completion)(&port->bc_work) &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&port->bc_work) &rq->__lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&port->bc_work) pool_lock#2 irq_context: 0 (wq_completion)wg-kex-wg2#22 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &vlan_netdev_addr_lock_key/1 &c->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) &p->pi_lock &cfs_rq->removed.lock irq_context: softirq (&hsr->announce_timer) rcu_read_lock &hsr->seqnr_lock rcu_read_lock &n->list_lock irq_context: softirq (&hsr->announce_timer) rcu_read_lock &hsr->seqnr_lock rcu_read_lock &n->list_lock &c->lock irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem remove_cache_srcu rcu_read_lock rcu_node_0 irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem remove_cache_srcu 
rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &pcp->lock &zone->lock irq_context: 0 rtnl_mutex &macsec_netdev_addr_lock_key/1 irq_context: 0 rtnl_mutex &macsec_netdev_addr_lock_key/1 _xmit_ETHER irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &macvlan_netdev_addr_lock_key/1 &c->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &macvlan_netdev_addr_lock_key/1 &____s->seqcount irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &macsec_netdev_addr_lock_key/1 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &macsec_netdev_addr_lock_key/1 _xmit_ETHER irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &macsec_netdev_addr_lock_key/1 _xmit_ETHER pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&data->fib_event_work) &data->fib_lock remove_cache_srcu irq_context: 0 (wq_completion)events (work_completion)(&data->fib_event_work) &data->fib_lock remove_cache_srcu quarantine_lock irq_context: 0 (wq_completion)events (work_completion)(&data->fib_event_work) &data->fib_lock remove_cache_srcu &c->lock irq_context: 0 (wq_completion)events (work_completion)(&data->fib_event_work) &data->fib_lock remove_cache_srcu &n->list_lock irq_context: 0 (wq_completion)events (work_completion)(&data->fib_event_work) &data->fib_lock remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&data->fib_event_work) &data->fib_lock remove_cache_srcu &obj_hash[i].lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock#2 rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock#2 rcu_read_lock_bh pool_lock#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock#2 rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock#2 rcu_read_lock_bh rcu_read_lock &list->lock#5 irq_context: softirq rcu_read_lock rcu_read_lock &pool->lock/1 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &macsec_netdev_addr_lock_key/1 &c->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &macsec_netdev_addr_lock_key/1 &____s->seqcount irq_context: 0 remove_cache_srcu pool_lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock &pcp->lock &zone->lock irq_context: 0 rtnl_mutex dev_addr_sem &hard_iface->bat_iv.ogm_buff_mutex irq_context: 0 rtnl_mutex key#21 irq_context: 0 rtnl_mutex &bat_priv->tt.commit_lock irq_context: 0 (wq_completion)ipv6_addrconf 
(work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh key#21 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh &entry->crc_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock &pcp->lock &zone->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock &____s->seqcount irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock &tbl->lock &n->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock &tbl->lock &c->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock &tbl->lock &n->list_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock &tbl->lock &n->list_lock &c->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock &tbl->lock &n->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock &tbl->lock &n->lock &____s->seqcount#9 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock &tbl->lock nl_table_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock &tbl->lock rlock-AF_NETLINK irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock &tbl->lock rcu_read_lock &ei->socket.wq.wait irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock &tbl->lock rcu_read_lock &ei->socket.wq.wait &p->pi_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock &tbl->lock rcu_read_lock &ei->socket.wq.wait &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock &tbl->lock rcu_read_lock &ei->socket.wq.wait &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock &tbl->lock nl_table_wait.lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock &tbl->lock rcu_read_lock lock#8 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock &tbl->lock rcu_read_lock id_table_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock &tbl->lock &dir->lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex 
rcu_read_lock rcu_read_lock &tbl->lock krc.lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock &tbl->lock &____s->seqcount irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh pool_lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh rcu_read_lock &list->lock#5 irq_context: 0 rtnl_mutex mmu_notifier_invalidate_range_start irq_context: 0 rtnl_mutex &sb->s_type->i_lock_key#8 irq_context: 0 rtnl_mutex &dir->lock irq_context: 0 rtnl_mutex k-sk_lock-AF_INET irq_context: 0 rtnl_mutex k-sk_lock-AF_INET k-slock-AF_INET irq_context: 0 rtnl_mutex k-sk_lock-AF_INET &table->hash[i].lock irq_context: 0 rtnl_mutex k-sk_lock-AF_INET &table->hash[i].lock k-clock-AF_INET irq_context: 0 rtnl_mutex k-sk_lock-AF_INET &table->hash[i].lock &table->hash2[i].lock irq_context: 0 rtnl_mutex k-slock-AF_INET irq_context: 0 rtnl_mutex rcu_read_lock (console_sem).lock irq_context: 0 rtnl_mutex rcu_read_lock console_lock console_srcu console_owner_lock irq_context: 0 rtnl_mutex rcu_read_lock console_lock console_srcu console_owner irq_context: 0 rtnl_mutex rcu_read_lock console_lock console_srcu console_owner &port_lock_key irq_context: 0 &sig->cred_guard_mutex rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 &sig->cred_guard_mutex rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 rtnl_mutex rcu_read_lock console_lock console_srcu console_owner console_owner_lock irq_context: 0 rtnl_mutex rcu_read_lock &cfs_rq->removed.lock irq_context: softirq (&ndev->rs_timer) &ndev->lock batched_entropy_u32.lock crngs.lock irq_context: 0 rtnl_mutex k-sk_lock-AF_INET6 irq_context: 0 rtnl_mutex k-sk_lock-AF_INET6 k-slock-AF_INET6 irq_context: 0 rtnl_mutex k-slock-AF_INET6 irq_context: 0 rtnl_mutex k-sk_lock-AF_INET6 &table->hash[i].lock irq_context: 0 rtnl_mutex k-sk_lock-AF_INET6 &table->hash[i].lock k-clock-AF_INET6 irq_context: 0 rtnl_mutex k-sk_lock-AF_INET6 &table->hash[i].lock &table->hash2[i].lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &ul->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock crngs.lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock batched_entropy_u32.lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) 
&idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &____s->seqcount#7 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &nf_conntrack_locks[i] irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 batched_entropy_u8.lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: softirq &(&ifa->dad_work)->timer rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 rtnl_mutex &wg->device_update_lock irq_context: 0 rtnl_mutex &wg->device_update_lock fs_reclaim irq_context: 0 rtnl_mutex &wg->device_update_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 rtnl_mutex &wg->device_update_lock pool_lock#2 irq_context: 0 rtnl_mutex &wg->device_update_lock mmu_notifier_invalidate_range_start irq_context: 0 rtnl_mutex &wg->device_update_lock &sb->s_type->i_lock_key#8 irq_context: 0 rtnl_mutex &wg->device_update_lock &dir->lock irq_context: 0 rtnl_mutex &wg->device_update_lock &obj_hash[i].lock irq_context: 0 rtnl_mutex &wg->device_update_lock k-sk_lock-AF_INET irq_context: 0 rtnl_mutex &wg->device_update_lock k-sk_lock-AF_INET k-slock-AF_INET irq_context: 0 rtnl_mutex &wg->device_update_lock k-sk_lock-AF_INET &table->hash[i].lock irq_context: 0 rtnl_mutex &wg->device_update_lock k-sk_lock-AF_INET &table->hash[i].lock k-clock-AF_INET irq_context: 0 rtnl_mutex &wg->device_update_lock k-sk_lock-AF_INET &table->hash[i].lock &table->hash2[i].lock irq_context: 0 rtnl_mutex &wg->device_update_lock k-slock-AF_INET irq_context: 0 rtnl_mutex &wg->device_update_lock cpu_hotplug_lock irq_context: 0 rtnl_mutex &wg->device_update_lock cpu_hotplug_lock jump_label_mutex irq_context: 0 rtnl_mutex &wg->device_update_lock cpu_hotplug_lock jump_label_mutex text_mutex irq_context: 0 rtnl_mutex 
&wg->device_update_lock cpu_hotplug_lock jump_label_mutex text_mutex ptlock_ptr(page)#2 irq_context: 0 rtnl_mutex &wg->device_update_lock k-sk_lock-AF_INET6 irq_context: 0 rtnl_mutex &wg->device_update_lock k-sk_lock-AF_INET6 k-slock-AF_INET6 irq_context: 0 rtnl_mutex &wg->device_update_lock k-slock-AF_INET6 irq_context: 0 rtnl_mutex &wg->device_update_lock k-sk_lock-AF_INET6 &table->hash[i].lock irq_context: 0 rtnl_mutex &wg->device_update_lock k-sk_lock-AF_INET6 &table->hash[i].lock k-clock-AF_INET6 irq_context: 0 rtnl_mutex &wg->device_update_lock k-sk_lock-AF_INET6 &table->hash[i].lock &table->hash2[i].lock irq_context: 0 rtnl_mutex &wg->device_update_lock &wg->socket_update_lock irq_context: 0 rtnl_mutex &wg->device_update_lock rcu_state.exp_mutex rcu_node_0 irq_context: 0 rtnl_mutex &wg->device_update_lock rcu_state.exp_mutex &obj_hash[i].lock irq_context: 0 rtnl_mutex &wg->device_update_lock rcu_state.exp_mutex rcu_read_lock &pool->lock irq_context: 0 rtnl_mutex &wg->device_update_lock rcu_state.exp_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex &wg->device_update_lock rcu_state.exp_mutex &rnp->exp_wq[3] irq_context: 0 rtnl_mutex &wg->device_update_lock rcu_state.exp_mutex &rq->__lock irq_context: 0 rtnl_mutex &wg->device_update_lock rcu_state.exp_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 &mm->mmap_lock rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 rtnl_mutex &wg->device_update_lock &list->lock#17 irq_context: 0 rtnl_mutex &wg->device_update_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 rtnl_mutex &wg->device_update_lock rcu_read_lock_bh rcu_read_lock &pool->lock/1 irq_context: 0 rtnl_mutex &wg->device_update_lock rcu_read_lock_bh rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 rtnl_mutex &wg->device_update_lock rcu_read_lock_bh rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 rtnl_mutex &wg->device_update_lock rcu_read_lock_bh rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 rtnl_mutex &wg->device_update_lock rcu_read_lock_bh rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 rtnl_mutex &wg->device_update_lock rcu_read_lock_bh rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &wg->device_update_lock &c->lock irq_context: 0 rtnl_mutex &wg->device_update_lock &rq->__lock irq_context: 0 rtnl_mutex team->team_lock_key#9 lweventlist_lock &dir->lock#2 irq_context: 0 rtnl_mutex team->team_lock_key#9 rcu_read_lock &pool->lock irq_context: 0 rtnl_mutex team->team_lock_key#9 rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex team->team_lock_key#9 rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 rtnl_mutex team->team_lock_key#9 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 rtnl_mutex team->team_lock_key#9 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex team->team_lock_key#9 &rq->__lock irq_context: 0 rtnl_mutex team->team_lock_key#9 (console_sem).lock irq_context: 0 rtnl_mutex team->team_lock_key#9 console_lock console_srcu console_owner_lock irq_context: 0 rtnl_mutex team->team_lock_key#9 console_lock console_srcu console_owner irq_context: 0 
rtnl_mutex team->team_lock_key#9 console_lock console_srcu console_owner &port_lock_key irq_context: 0 rtnl_mutex team->team_lock_key#9 console_lock console_srcu console_owner console_owner_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#9 irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#9 fs_reclaim irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#9 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#9 rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#9 &c->lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#9 &obj_hash[i].lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#9 nl_table_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#9 nl_table_wait.lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#9 net_rwsem irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#9 net_rwsem &list->lock#2 irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#9 &tn->lock irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 (wq_completion)hci2#2 (work_completion)(&(&hdev->cmd_timer)->work) irq_context: 0 (wq_completion)hci2#2 (work_completion)(&(&hdev->cmd_timer)->work) (console_sem).lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&(&hdev->cmd_timer)->work) console_lock console_srcu console_owner_lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&(&hdev->cmd_timer)->work) console_lock console_srcu console_owner irq_context: 0 (wq_completion)hci2#2 (work_completion)(&(&hdev->cmd_timer)->work) console_lock console_srcu console_owner &port_lock_key irq_context: 0 (wq_completion)hci2#2 (work_completion)(&(&hdev->cmd_timer)->work) console_lock console_srcu console_owner console_owner_lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&(&hdev->cmd_timer)->work) rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)hci2#2 (work_completion)(&(&hdev->cmd_timer)->work) rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&(&hdev->cmd_timer)->work) rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&(&hdev->cmd_timer)->work) rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&(&hdev->cmd_timer)->work) rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->cmd_work) &n->list_lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->cmd_work) &n->list_lock &c->lock irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6 rcu_read_lock batched_entropy_u8.lock irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6 rcu_read_lock kfence_freelist_lock irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6 rcu_read_lock pool_lock#2 irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock &____s->seqcount#7 irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6 
rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: softirq rcu_read_lock rcu_read_lock &r->producer_lock#2 irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: softirq rcu_read_lock rcu_read_lock &meta->lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->cmd_work) rcu_read_lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->cmd_work) rcu_read_lock fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->cmd_work) rcu_read_lock fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->cmd_work) rcu_read_lock fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->cmd_work) rcu_read_lock fill_pool_map-wait-type-override pool_lock irq_context: 0 tomoyo_ss remove_cache_srcu &rcu_state.expedited_wq irq_context: 0 rtnl_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 (wq_completion)bond4 (work_completion)(&(&bond->ad_work)->work) &rq->__lock irq_context: 0 (wq_completion)wg-crypt-wg1#9 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)wg-crypt-wg1#9 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-crypt-wg1#9 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh &base->lock irq_context: 0 (wq_completion)wg-crypt-wg1#9 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg1#9 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 (wq_completion)wg-crypt-wg1#9 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)wg-crypt-wg1#9 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &ul->lock irq_context: 0 (wq_completion)wg-crypt-wg1#9 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh batched_entropy_u32.lock irq_context: 0 (wq_completion)wg-crypt-wg1#9 (work_completion)(&peer->transmit_packet_work) batched_entropy_u8.lock irq_context: 0 (wq_completion)wg-crypt-wg1#9 (work_completion)(&peer->transmit_packet_work) batched_entropy_u8.lock crngs.lock irq_context: 0 (wq_completion)wg-crypt-wg0#9 (work_completion)(&peer->transmit_packet_work) batched_entropy_u8.lock irq_context: 0 (wq_completion)events 
(work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#9 rcu_node_0 irq_context: 0 (wq_completion)wg-crypt-wg2#9 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh &base->lock irq_context: 0 (wq_completion)wg-crypt-wg2#9 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg2#9 (work_completion)(&peer->transmit_packet_work) &base->lock irq_context: 0 (wq_completion)wg-crypt-wg2#9 (work_completion)(&peer->transmit_packet_work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg2#9 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg2#9 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh batched_entropy_u32.lock irq_context: 0 (wq_completion)wg-crypt-wg2#9 (work_completion)(&peer->transmit_packet_work) batched_entropy_u8.lock irq_context: 0 &mm->mmap_lock remove_cache_srcu &rcu_state.expedited_wq irq_context: 0 &mm->mmap_lock remove_cache_srcu &rcu_state.expedited_wq &p->pi_lock irq_context: 0 &mm->mmap_lock remove_cache_srcu &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 &mm->mmap_lock remove_cache_srcu &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#9 &nsim_trap_data->trap_lock &pcp->lock &zone->lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#9 &nsim_trap_data->trap_lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 (wq_completion)wg-kex-wg1#17 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 rtnl_mutex &wg->device_update_lock rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 rtnl_mutex &wg->device_update_lock rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 rtnl_mutex &wg->device_update_lock rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &wg->device_update_lock rcu_state.exp_mutex &rnp->exp_wq[0] irq_context: 0 (wq_completion)wg-kex-wg1#17 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &n->list_lock irq_context: 0 (wq_completion)wg-kex-wg1#17 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)wg-kex-wg1#17 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &ul->lock irq_context: 0 (wq_completion)wg-kex-wg1#17 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh batched_entropy_u32.lock irq_context: 0 (wq_completion)wg-kex-wg1#17 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh batched_entropy_u32.lock crngs.lock irq_context: 0 (wq_completion)wg-kex-wg0#18 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) 
&handshake->lock rcu_read_lock_bh &peer->keypairs.keypair_update_lock &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg0#18 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock rcu_read_lock_bh &peer->keypairs.keypair_update_lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg0#18 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg1#18 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &base->lock irq_context: 0 (wq_completion)wg-kex-wg1#18 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg1#18 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &list->lock#17 irq_context: 0 (wq_completion)wg-kex-wg1#18 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh &r->producer_lock#2 irq_context: 0 (wq_completion)wg-kex-wg1#18 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh rcu_read_lock &pool->lock irq_context: 0 (wq_completion)wg-kex-wg1#18 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg1#18 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); 
(typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 (wq_completion)wg-kex-wg1#18 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)wg-kex-wg1#18 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-crypt-wg1#9 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 fs_reclaim &rq->__lock &cfs_rq->removed.lock irq_context: 0 &p->lock remove_cache_srcu rcu_read_lock &cfs_rq->removed.lock irq_context: 0 &p->lock remove_cache_srcu rcu_read_lock &obj_hash[i].lock irq_context: 0 rtnl_mutex rcu_read_lock &po->bind_lock irq_context: 0 rtnl_mutex rcu_read_lock &po->bind_lock ptype_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &rq->__lock irq_context: 0 (wq_completion)wg-crypt-wg0#9 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh batched_entropy_u32.lock irq_context: 0 (wq_completion)wg-crypt-wg0#9 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh batched_entropy_u32.lock crngs.lock irq_context: 0 cgroup_threadgroup_rwsem &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET pool_lock#2 irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET batched_entropy_u32.lock irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET &obj_hash[i].lock irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET rcu_read_lock rcu_read_lock &____s->seqcount#7 irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET rcu_read_lock &____s->seqcount#10 irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET rcu_read_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: 0 &type->i_mutex_dir_key#3 sb_writers#4 jbd2_handle &ret->b_state_lock bit_wait_table + i &p->pi_lock irq_context: 0 &type->i_mutex_dir_key#3 sb_writers#4 jbd2_handle &ret->b_state_lock bit_wait_table + i &p->pi_lock &rq->__lock irq_context: softirq &(&idev->mc_ifc_work)->timer rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &macvlan_netdev_addr_lock_key/1 _xmit_ETHER &c->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex 
&idev->mc_lock &macvlan_netdev_addr_lock_key/1 _xmit_ETHER &____s->seqcount irq_context: 0 rtnl_mutex &wg->device_update_lock &____s->seqcount irq_context: 0 rtnl_mutex &wg->device_update_lock rcu_state.exp_mutex &rnp->exp_wq[1] irq_context: 0 &type->i_mutex_dir_key#3 sb_writers#4 jbd2_handle &ret->b_state_lock bit_wait_table + i &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal &pcp->lock &zone->lock irq_context: 0 &fsnotify_mark_srcu rcu_read_lock &rcu_state.gp_wq irq_context: 0 &fsnotify_mark_srcu rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 &fsnotify_mark_srcu rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 &fsnotify_mark_srcu rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-crypt-wg0#9 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)wg-crypt-wg0#9 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &ul->lock irq_context: 0 rcu_state.barrier_mutex pgd_lock irq_context: 0 rcu_state.barrier_mutex stock_lock irq_context: 0 rcu_state.barrier_mutex rcu_read_lock &____s->seqcount irq_context: 0 rcu_state.barrier_mutex rcu_read_lock pool_lock#2 irq_context: 0 rcu_state.barrier_mutex key irq_context: 0 rcu_state.barrier_mutex pcpu_lock irq_context: 0 rcu_state.barrier_mutex percpu_counters_lock irq_context: 0 misc_mtx rfkill_global_mutex irq_context: 0 rcu_state.barrier_mutex pcpu_lock stock_lock irq_context: 0 &fsnotify_mark_srcu rcu_read_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 misc_mtx rfkill_global_mutex &data->mtx irq_context: 0 (wq_completion)events_power_efficient (gc_work).work fill_pool_map-wait-type-override &c->lock irq_context: 0 misc_mtx rfkill_global_mutex &data->mtx fs_reclaim irq_context: 0 (wq_completion)events_power_efficient (gc_work).work fill_pool_map-wait-type-override &n->list_lock irq_context: 0 misc_mtx rfkill_global_mutex &data->mtx fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events_power_efficient (gc_work).work fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 misc_mtx rfkill_global_mutex &data->mtx pool_lock#2 irq_context: 0 (wq_completion)wg-crypt-wg0#9 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 misc_mtx rfkill_global_mutex &data->mtx &rfkill->lock irq_context: 0 (wq_completion)wg-crypt-wg0#9 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 misc_mtx rfkill_global_mutex &data->mtx &c->lock irq_context: 0 misc_mtx rfkill_global_mutex &data->mtx &____s->seqcount irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 rcu_read_lock rcu_read_lock pgd_lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 rcu_read_lock rcu_read_lock stock_lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 rcu_read_lock rcu_read_lock key irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 rcu_read_lock rcu_read_lock pcpu_lock irq_context: 0 cb_lock genl_mutex rcu_read_lock pool_lock#2 irq_context: 0 cb_lock genl_mutex rcu_read_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 cb_lock genl_mutex rcu_read_lock rcu_read_lock_bh pool_lock#2 irq_context: 0 cb_lock rcu_read_lock pool_lock#2 irq_context: 0 
cb_lock rcu_read_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 cb_lock rcu_read_lock rcu_read_lock_bh pool_lock#2 irq_context: 0 cb_lock genl_mutex hwsim_radio_lock irq_context: 0 cb_lock genl_mutex &x->wait#9 irq_context: 0 cb_lock genl_mutex batched_entropy_u32.lock irq_context: 0 cb_lock genl_mutex &obj_hash[i].lock pool_lock irq_context: 0 cb_lock genl_mutex &k->list_lock irq_context: 0 cb_lock genl_mutex gdp_mutex irq_context: 0 cb_lock genl_mutex gdp_mutex &k->list_lock irq_context: 0 cb_lock genl_mutex lock irq_context: 0 cb_lock genl_mutex lock kernfs_idr_lock irq_context: 0 cb_lock genl_mutex &root->kernfs_rwsem irq_context: 0 cb_lock genl_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 cb_lock genl_mutex bus_type_sem irq_context: 0 cb_lock genl_mutex sysfs_symlink_target_lock irq_context: 0 cb_lock genl_mutex &root->kernfs_rwsem irq_context: 0 cb_lock genl_mutex &dev->power.lock irq_context: 0 cb_lock genl_mutex dpm_list_mtx irq_context: 0 cb_lock genl_mutex &pcp->lock &zone->lock irq_context: 0 cb_lock genl_mutex &pcp->lock &zone->lock &____s->seqcount irq_context: 0 cb_lock genl_mutex uevent_sock_mutex irq_context: 0 cb_lock genl_mutex uevent_sock_mutex fs_reclaim irq_context: 0 cb_lock genl_mutex uevent_sock_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 cb_lock genl_mutex uevent_sock_mutex pool_lock#2 irq_context: 0 cb_lock genl_mutex uevent_sock_mutex nl_table_lock irq_context: 0 cb_lock genl_mutex uevent_sock_mutex rlock-AF_NETLINK irq_context: 0 cb_lock genl_mutex uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait irq_context: 0 cb_lock genl_mutex uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock irq_context: 0 cb_lock genl_mutex uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq irq_context: 0 cb_lock genl_mutex uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock irq_context: 0 cb_lock genl_mutex uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock &rq->__lock irq_context: 0 cb_lock genl_mutex uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock genl_mutex uevent_sock_mutex rcu_read_lock rcu_node_0 irq_context: 0 cb_lock genl_mutex uevent_sock_mutex rcu_read_lock &rq->__lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 rcu_read_lock rcu_read_lock percpu_counters_lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 rcu_read_lock rcu_read_lock pcpu_lock stock_lock irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#8 &of->mutex kn->active#5 rcu_read_lock &rcu_state.expedited_wq irq_context: 0 sb_writers#8 &of->mutex kn->active#5 rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#5 rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#8 &of->mutex kn->active#5 rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#9 &nsim_trap_data->trap_lock &base->lock irq_context: 0 (wq_completion)events 
(work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#9 &nsim_trap_data->trap_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg1#9 (work_completion)(&peer->transmit_packet_work) &base->lock irq_context: 0 (wq_completion)wg-crypt-wg1#9 (work_completion)(&peer->transmit_packet_work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond4 (work_completion)(&(&bond->ad_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &journal->j_state_lock &journal->j_wait_commit &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &p->lock pgd_lock irq_context: 0 &p->lock stock_lock irq_context: 0 &p->lock key irq_context: 0 &p->lock pcpu_lock irq_context: 0 &p->lock percpu_counters_lock irq_context: 0 &p->lock pcpu_lock stock_lock irq_context: 0 (wq_completion)events drain_vmap_work vmap_purge_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)wg-crypt-wg0#9 (work_completion)(&peer->transmit_packet_work) &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 &net->packet.sklist_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 &dentry->d_lock &wq#2 &p->pi_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 &dentry->d_lock &wq#2 &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 &dentry->d_lock &wq#2 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 &obj_hash[i].lock irq_context: 0 (wq_completion)bond2 (work_completion)(&(&bond->ad_work)->work) &rq->__lock irq_context: 0 &u->iolock &mm->mmap_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem &anon_vma->rwsem rcu_read_lock &rcu_state.expedited_wq irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem &anon_vma->rwsem rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem &anon_vma->rwsem rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem &anon_vma->rwsem rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#9 &rcu_state.expedited_wq irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#9 &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#9 &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#9 &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 vlan_ioctl_mutex rtnl_mutex &tbl->lock &____s->seqcount#2 irq_context: 0 vlan_ioctl_mutex rtnl_mutex &tbl->lock &____s->seqcount irq_context: 0 vlan_ioctl_mutex rtnl_mutex &ul->lock#2 pool_lock#2 irq_context: 0 vlan_ioctl_mutex rtnl_mutex &ul->lock#2 &dir->lock#2 irq_context: 0 vlan_ioctl_mutex rtnl_mutex uevent_sock_mutex &____s->seqcount#2 irq_context: 0 (wq_completion)wg-crypt-wg0#9 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &n->list_lock irq_context: 0 (wq_completion)wg-crypt-wg0#9 (work_completion)(&peer->transmit_packet_work) 
&peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &n->list_lock &c->lock irq_context: 0 &mm->mmap_lock &anon_vma->rwsem &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &xa->xa_lock#6 stock_lock irq_context: 0 sb_writers#4 &xa->xa_lock#6 pool_lock#2 irq_context: 0 sb_writers#4 &xa->xa_lock#6 &c->lock irq_context: 0 sb_writers#4 (console_sem).lock irq_context: 0 sb_writers#4 console_lock console_srcu console_owner_lock irq_context: 0 sb_writers#4 console_lock console_srcu console_owner irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock_bh &r->producer_lock#2 irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &pool->lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 sb_writers#4 console_lock console_srcu console_owner &port_lock_key irq_context: 0 sb_writers#4 console_lock console_srcu console_owner console_owner_lock irq_context: 0 sb_writers#4 &journal->j_state_lock &journal->j_wait_updates irq_context: 0 sb_writers#4 &journal->j_barrier irq_context: 0 sb_writers#4 &journal->j_barrier &journal->j_state_lock irq_context: softirq &keypair->receiving_counter.lock irq_context: softirq &peer->keypairs.keypair_update_lock irq_context: softirq &list->lock#17 irq_context: softirq rcu_read_lock_bh &r->producer_lock#2 irq_context: softirq rcu_read_lock_bh rcu_read_lock &pool->lock irq_context: softirq rcu_read_lock_bh rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: softirq rcu_read_lock_bh rcu_read_lock &pool->lock &p->pi_lock irq_context: softirq rcu_read_lock_bh &base->lock irq_context: 0 sb_writers#4 &journal->j_barrier &journal->j_state_lock &journal->j_wait_commit irq_context: 0 sb_writers#4 &journal->j_barrier &journal->j_state_lock &journal->j_wait_commit &p->pi_lock irq_context: 0 sb_writers#4 &journal->j_barrier &journal->j_state_lock &journal->j_wait_commit &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &journal->j_barrier &journal->j_state_lock &journal->j_wait_commit &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &journal->j_barrier &rq->__lock irq_context: 0 sb_writers#4 &journal->j_barrier &journal->j_state_lock irq_context: 0 sb_writers#4 &journal->j_barrier jbd2_handle irq_context: 0 sb_writers#4 &journal->j_barrier &journal->j_wait_commit irq_context: 0 sb_writers#4 &journal->j_barrier &journal->j_wait_done_commit irq_context: 0 sb_writers#4 &journal->j_barrier &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &journal->j_barrier &journal->j_list_lock irq_context: 0 sb_writers#4 &journal->j_barrier &journal->j_checkpoint_mutex irq_context: 0 sb_writers#4 &journal->j_barrier &journal->j_checkpoint_mutex &journal->j_state_lock irq_context: 0 sb_writers#4 &journal->j_barrier &journal->j_checkpoint_mutex &journal->j_state_lock &journal->j_list_lock irq_context: 0 sb_writers#4 &journal->j_barrier &journal->j_checkpoint_mutex tk_core.seq.seqcount irq_context: 0 sb_writers#4 &journal->j_barrier &journal->j_checkpoint_mutex &fq->mq_flush_lock irq_context: 0 sb_writers#4 &journal->j_barrier &journal->j_checkpoint_mutex &fq->mq_flush_lock tk_core.seq.seqcount irq_context: 0 sb_writers#4 &journal->j_barrier &journal->j_checkpoint_mutex &fq->mq_flush_lock &q->requeue_lock irq_context: 0 sb_writers#4 &journal->j_barrier &journal->j_checkpoint_mutex &fq->mq_flush_lock &obj_hash[i].lock 
irq_context: 0 sb_writers#4 &journal->j_barrier &journal->j_checkpoint_mutex &fq->mq_flush_lock rcu_read_lock &pool->lock irq_context: 0 sb_writers#4 &journal->j_barrier &journal->j_checkpoint_mutex &fq->mq_flush_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &journal->j_barrier &journal->j_checkpoint_mutex &fq->mq_flush_lock rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 sb_writers#4 &journal->j_barrier &journal->j_checkpoint_mutex &fq->mq_flush_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 sb_writers#4 &journal->j_barrier &journal->j_checkpoint_mutex &fq->mq_flush_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &journal->j_barrier &journal->j_checkpoint_mutex &fq->mq_flush_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &journal->j_barrier &journal->j_checkpoint_mutex &rq->__lock irq_context: 0 (wq_completion)bond3 (work_completion)(&(&bond->mii_work)->work) &rq->__lock irq_context: 0 sb_writers#4 &journal->j_barrier &journal->j_checkpoint_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &journal->j_barrier &journal->j_checkpoint_mutex &x->wait#26 irq_context: 0 sb_writers#4 &journal->j_barrier &journal->j_checkpoint_mutex &obj_hash[i].lock irq_context: 0 sb_writers#4 &journal->j_barrier &journal->j_checkpoint_mutex &base->lock irq_context: 0 sb_writers#4 &journal->j_barrier &journal->j_checkpoint_mutex &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &journal->j_barrier &journal->j_checkpoint_mutex (&timer.timer) irq_context: 0 sb_writers#4 &journal->j_barrier &journal->j_checkpoint_mutex mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#4 &journal->j_barrier &journal->j_checkpoint_mutex pool_lock#2 irq_context: 0 sb_writers#4 &journal->j_barrier &journal->j_checkpoint_mutex bit_wait_table + i irq_context: 0 sb_writers#4 &journal->j_barrier &journal->j_checkpoint_mutex &journal->j_state_lock irq_context: 0 sb_writers#4 &journal->j_barrier &journal->j_checkpoint_mutex &journal->j_list_lock irq_context: 0 sb_writers#4 &journal->j_barrier &journal->j_checkpoint_mutex &dd->lock irq_context: 0 sb_writers#4 &journal->j_barrier &journal->j_checkpoint_mutex rcu_read_lock &dd->lock irq_context: 0 sb_writers#4 &journal->j_barrier &journal->j_checkpoint_mutex rcu_read_lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &journal->j_barrier &journal->j_checkpoint_mutex rcu_read_lock tk_core.seq.seqcount irq_context: 0 sb_writers#4 &journal->j_barrier &journal->j_checkpoint_mutex rcu_read_lock &virtscsi_vq->vq_lock irq_context: 0 sb_writers#4 &journal->j_barrier &journal->j_checkpoint_mutex &journal->j_list_lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &journal->j_barrier &journal->j_checkpoint_mutex &journal->j_list_lock pool_lock#2 irq_context: 0 sb_writers#4 &journal->j_barrier &journal->j_checkpoint_mutex &c->lock irq_context: 0 sb_writers#4 &journal->j_barrier &journal->j_checkpoint_mutex &n->list_lock irq_context: 0 sb_writers#4 &journal->j_barrier &journal->j_checkpoint_mutex &n->list_lock &c->lock irq_context: 0 sb_writers#4 &journal->j_barrier &journal->j_checkpoint_mutex rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#4 &journal->j_barrier &journal->j_checkpoint_mutex &journal->j_list_lock key#14 irq_context: 0 sb_writers#4 &journal->j_barrier &journal->j_checkpoint_mutex &rq->__lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &journal->j_wait_transaction_locked 
irq_context: 0 sb_writers#4 &journal->j_wait_transaction_locked &p->pi_lock irq_context: softirq rcu_read_lock_bh rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: softirq rcu_read_lock_bh rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq rcu_read_lock_bh &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &journal->j_wait_transaction_locked &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &journal->j_wait_transaction_locked &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &journal->j_barrier &journal->j_checkpoint_mutex rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#4 &journal->j_barrier &journal->j_checkpoint_mutex rcu_read_lock &rq->__lock irq_context: 0 sb_writers#4 &journal->j_barrier &journal->j_checkpoint_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &journal->j_barrier &journal->j_checkpoint_mutex rcu_node_0 irq_context: 0 sb_writers#4 &journal->j_barrier &journal->j_checkpoint_mutex &rcu_state.expedited_wq irq_context: 0 sb_writers#4 &journal->j_barrier &journal->j_checkpoint_mutex &rcu_state.expedited_wq &p->pi_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 pgd_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 key irq_context: 0 (wq_completion)bond0#9 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond0#9 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond0#9 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock genl_mutex uevent_sock_mutex rcu_read_lock &rcu_state.gp_wq irq_context: 0 cb_lock genl_mutex uevent_sock_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 cb_lock genl_mutex uevent_sock_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 cb_lock genl_mutex uevent_sock_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock genl_mutex uevent_sock_mutex nl_table_wait.lock irq_context: 0 cb_lock genl_mutex uevent_sock_mutex &obj_hash[i].lock irq_context: 0 cb_lock genl_mutex subsys mutex#55 irq_context: 0 cb_lock genl_mutex subsys mutex#55 &k->k_lock irq_context: 0 cb_lock genl_mutex device_links_lock irq_context: 0 cb_lock genl_mutex &k->k_lock irq_context: 0 cb_lock genl_mutex deferred_probe_mutex irq_context: 0 cb_lock genl_mutex rcu_read_lock &pool->lock/1 irq_context: 0 cb_lock genl_mutex rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 cb_lock genl_mutex rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 cb_lock genl_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 cb_lock genl_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock genl_mutex cpu_hotplug_lock irq_context: 0 cb_lock genl_mutex cpu_hotplug_lock wq_pool_mutex irq_context: 0 cb_lock genl_mutex cpu_hotplug_lock wq_pool_mutex fs_reclaim irq_context: 0 cb_lock genl_mutex cpu_hotplug_lock wq_pool_mutex fs_reclaim 
mmu_notifier_invalidate_range_start irq_context: 0 cb_lock genl_mutex cpu_hotplug_lock wq_pool_mutex pool_lock#2 irq_context: 0 cb_lock genl_mutex cpu_hotplug_lock wq_pool_mutex &obj_hash[i].lock irq_context: 0 cb_lock genl_mutex cpu_hotplug_lock wq_pool_mutex &wq->mutex irq_context: 0 cb_lock genl_mutex cpu_hotplug_lock wq_pool_mutex &wq->mutex &pool->lock/1 irq_context: 0 cb_lock genl_mutex wq_pool_mutex irq_context: 0 cb_lock genl_mutex wq_pool_mutex &wq->mutex irq_context: 0 cb_lock genl_mutex crngs.lock irq_context: 0 cb_lock genl_mutex triggers_list_lock irq_context: 0 cb_lock genl_mutex leds_list_lock irq_context: 0 cb_lock genl_mutex leds_list_lock &led_cdev->trigger_lock irq_context: 0 cb_lock genl_mutex &zone->lock irq_context: 0 cb_lock genl_mutex &zone->lock &____s->seqcount irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh &list->lock#17 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh &r->producer_lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh rcu_read_lock &pool->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 pcpu_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 percpu_counters_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 pcpu_lock stock_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 jbd2_handle &sbi->s_orphan_lock bit_wait_table + i irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 jbd2_handle &sbi->s_orphan_lock bit_wait_table + i &p->pi_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 jbd2_handle &sbi->s_orphan_lock bit_wait_table + i &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 jbd2_handle &sbi->s_orphan_lock bit_wait_table + i &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_read_lock rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_read_lock rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_read_lock rcu_read_lock &pool->lock/1 
fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_read_lock rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override pool_lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock pool_lock#2 irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 remove_cache_srcu rcu_read_lock &rcu_state.expedited_wq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 remove_cache_srcu rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 remove_cache_srcu rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: softirq (&ndev->rs_timer) &c->lock irq_context: 0 cb_lock genl_mutex rtnl_mutex fs_reclaim irq_context: 0 cb_lock genl_mutex rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 cb_lock genl_mutex rtnl_mutex &c->lock irq_context: 0 cb_lock genl_mutex rtnl_mutex pool_lock#2 irq_context: 0 cb_lock genl_mutex rtnl_mutex param_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex param_lock rate_ctrl_mutex irq_context: 0 cb_lock genl_mutex rtnl_mutex (console_sem).lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx fs_reclaim irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx pool_lock#2 irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &k->list_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx gdp_mutex irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx gdp_mutex &k->list_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx gdp_mutex fs_reclaim irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx gdp_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx gdp_mutex pool_lock#2 irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx gdp_mutex lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx gdp_mutex lock kernfs_idr_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx gdp_mutex &root->kernfs_rwsem irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx gdp_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx gdp_mutex kobj_ns_type_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx lock kernfs_idr_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &root->kernfs_rwsem irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx bus_type_sem irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx sysfs_symlink_target_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &c->lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &root->kernfs_rwsem irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &dev->power.lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx dpm_list_mtx irq_context: 0 cb_lock genl_mutex rtnl_mutex 
&rdev->wiphy.mtx uevent_sock_mutex irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx uevent_sock_mutex fs_reclaim irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx uevent_sock_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx uevent_sock_mutex pool_lock#2 irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx uevent_sock_mutex nl_table_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx uevent_sock_mutex &obj_hash[i].lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx uevent_sock_mutex nl_table_wait.lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &obj_hash[i].lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &k->k_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx subsys mutex#56 irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx subsys mutex#56 &k->k_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx pin_fs_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 rename_lock.seqcount irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 fs_reclaim irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 pool_lock#2 irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 &dentry->d_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 rcu_read_lock rename_lock.seqcount irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 &dentry->d_lock &wq irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 mmu_notifier_invalidate_range_start irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 &sb->s_type->i_lock_key#7 irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 &s->s_inode_list_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 tk_core.seq.seqcount irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 &sb->s_type->i_lock_key#7 &dentry->d_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 &c->lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx nl_table_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx nl_table_wait.lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx reg_requests_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &obj_hash[i].lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &base->lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &base->lock &obj_hash[i].lock irq_context: 0 cb_lock genl_mutex rfkill_global_mutex irq_context: 0 cb_lock genl_mutex rfkill_global_mutex fs_reclaim irq_context: 0 cb_lock genl_mutex rfkill_global_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 cb_lock genl_mutex rfkill_global_mutex &c->lock irq_context: 0 cb_lock genl_mutex rfkill_global_mutex &____s->seqcount irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock &pcp->lock &zone->lock &____s->seqcount 
irq_context: 0 cb_lock genl_mutex rfkill_global_mutex pool_lock#2 irq_context: 0 cb_lock genl_mutex rfkill_global_mutex &k->list_lock irq_context: 0 cb_lock genl_mutex rfkill_global_mutex lock irq_context: 0 cb_lock genl_mutex rfkill_global_mutex lock kernfs_idr_lock irq_context: 0 cb_lock genl_mutex rfkill_global_mutex &root->kernfs_rwsem irq_context: 0 cb_lock genl_mutex rfkill_global_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 cb_lock genl_mutex rfkill_global_mutex bus_type_sem irq_context: 0 cb_lock genl_mutex rfkill_global_mutex sysfs_symlink_target_lock irq_context: 0 cb_lock genl_mutex rfkill_global_mutex &root->kernfs_rwsem irq_context: 0 cb_lock genl_mutex rfkill_global_mutex &dev->power.lock irq_context: 0 cb_lock genl_mutex rfkill_global_mutex dpm_list_mtx irq_context: 0 cb_lock genl_mutex rfkill_global_mutex &rfkill->lock irq_context: 0 cb_lock genl_mutex rfkill_global_mutex uevent_sock_mutex irq_context: 0 cb_lock genl_mutex rfkill_global_mutex uevent_sock_mutex fs_reclaim irq_context: 0 cb_lock genl_mutex rfkill_global_mutex uevent_sock_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 cb_lock genl_mutex rfkill_global_mutex uevent_sock_mutex pool_lock#2 irq_context: 0 cb_lock genl_mutex rfkill_global_mutex uevent_sock_mutex nl_table_lock irq_context: 0 cb_lock genl_mutex rfkill_global_mutex uevent_sock_mutex &c->lock irq_context: 0 cb_lock genl_mutex rfkill_global_mutex uevent_sock_mutex rlock-AF_NETLINK irq_context: 0 cb_lock genl_mutex rfkill_global_mutex uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait irq_context: 0 cb_lock genl_mutex rfkill_global_mutex uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock irq_context: 0 cb_lock genl_mutex rfkill_global_mutex uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq irq_context: 0 cb_lock genl_mutex rfkill_global_mutex uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock irq_context: 0 cb_lock genl_mutex rfkill_global_mutex uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock &rq->__lock irq_context: 0 cb_lock genl_mutex rfkill_global_mutex uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock genl_mutex rfkill_global_mutex uevent_sock_mutex rcu_read_lock rcu_node_0 irq_context: 0 cb_lock genl_mutex rfkill_global_mutex uevent_sock_mutex rcu_read_lock &rq->__lock irq_context: 0 cb_lock genl_mutex rfkill_global_mutex uevent_sock_mutex nl_table_wait.lock irq_context: 0 cb_lock genl_mutex rfkill_global_mutex uevent_sock_mutex &obj_hash[i].lock irq_context: 0 cb_lock genl_mutex rfkill_global_mutex &obj_hash[i].lock irq_context: 0 cb_lock genl_mutex rfkill_global_mutex &k->k_lock irq_context: 0 cb_lock genl_mutex rfkill_global_mutex subsys mutex#40 irq_context: 0 cb_lock genl_mutex rfkill_global_mutex subsys mutex#40 &k->k_lock irq_context: 0 cb_lock genl_mutex rfkill_global_mutex triggers_list_lock irq_context: 0 cb_lock genl_mutex rfkill_global_mutex leds_list_lock irq_context: 0 cb_lock genl_mutex rfkill_global_mutex leds_list_lock &led_cdev->trigger_lock irq_context: 0 cb_lock genl_mutex rfkill_global_mutex rcu_read_lock &pool->lock irq_context: 0 cb_lock genl_mutex rfkill_global_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 cb_lock genl_mutex rfkill_global_mutex rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 cb_lock genl_mutex rfkill_global_mutex rcu_read_lock &pool->lock 
&p->pi_lock irq_context: 0 cb_lock genl_mutex rfkill_global_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 cb_lock genl_mutex rfkill_global_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock genl_mutex rfkill_global_mutex &rq->__lock irq_context: 0 cb_lock genl_mutex rfkill_global_mutex.wait_lock irq_context: 0 cb_lock genl_mutex pin_fs_lock irq_context: 0 cb_lock genl_mutex &sb->s_type->i_mutex_key#3 irq_context: 0 cb_lock genl_mutex &sb->s_type->i_mutex_key#3 rename_lock.seqcount irq_context: 0 cb_lock genl_mutex &sb->s_type->i_mutex_key#3 fs_reclaim irq_context: 0 cb_lock genl_mutex &sb->s_type->i_mutex_key#3 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 cb_lock genl_mutex &sb->s_type->i_mutex_key#3 &____s->seqcount irq_context: 0 cb_lock genl_mutex &sb->s_type->i_mutex_key#3 pool_lock#2 irq_context: 0 cb_lock genl_mutex &sb->s_type->i_mutex_key#3 rcu_read_lock pool_lock#2 irq_context: 0 cb_lock genl_mutex &sb->s_type->i_mutex_key#3 &obj_hash[i].lock irq_context: 0 cb_lock genl_mutex &sb->s_type->i_mutex_key#3 &dentry->d_lock irq_context: 0 cb_lock genl_mutex &sb->s_type->i_mutex_key#3 rcu_read_lock rename_lock.seqcount irq_context: 0 cb_lock genl_mutex &sb->s_type->i_mutex_key#3 &dentry->d_lock &wq irq_context: 0 cb_lock genl_mutex &sb->s_type->i_mutex_key#3 mmu_notifier_invalidate_range_start irq_context: 0 cb_lock genl_mutex &sb->s_type->i_mutex_key#3 &sb->s_type->i_lock_key#7 irq_context: 0 cb_lock genl_mutex &sb->s_type->i_mutex_key#3 &s->s_inode_list_lock irq_context: 0 cb_lock genl_mutex &sb->s_type->i_mutex_key#3 tk_core.seq.seqcount irq_context: 0 cb_lock genl_mutex &sb->s_type->i_mutex_key#3 &sb->s_type->i_lock_key#7 &dentry->d_lock irq_context: 0 cb_lock genl_mutex &sb->s_type->i_mutex_key#3 &c->lock irq_context: 0 cb_lock genl_mutex &sb->s_type->i_mutex_key#3 &n->list_lock irq_context: 0 cb_lock genl_mutex &sb->s_type->i_mutex_key#3 &n->list_lock &c->lock irq_context: 0 cb_lock genl_mutex &sb->s_type->i_mutex_key#3 &pcp->lock &zone->lock irq_context: 0 cb_lock genl_mutex &sb->s_type->i_mutex_key#3 &pcp->lock &zone->lock &____s->seqcount irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &____s->seqcount irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &ul->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock batched_entropy_u32.lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &____s->seqcount#7 irq_context: 0 (wq_completion)ipv6_addrconf 
(work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &nf_conntrack_locks[i] irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 batched_entropy_u8.lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &nf_conntrack_locks[i]/1 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &____s->seqcount irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx stack_depot_init_mutex irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx pcpu_alloc_mutex irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx pcpu_alloc_mutex pcpu_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx rcu_read_lock pool_lock#2 irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &local->iflist_mtx irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 &____s->seqcount irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 rcu_read_lock pool_lock#2 irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 &obj_hash[i].lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx net_rwsem irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx net_rwsem &list->lock#2 irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &tn->lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &x->wait#9 irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx lock kernfs_idr_lock pool_lock#2 irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &n->list_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &n->list_lock &c->lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx uevent_sock_mutex &c->lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx uevent_sock_mutex &n->list_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx uevent_sock_mutex &n->list_lock &c->lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx subsys mutex#17 irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx subsys mutex#17 &k->k_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &dir->lock#2 irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx dev_hotplug_mutex irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx dev_hotplug_mutex &dev->power.lock irq_context: 0 cb_lock genl_mutex rtnl_mutex 
&rdev->wiphy.mtx dev_base_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx input_pool.lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx rcu_read_lock &pool->lock/1 irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &rq->__lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx batched_entropy_u32.lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &tbl->lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx sysctl_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx failover_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx proc_subdir_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx proc_inum_ida.xa_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx proc_subdir_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &idev->mc_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &idev->mc_lock fs_reclaim irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &idev->mc_lock &____s->seqcount irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &idev->mc_lock pool_lock#2 irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &idev->mc_lock rcu_read_lock pool_lock#2 irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &idev->mc_lock &obj_hash[i].lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &idev->mc_lock _xmit_ETHER irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &idev->mc_lock _xmit_ETHER pool_lock#2 irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &pnettable->lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx smc_ib_devices.mutex irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx rcu_read_lock &ndev->lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &wdev->mtx irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &fq->lock irq_context: 0 cb_lock genl_mutex (inetaddr_chain).rwsem irq_context: 0 cb_lock genl_mutex inet6addr_chain.lock irq_context: 0 cb_lock genl_mutex &sb->s_type->i_mutex_key#3 &rq->__lock irq_context: 0 cb_lock genl_mutex hwsim_radio_lock rcu_read_lock rhashtable_bucket irq_context: 0 cb_lock genl_mutex nl_table_lock irq_context: 0 cb_lock genl_mutex nl_table_wait.lock irq_context: 0 cb_lock rcu_read_lock &c->lock irq_context: 0 cb_lock rtnl_mutex &rq->__lock irq_context: 0 cb_lock rtnl_mutex rtnl_mutex.wait_lock irq_context: 0 cb_lock rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 cb_lock rtnl_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock quarantine_lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &wdev->mtx 
irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &wdev->event_lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &rdev->mgmt_registrations_lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &obj_hash[i].lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx (work_completion)(&(&sdata->dec_tailroom_needed_wk)->work) irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &local->key_mtx irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx pin_fs_lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &dentry->d_lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 &dentry->d_lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 &dentry->d_lock &dentry->d_lock/1 irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 tk_core.seq.seqcount irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 rename_lock.seqcount irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 pin_fs_lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 rcu_read_lock mount_lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 rcu_read_lock mount_lock mount_lock.seqcount irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 mount_lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 mount_lock mount_lock.seqcount irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 rcu_read_lock &dentry->d_lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 &fsnotify_mark_srcu irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 &sb->s_type->i_lock_key#7 irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 &s->s_inode_list_lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 &xa->xa_lock#6 irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 &obj_hash[i].lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 pool_lock#2 irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 &obj_hash[i].lock pool_lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 fill_pool_map-wait-type-override pool_lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 fill_pool_map-wait-type-override &c->lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx rcu_read_lock &dentry->d_lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &fsnotify_mark_srcu irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_lock_key#7 irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &s->s_inode_list_lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &xa->xa_lock#6 irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx pool_lock#2 irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx rcu_read_lock mount_lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx rcu_read_lock mount_lock mount_lock.seqcount irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx mount_lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx mount_lock 
mount_lock.seqcount irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &rdev->wiphy_work_lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx (&dwork->timer) irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &base->lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx (work_completion)(&(&link->color_collision_detect_work)->work) irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &local->chanctx_mtx irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 fs_reclaim irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 rcu_read_lock rename_lock.seqcount irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 &dentry->d_lock &wq irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 mmu_notifier_invalidate_range_start irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 &sb->s_type->i_lock_key#7 &dentry->d_lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 &____s->seqcount irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 &c->lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx fs_reclaim irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &fq->lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx nl_table_lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx nl_table_wait.lock irq_context: 0 cb_lock rtnl_mutex.wait_lock irq_context: 0 rtnl_mutex dev_addr_sem _xmit_ETHER &c->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 rtnl_mutex &rdev->wiphy.mtx lweventlist_lock batched_entropy_u8.lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx lweventlist_lock kfence_freelist_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &root->kernfs_rwsem rcu_node_0 irq_context: 0 &type->i_mutex_dir_key#4 &root->kernfs_rwsem remove_cache_srcu &pcp->lock &zone->lock irq_context: 0 rtnl_mutex _xmit_ETHER &local->filter_lock &____s->seqcount irq_context: 0 &type->i_mutex_dir_key#4 &root->kernfs_rwsem remove_cache_srcu &pcp->lock &zone->lock &____s->seqcount irq_context: 0 cb_lock &rdev->wiphy.mtx rtnl_mutex.wait_lock irq_context: 0 cb_lock &rdev->wiphy.mtx &p->pi_lock irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx &local->chanctx_mtx irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx &rdev->wiphy_work_lock irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx rcu_read_lock &pool->lock irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock genl_mutex batched_entropy_u32.lock crngs.lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) 
&rdev->wiphy.mtx &rq->__lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &tb->tb6_lock &pcp->lock &zone->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &tb->tb6_lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock &cfs_rq->removed.lock irq_context: 0 cb_lock genl_mutex cpu_hotplug_lock wq_pool_mutex &c->lock irq_context: 0 cb_lock genl_mutex cpu_hotplug_lock wq_pool_mutex &____s->seqcount irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &lock->wait_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &rdev->wiphy_work_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &list->lock#18 irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &ifibss->incomplete_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &rdev->bss_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx (console_sem).lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx console_lock console_srcu console_owner_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx console_lock console_srcu console_owner irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx console_lock console_srcu console_owner &port_lock_key irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx console_lock console_srcu console_owner console_owner_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->mtx irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->mtx &local->chanctx_mtx irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->mtx &local->chanctx_mtx fs_reclaim irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->mtx &local->chanctx_mtx fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->mtx &local->chanctx_mtx pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->mtx &local->chanctx_mtx &data->mutex irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx fs_reclaim irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx 
&wdev->mtx tk_core.seq.seqcount irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx hrtimer_bases.lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx hrtimer_bases.lock tk_core.seq.seqcount irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx hrtimer_bases.lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &base->lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &rdev->bss_lock pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx rcu_read_lock &pool->lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &wdev->event_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &lock->wait_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &p->pi_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rq->__lock irq_context: 0 (wq_completion)cfg80211 irq_context: 0 (wq_completion)cfg80211 (work_completion)(&rdev->event_work) irq_context: 0 (wq_completion)cfg80211 (work_completion)(&rdev->event_work) &rdev->wiphy.mtx irq_context: 0 (wq_completion)cfg80211 (work_completion)(&rdev->event_work) &rdev->wiphy.mtx &wdev->event_lock irq_context: 0 (wq_completion)cfg80211 (work_completion)(&rdev->event_work) &rdev->wiphy.mtx &wdev->mtx irq_context: 0 (wq_completion)cfg80211 (work_completion)(&rdev->event_work) &rdev->wiphy.mtx &wdev->mtx &rdev->bss_lock irq_context: 0 (wq_completion)cfg80211 (work_completion)(&rdev->event_work) &rdev->wiphy.mtx &wdev->mtx fs_reclaim irq_context: 0 (wq_completion)cfg80211 (work_completion)(&rdev->event_work) &rdev->wiphy.mtx &wdev->mtx fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)cfg80211 (work_completion)(&rdev->event_work) &rdev->wiphy.mtx &wdev->mtx pool_lock#2 irq_context: 0 (wq_completion)cfg80211 (work_completion)(&rdev->event_work) &rdev->wiphy.mtx &wdev->mtx &obj_hash[i].lock irq_context: 0 (wq_completion)cfg80211 (work_completion)(&rdev->event_work) &rdev->wiphy.mtx &wdev->mtx nl_table_lock irq_context: 0 (wq_completion)cfg80211 (work_completion)(&rdev->event_work) &rdev->wiphy.mtx &wdev->mtx &rq->__lock irq_context: 0 
(wq_completion)cfg80211 (work_completion)(&rdev->event_work) &rdev->wiphy.mtx &wdev->mtx nl_table_wait.lock irq_context: 0 (wq_completion)cfg80211 (work_completion)(&rdev->event_work) &rdev->wiphy.mtx &wdev->mtx &____s->seqcount irq_context: 0 (wq_completion)cfg80211 (work_completion)(&rdev->event_work) &rdev->wiphy.mtx &wdev->mtx rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)cfg80211 (work_completion)(&rdev->event_work) &rdev->wiphy.mtx &wdev->mtx &list->lock#2 irq_context: 0 (wq_completion)cfg80211 (work_completion)(&rdev->event_work) &rdev->wiphy.mtx &wdev->mtx rcu_read_lock &pool->lock irq_context: 0 (wq_completion)cfg80211 (work_completion)(&rdev->event_work) &rdev->wiphy.mtx &wdev->mtx rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)cfg80211 (work_completion)(&rdev->event_work) &rdev->wiphy.mtx &wdev->mtx rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 (wq_completion)cfg80211 (work_completion)(&rdev->event_work) &rdev->wiphy.mtx &wdev->mtx rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)cfg80211 (work_completion)(&rdev->event_work) &rdev->wiphy.mtx &wdev->mtx rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex dev_addr_sem net_rwsem pool_lock#2 irq_context: 0 rtnl_mutex dev_addr_sem net_rwsem &obj_hash[i].lock irq_context: 0 rtnl_mutex dev_addr_sem net_rwsem nl_table_lock irq_context: 0 rtnl_mutex dev_addr_sem net_rwsem nl_table_wait.lock irq_context: 0 (wq_completion)events wireless_nlevent_work irq_context: 0 (wq_completion)events wireless_nlevent_work net_rwsem irq_context: 0 (wq_completion)events wireless_nlevent_work net_rwsem &list->lock#2 irq_context: 0 (wq_completion)cfg80211 (work_completion)(&rdev->event_work) &rdev->wiphy.mtx &obj_hash[i].lock irq_context: 0 (wq_completion)cfg80211 (work_completion)(&rdev->event_work) &rdev->wiphy.mtx pool_lock#2 irq_context: 0 rtnl_mutex dev_addr_sem rcu_read_lock (console_sem).lock irq_context: 0 rtnl_mutex dev_addr_sem rcu_read_lock console_lock console_srcu console_owner_lock irq_context: 0 rtnl_mutex dev_addr_sem rcu_read_lock console_lock console_srcu console_owner irq_context: 0 rtnl_mutex dev_addr_sem rcu_read_lock console_lock console_srcu console_owner &port_lock_key irq_context: 0 rtnl_mutex dev_addr_sem rcu_read_lock console_lock console_srcu console_owner console_owner_lock irq_context: 0 rtnl_mutex dev_addr_sem rcu_read_lock &rcu_state.gp_wq irq_context: 0 rtnl_mutex dev_addr_sem rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 rtnl_mutex dev_addr_sem rcu_read_lock &rcu_state.gp_wq &p->pi_lock &cfs_rq->removed.lock irq_context: 0 rtnl_mutex dev_addr_sem rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 rtnl_mutex dev_addr_sem rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx lock kernfs_idr_lock &c->lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &obj_hash[i].lock pool_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &pcp->lock &zone->lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &pcp->lock &zone->lock &____s->seqcount irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock#2 rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf 
(work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock#2 rcu_read_lock_bh pool_lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock#2 rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock#2 rcu_read_lock_bh rcu_read_lock &list->lock#5 irq_context: softirq (&ndev->rs_timer) &____s->seqcount irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_read_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex &bat_priv->tt.commit_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex &bat_priv->tt.commit_lock key#16 irq_context: 0 (wq_completion)events (linkwatch_work).work &p->pi_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex &bat_priv->tt.commit_lock &bat_priv->softif_vlan_list_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex &bat_priv->tt.commit_lock &bat_priv->softif_vlan_list_lock pool_lock#2 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex &bat_priv->tt.commit_lock &bat_priv->tt.changes_list_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex &bat_priv->tt.commit_lock &bat_priv->tt.changes_list_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex &bat_priv->tt.commit_lock &bat_priv->tt.changes_list_lock pool_lock#2 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex &bat_priv->tt.commit_lock &bat_priv->tt.last_changeset_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex &bat_priv->tt.commit_lock &bat_priv->tt.last_changeset_lock pool_lock#2 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex &bat_priv->tt.commit_lock pool_lock#2 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex &bat_priv->tt.commit_lock &bat_priv->tvlv.container_list_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex &bat_priv->tt.commit_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) 
&hard_iface->bat_iv.ogm_buff_mutex &bat_priv->tvlv.container_list_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex &bat_priv->tvlv.container_list_lock pool_lock#2 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex &bat_priv->tvlv.container_list_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex rcu_read_lock &bat_priv->forw_bat_list_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex rcu_read_lock &c->lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex rcu_read_lock &bat_priv->forw_bat_list_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex rcu_read_lock &bat_priv->forw_bat_list_lock &base->lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex rcu_read_lock &bat_priv->forw_bat_list_lock &base->lock &obj_hash[i].lock irq_context: softirq rcu_read_lock hwsim_radio_lock irq_context: 0 cb_lock genl_mutex hwsim_radio_lock rcu_read_lock rcu_read_lock &pool->lock irq_context: 0 cb_lock genl_mutex hwsim_radio_lock rcu_read_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 cb_lock genl_mutex hwsim_radio_lock rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 cb_lock genl_mutex hwsim_radio_lock rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 cb_lock genl_mutex hwsim_radio_lock rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &fq->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->active_txq_lock[i] irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock &local->active_txq_lock[i] irq_context: 0 (wq_completion)mld 
(work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock &local->queue_stop_reason_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock &fq->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock &fq->lock tk_core.seq.seqcount irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock hwsim_radio_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock &list->lock#19 irq_context: softirq &list->lock#19 irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 &rq->__lock irq_context: 0 cb_lock &pcp->lock &zone->lock irq_context: 0 cb_lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &lock->wait_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &pool->lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock &lock->wait_lock irq_context: softirq rcu_read_lock rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override pool_lock#2 irq_context: softirq rcu_read_lock rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx &vlan_netdev_addr_lock_key/1 irq_context: 0 &type->i_mutex_dir_key#4 &root->kernfs_rwsem remove_cache_srcu pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &pcp->lock &zone->lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)cfg80211 (work_completion)(&rdev->event_work) &rdev->wiphy.mtx &lock->wait_lock irq_context: 0 (wq_completion)cfg80211 (work_completion)(&rdev->event_work) &rdev->wiphy.mtx &rq->__lock irq_context: 0 (wq_completion)cfg80211 (work_completion)(&rdev->event_work) &rdev->wiphy.mtx &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq 
irq_context: 0 (wq_completion)cfg80211 (work_completion)(&rdev->event_work) &rdev->wiphy.mtx &wdev->mtx &c->lock irq_context: 0 (wq_completion)events wireless_nlevent_work net_rwsem pool_lock#2 irq_context: 0 (wq_completion)events wireless_nlevent_work net_rwsem &obj_hash[i].lock irq_context: 0 (wq_completion)events wireless_nlevent_work net_rwsem nl_table_lock irq_context: 0 (wq_completion)events wireless_nlevent_work net_rwsem nl_table_wait.lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock &rcu_state.gp_wq irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 rtnl_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock &cfs_rq->removed.lock irq_context: 0 rtnl_mutex rcu_read_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 rtnl_mutex rcu_read_lock rcu_read_lock_bh pool_lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &vlan_netdev_addr_lock_key/1 _xmit_ETHER &c->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &vlan_netdev_addr_lock_key/1 _xmit_ETHER &____s->seqcount irq_context: 0 &type->s_umount_key#46/1 irq_context: 0 &type->s_umount_key#46/1 fs_reclaim irq_context: 0 &type->s_umount_key#46/1 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#46/1 pcpu_alloc_mutex irq_context: 0 &type->s_umount_key#46/1 pcpu_alloc_mutex pcpu_lock irq_context: 0 &type->s_umount_key#46/1 shrinker_rwsem irq_context: 0 &type->s_umount_key#46/1 list_lrus_mutex irq_context: 0 &type->s_umount_key#46/1 sb_lock irq_context: 0 &type->s_umount_key#46/1 sb_lock unnamed_dev_ida.xa_lock irq_context: 0 &type->s_umount_key#46/1 pool_lock#2 irq_context: 0 &type->s_umount_key#46/1 mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#46/1 &sb->s_type->i_lock_key#32 irq_context: 0 &type->s_umount_key#46/1 &s->s_inode_list_lock irq_context: 0 &type->s_umount_key#46/1 tk_core.seq.seqcount irq_context: 0 &type->s_umount_key#46/1 &sb->s_type->i_lock_key#32 &dentry->d_lock irq_context: 0 &type->s_umount_key#46/1 binderfs_minors_mutex irq_context: 0 &type->s_umount_key#46/1 binderfs_minors_mutex binderfs_minors.xa_lock irq_context: 0 &type->s_umount_key#46/1 &dentry->d_lock irq_context: 0 &type->s_umount_key#46/1 &sb->s_type->i_mutex_key#20 irq_context: 0 &type->s_umount_key#46/1 &sb->s_type->i_mutex_key#20 &sb->s_type->i_lock_key#32 irq_context: 0 &type->s_umount_key#46/1 &sb->s_type->i_mutex_key#20 rename_lock.seqcount irq_context: 0 &type->s_umount_key#46/1 &sb->s_type->i_mutex_key#20 fs_reclaim irq_context: 0 &type->s_umount_key#46/1 &sb->s_type->i_mutex_key#20 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#46/1 &sb->s_type->i_mutex_key#20 fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: softirq rcu_read_lock hwsim_radio_lock pool_lock#2 
irq_context: softirq rcu_read_lock hwsim_radio_lock &list->lock#19 irq_context: softirq rcu_read_lock lock#6 irq_context: softirq rcu_read_lock lock#6 kcov_remote_lock irq_context: softirq rcu_read_lock &local->rx_path_lock irq_context: softirq rcu_read_lock &local->rx_path_lock &list->lock#18 irq_context: softirq rcu_read_lock &local->rx_path_lock &rdev->wiphy_work_lock irq_context: softirq rcu_read_lock &local->rx_path_lock rcu_read_lock &pool->lock irq_context: softirq rcu_read_lock &local->rx_path_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx lock#6 irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx lock#6 kcov_remote_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &c->lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &____s->seqcount irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->chanctx_mtx irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &sta->lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &sta->lock pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx rcu_read_lock &sta->rate_ctrl_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx rcu_read_lock &sta->rate_ctrl_lock pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx fs_reclaim irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx rcu_read_lock rhashtable_bucket irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &local->chanctx_mtx irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx pin_fs_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 &sb->s_type->i_lock_key#7 irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 rename_lock.seqcount irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 fs_reclaim irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx 
&sb->s_type->i_mutex_key#3 &c->lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 &dentry->d_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 rcu_read_lock rename_lock.seqcount irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 &dentry->d_lock &wq irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 &s->s_inode_list_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 tk_core.seq.seqcount irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 &sb->s_type->i_lock_key#7 &dentry->d_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 &____s->seqcount irq_context: 0 &type->s_umount_key#46/1 &sb->s_type->i_mutex_key#20 pool_lock#2 irq_context: 0 &type->s_umount_key#46/1 &sb->s_type->i_mutex_key#20 &dentry->d_lock irq_context: 0 &type->s_umount_key#46/1 &sb->s_type->i_mutex_key#20 rcu_read_lock rename_lock.seqcount irq_context: 0 &type->s_umount_key#46/1 &sb->s_type->i_mutex_key#20 &dentry->d_lock &wq irq_context: 0 &type->s_umount_key#46/1 &sb->s_type->i_mutex_key#20 &sb->s_type->i_lock_key#32 &dentry->d_lock irq_context: 0 &type->s_umount_key#46/1 &sb->s_type->i_mutex_key#20 mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#46/1 &sb->s_type->i_mutex_key#20 &s->s_inode_list_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx nl_table_lock irq_context: 0 &type->s_umount_key#46/1 &sb->s_type->i_mutex_key#20 rcu_read_lock iunique_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx nl_table_wait.lock irq_context: 0 &type->s_umount_key#46/1 &sb->s_type->i_mutex_key#20 tk_core.seq.seqcount irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx rcu_read_lock &sta->lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx rcu_read_lock &sta->lock pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx rcu_read_lock &sta->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx rcu_read_lock &sta->lock krc.lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &rdev->bss_lock &obj_hash[i].lock irq_context: 0 (wq_completion)events 
(work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx pool_lock#2 irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6/1 irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6/1 rename_lock.seqcount irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6/1 fs_reclaim irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6/1 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6/1 &dentry->d_lock irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6/1 &root->kernfs_rwsem irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6/1 tomoyo_ss irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6/1 tomoyo_ss mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6/1 tomoyo_ss &c->lock irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6/1 tomoyo_ss &____s->seqcount irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6/1 tomoyo_ss pool_lock#2 irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6/1 tomoyo_ss rcu_read_lock mount_lock.seqcount irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6/1 tomoyo_ss rcu_read_lock rcu_read_lock rename_lock.seqcount irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6/1 tomoyo_ss &obj_hash[i].lock irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6/1 &root->kernfs_iattr_rwsem irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6/1 cgroup_mutex irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6/1 cgroup_mutex fs_reclaim irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6/1 cgroup_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6/1 cgroup_mutex pool_lock#2 irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6/1 cgroup_mutex pcpu_alloc_mutex irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6/1 cgroup_mutex pcpu_alloc_mutex pcpu_lock irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6/1 cgroup_mutex lock irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6/1 cgroup_mutex lock kernfs_idr_lock irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6/1 cgroup_mutex &root->kernfs_rwsem irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6/1 cgroup_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6/1 cgroup_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem tk_core.seq.seqcount irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6/1 cgroup_mutex &obj_hash[i].lock irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6/1 cgroup_mutex css_set_lock irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6/1 cgroup_mutex cgroup_file_kn_lock irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6/1 cgroup_mutex &c->lock irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6/1 cgroup_mutex &rq->__lock irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6/1 cgroup_mutex &____s->seqcount irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6/1 cgroup_mutex rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6/1 cgroup_mutex batched_entropy_u32.lock irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6/1 cgroup_mutex lock cgroup_idr_lock irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6/1 cgroup_mutex cgroup_idr_lock irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6/1 cgroup_mutex task_group_lock irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6/1 cgroup_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#10 
&type->i_mutex_dir_key#6/1 rcu_read_lock &dentry->d_lock irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6/1 &dentry->d_lock &lru->node[i].lock irq_context: 0 &type->i_mutex_dir_key#6 irq_context: 0 &type->i_mutex_dir_key#6 fs_reclaim irq_context: 0 &type->i_mutex_dir_key#6 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &type->i_mutex_dir_key#6 &dentry->d_lock irq_context: 0 &type->i_mutex_dir_key#6 rcu_read_lock rename_lock.seqcount irq_context: 0 &type->i_mutex_dir_key#6 &root->kernfs_rwsem irq_context: 0 &type->i_mutex_dir_key#6 &root->kernfs_rwsem inode_hash_lock irq_context: 0 &type->i_mutex_dir_key#6 &root->kernfs_rwsem fs_reclaim irq_context: 0 &type->i_mutex_dir_key#6 &root->kernfs_rwsem fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &type->i_mutex_dir_key#6 &root->kernfs_rwsem mmu_notifier_invalidate_range_start irq_context: 0 &type->i_mutex_dir_key#6 &root->kernfs_rwsem inode_hash_lock &sb->s_type->i_lock_key#30 irq_context: 0 &type->i_mutex_dir_key#6 &root->kernfs_rwsem inode_hash_lock &s->s_inode_list_lock irq_context: 0 &type->i_mutex_dir_key#6 &root->kernfs_rwsem tk_core.seq.seqcount irq_context: 0 &type->i_mutex_dir_key#6 &root->kernfs_rwsem &rq->__lock irq_context: 0 &type->i_mutex_dir_key#6 &root->kernfs_rwsem &sb->s_type->i_lock_key#30 irq_context: 0 &type->i_mutex_dir_key#6 &sb->s_type->i_lock_key#30 irq_context: 0 &type->i_mutex_dir_key#6 &sb->s_type->i_lock_key#30 &dentry->d_lock irq_context: 0 &type->i_mutex_dir_key#6 &sb->s_type->i_lock_key#30 &dentry->d_lock &wq irq_context: 0 kn->active#53 fs_reclaim irq_context: 0 kn->active#53 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#53 &kernfs_locks->open_file_mutex[count] irq_context: 0 kn->active#53 &kernfs_locks->open_file_mutex[count] fs_reclaim irq_context: 0 kn->active#53 &kernfs_locks->open_file_mutex[count] fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#53 &kernfs_locks->open_file_mutex[count] fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6 &root->kernfs_rwsem &c->lock irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6 &root->kernfs_rwsem &rq->__lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 batched_entropy_u8.lock crngs.lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &nf_conntrack_locks[i]/1 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &tbl->lock pool_lock#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &tbl->lock batched_entropy_u32.lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &tbl->lock &obj_hash[i].lock irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6 &root->kernfs_rwsem &pcp->lock &zone->lock irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6 &root->kernfs_rwsem &pcp->lock &zone->lock &____s->seqcount irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6 &root->kernfs_rwsem 
&____s->seqcount irq_context: 0 kn->active#54 fs_reclaim irq_context: 0 kn->active#54 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#54 &kernfs_locks->open_file_mutex[count] irq_context: 0 kn->active#54 &kernfs_locks->open_file_mutex[count] fs_reclaim irq_context: 0 kn->active#54 &kernfs_locks->open_file_mutex[count] fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#54 &c->lock irq_context: 0 sb_writers#10 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem irq_context: 0 sb_writers#10 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem cgroup_threadgroup_rwsem.rss.gp_wait.lock irq_context: 0 sb_writers#10 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem rcu_state.exp_mutex rcu_node_0 irq_context: 0 sb_writers#10 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem rcu_state.exp_mutex &obj_hash[i].lock irq_context: 0 sb_writers#10 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem rcu_state.exp_mutex rcu_read_lock &pool->lock irq_context: 0 sb_writers#10 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem rcu_state.exp_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 sb_writers#10 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 sb_writers#10 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 sb_writers#10 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#10 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem rcu_state.exp_mutex &rnp->exp_wq[2] irq_context: 0 sb_writers#10 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem rcu_state.exp_mutex &rq->__lock irq_context: 0 rtnl_mutex dev_addr_sem remove_cache_srcu rcu_node_0 irq_context: 0 rtnl_mutex dev_addr_sem remove_cache_srcu &rcu_state.expedited_wq irq_context: 0 sb_writers#10 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem rcu_state.exp_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex dev_addr_sem remove_cache_srcu &rcu_state.expedited_wq &p->pi_lock irq_context: 0 rtnl_mutex dev_addr_sem remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#10 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem &obj_hash[i].lock irq_context: 0 sb_writers#10 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem css_set_lock irq_context: 0 sb_writers#10 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem inode_hash_lock irq_context: 0 sb_writers#10 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem fs_reclaim irq_context: 0 sb_writers#10 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#10 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem pool_lock#2 irq_context: 0 sb_writers#10 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#10 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem inode_hash_lock &sb->s_type->i_lock_key#30 irq_context: 0 sb_writers#10 &of->mutex cgroup_mutex cpu_hotplug_lock 
cgroup_threadgroup_rwsem inode_hash_lock &s->s_inode_list_lock irq_context: 0 sb_writers#10 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem tk_core.seq.seqcount irq_context: 0 sb_writers#10 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem &sb->s_type->i_lock_key#30 irq_context: 0 sb_writers#10 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem &root->kernfs_iattr_rwsem irq_context: 0 sb_writers#10 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem &s->s_inode_list_lock irq_context: 0 sb_writers#10 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem &xa->xa_lock#6 irq_context: 0 sb_writers#10 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem &fsnotify_mark_srcu irq_context: 0 sb_writers#10 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem css_set_lock cgroup_file_kn_lock irq_context: 0 sb_writers#10 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem css_set_lock cgroup_file_kn_lock kernfs_notify_lock irq_context: 0 sb_writers#10 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem css_set_lock cgroup_file_kn_lock kernfs_notify_lock rcu_read_lock &pool->lock irq_context: 0 sb_writers#10 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem css_set_lock cgroup_file_kn_lock kernfs_notify_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 sb_writers#10 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem css_set_lock cgroup_file_kn_lock kernfs_notify_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 sb_writers#10 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem css_set_lock cgroup_file_kn_lock kernfs_notify_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 sb_writers#10 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem css_set_lock cgroup_file_kn_lock kernfs_notify_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#10 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem css_set_lock &p->pi_lock irq_context: 0 sb_writers#10 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem css_set_lock &p->pi_lock &rq->__lock irq_context: 0 sb_writers#10 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem css_set_lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#10 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem css_set_lock &p->pi_lock &rq->__lock &obj_hash[i].lock irq_context: 0 sb_writers#10 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem css_set_lock &p->pi_lock &rq->__lock &base->lock irq_context: 0 sb_writers#10 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem css_set_lock &p->pi_lock &rq->__lock &base->lock &obj_hash[i].lock irq_context: 0 &p->pi_lock &rq->__lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: hardirq per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 sb_writers#10 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem &rq->__lock irq_context: 0 sb_writers#10 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events kernfs_notify_work &root->kernfs_supers_rwsem inode_hash_lock irq_context: 0 sb_writers#10 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem &p->pi_lock irq_context: 0 sb_writers#10 &of->mutex cgroup_mutex 
cpu_hotplug_lock cgroup_threadgroup_rwsem &p->pi_lock &rq->__lock irq_context: 0 sb_writers#10 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#10 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem.waiters.lock irq_context: 0 sb_writers#10 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem.rss.gp_wait.lock irq_context: 0 sb_writers#10 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem.rss.gp_wait.lock &obj_hash[i].lock irq_context: 0 sb_writers#10 &of->mutex cgroup_mutex (wq_completion)cpuset_migrate_mm irq_context: 0 sb_writers#10 &of->mutex cgroup_mutex &wq->mutex irq_context: 0 sb_writers#10 &of->mutex cgroup_mutex &wq->mutex &pool->lock/1 irq_context: 0 sb_writers#10 &of->mutex cgroup_mutex &wq->mutex &x->wait#10 irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 rename_lock.seqcount irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 fs_reclaim irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 &dentry->d_lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 &root->kernfs_rwsem irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 tomoyo_ss irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 tomoyo_ss mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 tomoyo_ss rcu_read_lock mount_lock.seqcount irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 tomoyo_ss rcu_read_lock rcu_read_lock rename_lock.seqcount irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 tomoyo_ss &obj_hash[i].lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 &root->kernfs_iattr_rwsem irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex fs_reclaim irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex pcpu_alloc_mutex irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex pcpu_alloc_mutex pcpu_lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex lock kernfs_idr_lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex &root->kernfs_rwsem irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem tk_core.seq.seqcount irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex &obj_hash[i].lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex &rq->__lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex css_set_lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex pool_lock#2 irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex lock cgroup_idr_lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex cgroup_idr_lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex cpu_hotplug_lock irq_context: 0 
sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex cpu_hotplug_lock cpuset_rwsem irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex cpu_hotplug_lock cpuset_rwsem cpuset_rwsem.rss.gp_wait.lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex cpu_hotplug_lock cpuset_rwsem rcu_state.exp_mutex rcu_node_0 irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex cpu_hotplug_lock cpuset_rwsem rcu_state.exp_mutex &obj_hash[i].lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex cpu_hotplug_lock cpuset_rwsem rcu_state.exp_mutex rcu_read_lock &pool->lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex cpu_hotplug_lock cpuset_rwsem rcu_state.exp_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex cpu_hotplug_lock cpuset_rwsem rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex cpu_hotplug_lock cpuset_rwsem rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex cpu_hotplug_lock cpuset_rwsem rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex cpu_hotplug_lock cpuset_rwsem rcu_state.exp_mutex &rnp->exp_wq[3] irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex cpu_hotplug_lock cpuset_rwsem rcu_state.exp_mutex &rq->__lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex cpu_hotplug_lock cpuset_rwsem rcu_state.exp_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex cpu_hotplug_lock cpuset_rwsem rcu_state.exp_mutex rcu_read_lock &rq->__lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex cpu_hotplug_lock cpuset_rwsem rcu_state.exp_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex dev_addr_sem &rcu_state.expedited_wq irq_context: 0 rtnl_mutex dev_addr_sem &rcu_state.expedited_wq &p->pi_lock irq_context: 0 rtnl_mutex dev_addr_sem &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 rtnl_mutex dev_addr_sem &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex dev_addr_sem &rcu_state.expedited_wq &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex cpu_hotplug_lock cpuset_rwsem &obj_hash[i].lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex cpu_hotplug_lock cpuset_rwsem jump_label_mutex irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex cpu_hotplug_lock cpuset_rwsem jump_label_mutex text_mutex irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex cpu_hotplug_lock cpuset_rwsem jump_label_mutex text_mutex ptlock_ptr(page)#2 irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex cpu_hotplug_lock cpuset_rwsem callback_lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex cpu_hotplug_lock cpuset_rwsem.waiters.lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex cpu_hotplug_lock cpuset_rwsem.rss.gp_wait.lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex cpu_hotplug_lock cpuset_rwsem.rss.gp_wait.lock &obj_hash[i].lock irq_context: 0 sb_writers#11 
&type->i_mutex_dir_key#7/1 cgroup_mutex &c->lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex &____s->seqcount#2 irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex &____s->seqcount irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex &pcp->lock &zone->lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex &pcp->lock &zone->lock &____s->seqcount irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex &n->list_lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex &n->list_lock &c->lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex percpu_counters_lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex cpu_hotplug_lock jump_label_mutex irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex cpu_hotplug_lock jump_label_mutex text_mutex irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex cpu_hotplug_lock jump_label_mutex text_mutex ptlock_ptr(page)#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock &____s->seqcount#2 irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex shrinker_rwsem irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex shrinker_rwsem fs_reclaim irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex shrinker_rwsem fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex shrinker_rwsem pool_lock#2 irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex shrinker_rwsem &c->lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex shrinker_rwsem &n->list_lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex shrinker_rwsem &n->list_lock &c->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &____s->seqcount#2 irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex batched_entropy_u8.lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex &pgdat->memcg_lru.lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &tb->tb6_lock &____s->seqcount#2 irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex lock kernfs_idr_lock pool_lock#2 irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 rcu_read_lock &dentry->d_lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 &dentry->d_lock &lru->node[i].lock irq_context: 0 &type->i_mutex_dir_key#7 irq_context: 0 &type->i_mutex_dir_key#7 fs_reclaim irq_context: 0 &type->i_mutex_dir_key#7 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &type->i_mutex_dir_key#7 &c->lock irq_context: 0 &type->i_mutex_dir_key#7 &dentry->d_lock irq_context: 0 &type->i_mutex_dir_key#7 rcu_read_lock rename_lock.seqcount irq_context: 0 &type->i_mutex_dir_key#7 &root->kernfs_rwsem irq_context: 0 &type->i_mutex_dir_key#7 &root->kernfs_rwsem inode_hash_lock irq_context: 0 &type->i_mutex_dir_key#7 &root->kernfs_rwsem fs_reclaim irq_context: 0 &type->i_mutex_dir_key#7 &root->kernfs_rwsem fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &type->i_mutex_dir_key#7 &root->kernfs_rwsem mmu_notifier_invalidate_range_start irq_context: 0 &type->i_mutex_dir_key#7 &root->kernfs_rwsem inode_hash_lock &sb->s_type->i_lock_key#31 irq_context: 0 &type->i_mutex_dir_key#7 &root->kernfs_rwsem inode_hash_lock 
&s->s_inode_list_lock irq_context: 0 &type->i_mutex_dir_key#7 &root->kernfs_rwsem tk_core.seq.seqcount irq_context: 0 &type->i_mutex_dir_key#7 &root->kernfs_rwsem &sb->s_type->i_lock_key#31 irq_context: 0 &type->i_mutex_dir_key#7 &root->kernfs_rwsem &rq->__lock irq_context: 0 &type->i_mutex_dir_key#7 &root->kernfs_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &____s->seqcount#2 irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_read_lock &____s->seqcount#2 irq_context: 0 rtnl_mutex &____s->seqcount#2 irq_context: 0 rtnl_mutex &ndev->lock &tb->tb6_lock &n->list_lock irq_context: 0 rtnl_mutex &ndev->lock &tb->tb6_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)wg-kex-wg2#22 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock &____s->seqcount#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 remove_cache_srcu rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 kn->active#55 &____s->seqcount irq_context: 0 (wq_completion)wg-kex-wg0#14 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#7 irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#4 &nsim_trap_data->trap_lock &____s->seqcount#2 irq_context: 0 &____s->seqcount#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock &n->list_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock &n->list_lock &c->lock irq_context: 0 &type->i_mutex_dir_key#7 &sb->s_type->i_lock_key#31 irq_context: 0 &type->i_mutex_dir_key#7 &sb->s_type->i_lock_key#31 &dentry->d_lock irq_context: 0 &type->i_mutex_dir_key#7 &sb->s_type->i_lock_key#31 &dentry->d_lock &wq irq_context: 0 kn->active#55 fs_reclaim irq_context: 0 kn->active#55 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#55 &kernfs_locks->open_file_mutex[count] irq_context: 0 kn->active#55 &kernfs_locks->open_file_mutex[count] fs_reclaim irq_context: 0 kn->active#55 &kernfs_locks->open_file_mutex[count] fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#11 &of->mutex cgroup_mutex irq_context: 0 sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock irq_context: 0 sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem irq_context: 0 sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem cgroup_threadgroup_rwsem.rss.gp_wait.lock irq_context: 0 sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem css_set_lock irq_context: 0 sb_writers#11 
&of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem fs_reclaim irq_context: 0 sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem &c->lock irq_context: 0 sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem &____s->seqcount#2 irq_context: 0 sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem &____s->seqcount irq_context: 0 sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem pool_lock#2 irq_context: 0 sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem cpuset_rwsem irq_context: 0 sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem cpuset_rwsem cpuset_rwsem.rss.gp_wait.lock irq_context: 0 sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem cpuset_rwsem.waiters.lock irq_context: 0 sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem cpuset_rwsem.rss.gp_wait.lock irq_context: 0 sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem css_set_lock cgroup_file_kn_lock irq_context: 0 sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem css_set_lock &p->pi_lock irq_context: 0 sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem css_set_lock &p->pi_lock &rq->__lock irq_context: 0 sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem css_set_lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem cpuset_rwsem &p->pi_lock irq_context: 0 sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem cpuset_rwsem &p->pi_lock &rq->__lock irq_context: 0 sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem cpuset_rwsem &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem cpuset_rwsem &p->alloc_lock irq_context: 0 sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem cpuset_rwsem &p->alloc_lock &____s->seqcount#2 irq_context: 0 sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem cpuset_rwsem cpuset_attach_wq.lock irq_context: 0 sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem &p->alloc_lock irq_context: 0 sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem &p->alloc_lock &memcg->mm_list.lock irq_context: 0 sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem css_set_lock &obj_hash[i].lock irq_context: 0 sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem css_set_lock pool_lock#2 irq_context: 0 sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem css_set_lock krc.lock irq_context: 0 sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem.waiters.lock irq_context: 0 sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem.rss.gp_wait.lock irq_context: 0 sb_writers#11 &of->mutex cgroup_mutex (wq_completion)cpuset_migrate_mm irq_context: 0 sb_writers#11 &of->mutex cgroup_mutex &wq->mutex irq_context: 0 sb_writers#11 &of->mutex cgroup_mutex &wq->mutex 
&pool->lock/1 irq_context: 0 sb_writers#11 &of->mutex cgroup_mutex &wq->mutex &x->wait#10 irq_context: 0 stock_lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7 &c->lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7 pool_lock#2 irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7 &xa->xa_lock#12 irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7 &xa->xa_lock#12 pool_lock#2 irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7 &obj_hash[i].lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7 stock_lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7 &____s->seqcount#2 irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7 &____s->seqcount irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7 &n->list_lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7 &root->kernfs_rwsem pool_lock#2 irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7 &root->kernfs_rwsem &xa->xa_lock#12 irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7 &root->kernfs_rwsem &xa->xa_lock#12 &c->lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7 &root->kernfs_rwsem &xa->xa_lock#12 pool_lock#2 irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7 &root->kernfs_rwsem &obj_hash[i].lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7 &root->kernfs_rwsem stock_lock irq_context: 0 rtnl_mutex rcu_read_lock rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 kn->active#56 fs_reclaim irq_context: 0 kn->active#56 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#56 stock_lock irq_context: 0 kn->active#56 pool_lock#2 irq_context: 0 kn->active#56 &kernfs_locks->open_file_mutex[count] irq_context: 0 kn->active#56 &kernfs_locks->open_file_mutex[count] fs_reclaim irq_context: 0 kn->active#56 &kernfs_locks->open_file_mutex[count] fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)wg-crypt-wg1#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex dev_addr_sem &____s->seqcount#2 irq_context: 0 tomoyo_ss &____s->seqcount#2 irq_context: 0 kn->active#57 fs_reclaim irq_context: 0 kn->active#57 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#57 stock_lock irq_context: 0 kn->active#57 &kernfs_locks->open_file_mutex[count] irq_context: 0 kn->active#57 &kernfs_locks->open_file_mutex[count] fs_reclaim irq_context: 0 kn->active#57 &kernfs_locks->open_file_mutex[count] fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#11 &of->mutex kn->active#57 memcg_max_mutex irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 pool_lock#2 irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 &xa->xa_lock#12 irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 &xa->xa_lock#12 pool_lock#2 irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 &obj_hash[i].lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 stock_lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex blkcg_pol_mutex irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex blkcg_pol_mutex fs_reclaim irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex blkcg_pol_mutex fs_reclaim mmu_notifier_invalidate_range_start 
irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex blkcg_pol_mutex pool_lock#2 irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex blkcg_pol_mutex pcpu_alloc_mutex irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex blkcg_pol_mutex pcpu_alloc_mutex pcpu_lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex devcgroup_mutex irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex cpu_hotplug_lock freezer_mutex irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex rtnl_mutex irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex rtnl_mutex rtnl_mutex.wait_lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex rtnl_mutex &rq->__lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &macvlan_netdev_addr_lock_key/1 &c->lock irq_context: 0 rtnl_mutex &macvlan_netdev_addr_lock_key/1 &____s->seqcount#2 irq_context: 0 rtnl_mutex &macvlan_netdev_addr_lock_key/1 &____s->seqcount irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock _xmit_ETHER &____s->seqcount#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &____s->seqcount#2 irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem rcu_read_lock &____s->seqcount#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock _xmit_ETHER/1 &____s->seqcount#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &fq->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->active_txq_lock[i] irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock &local->active_txq_lock[i] irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock &local->queue_stop_reason_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock 
&fq->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock &fq->lock tk_core.seq.seqcount irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock hwsim_radio_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock hwsim_radio_lock pool_lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock hwsim_radio_lock &list->lock#19 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock &list->lock#19 irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem &____s->seqcount#2 irq_context: 0 rtnl_mutex &wg->device_update_lock &n->list_lock irq_context: 0 rtnl_mutex &wg->device_update_lock &n->list_lock &c->lock irq_context: 0 rtnl_mutex &wg->device_update_lock &____s->seqcount#2 irq_context: 0 sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem cpuset_rwsem rcu_state.exp_mutex &rnp->exp_wq[2] irq_context: 0 nf_nat_proto_mutex &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &bat_priv->forw_bcast_list_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &bat_priv->forw_bat_list_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &bat_priv->gw.list_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem (work_completion)(&(&bat_priv->bat_v.ogm_wq)->work) irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &bat_priv->bat_v.ogm_buff_mutex irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &bat_priv->bat_v.ogm_buff_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &bat_priv->bat_v.ogm_buff_mutex pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &bat_priv->tvlv.container_list_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &bat_priv->tvlv.handler_list_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem (work_completion)(&(&bat_priv->nc.work)->work) irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem key#17 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem key#18 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &bat_priv->tvlv.container_list_lock &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &bat_priv->tvlv.container_list_lock pool_lock#2 irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 irq_context: softirq (&ndev->rs_timer) 
rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 &obj_hash[i].lock irq_context: softirq rcu_callback cgroup_threadgroup_rwsem.rss.gp_wait.lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 pool_lock#2 irq_context: softirq rcu_callback cgroup_threadgroup_rwsem.rss.gp_wait.lock &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem (work_completion)(&(&bat_priv->dat.work)->work) irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &hash->list_locks[i] irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &zone->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &zone->lock &____s->seqcount irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem (work_completion)(&(&bat_priv->bla.work)->work) irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem key#21 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem key#21 &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem key#21 pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem key#21 krc.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem (work_completion)(&(&bat_priv->mcast.work)->work) irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem (work_completion)(&(&bat_priv->tt.work)->work) irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem key#16 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem key#16 &bat_priv->softif_vlan_list_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem key#16 &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem key#16 pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem key#16 krc.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem key#20 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &bat_priv->tt.req_list_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &bat_priv->tt.changes_list_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &bat_priv->tt.changes_list_lock &obj_hash[i].lock irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6 rcu_read_lock &c->lock irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6 rcu_read_lock &n->list_lock irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6 rcu_read_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &bat_priv->tt.changes_list_lock pool_lock#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock &____s->seqcount#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &bat_priv->tt.roam_list_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem (work_completion)(&(&bat_priv->orig_work)->work) irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET &c->lock irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET &n->list_lock irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET &n->list_lock &c->lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex &____s->seqcount#2 irq_context: softirq rcu_read_lock &local->rx_path_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: softirq 
rcu_read_lock &local->rx_path_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: softirq rcu_read_lock &local->rx_path_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &rdev->bss_lock krc.lock irq_context: softirq rcu_read_lock hwsim_radio_lock &c->lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex rtnl_mutex.wait_lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex &p->pi_lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex &p->pi_lock &cfs_rq->removed.lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex &p->pi_lock &rq->__lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &type->i_mutex_dir_key#7 stock_lock irq_context: 0 &type->i_mutex_dir_key#7 &root->kernfs_rwsem pool_lock#2 irq_context: 0 &type->i_mutex_dir_key#7 &root->kernfs_rwsem &____s->seqcount irq_context: 0 &type->i_mutex_dir_key#7 &root->kernfs_rwsem rcu_read_lock pool_lock#2 irq_context: 0 &type->i_mutex_dir_key#7 &root->kernfs_rwsem &obj_hash[i].lock irq_context: 0 &type->i_mutex_dir_key#7 &root->kernfs_rwsem &xa->xa_lock#12 irq_context: 0 &type->i_mutex_dir_key#7 &root->kernfs_rwsem &xa->xa_lock#12 pool_lock#2 irq_context: 0 &type->i_mutex_dir_key#7 &root->kernfs_rwsem stock_lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7 &root->kernfs_rwsem &c->lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7 &root->kernfs_rwsem &____s->seqcount#2 irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7 &root->kernfs_rwsem &pcp->lock &zone->lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7 &root->kernfs_rwsem &pcp->lock &zone->lock &____s->seqcount irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7 &root->kernfs_rwsem &____s->seqcount irq_context: 0 kn->active#55 stock_lock irq_context: 0 sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem freezer_mutex irq_context: 0 sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem freezer_mutex freezer_lock irq_context: 0 sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem freezer_mutex freezer_lock rcu_read_lock &sighand->siglock irq_context: 0 sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem freezer_mutex freezer_lock &sighand->siglock irq_context: 0 sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem freezer_mutex freezer_lock &sighand->siglock &p->pi_lock irq_context: 0 sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem freezer_mutex freezer_lock &sighand->siglock &p->pi_lock &rq->__lock irq_context: 0 sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem &p->alloc_lock &newf->file_lock irq_context: 0 &xa->xa_lock#12 pool_lock#2 irq_context: 0 nf_hook_mutex irq_context: 0 nf_hook_mutex fs_reclaim irq_context: 0 nf_hook_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 nf_hook_mutex stock_lock irq_context: 0 nf_hook_mutex pool_lock#2 irq_context: 0 ebt_mutex &mm->mmap_lock irq_context: 0 nf_hook_mutex &c->lock irq_context: 0 nf_hook_mutex &____s->seqcount irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET slock-AF_INET &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET slock-AF_INET 
elock-AF_INET irq_context: 0 &sb->s_type->i_mutex_key#10 stock_lock irq_context: 0 &mm->mmap_lock stock_lock irq_context: 0 &xt[i].mutex &c->lock irq_context: 0 &xt[i].mutex &n->list_lock irq_context: 0 &xt[i].mutex &____s->seqcount irq_context: 0 nf_hook_mutex &____s->seqcount#2 irq_context: 0 &xt[i].mutex &mm->mmap_lock irq_context: 0 &xt[i].mutex free_vmap_area_lock irq_context: 0 &xt[i].mutex free_vmap_area_lock &obj_hash[i].lock irq_context: 0 &xt[i].mutex free_vmap_area_lock pool_lock#2 irq_context: 0 &xt[i].mutex vmap_area_lock irq_context: 0 &xt[i].mutex &per_cpu(xt_recseq, i) irq_context: 0 &xt[i].mutex &obj_hash[i].lock irq_context: 0 &xt[i].mutex purge_vmap_area_lock irq_context: 0 &xt[i].mutex rcu_read_lock pool_lock#2 irq_context: softirq rcu_callback stock_lock irq_context: 0 &xt[i].mutex rcu_read_lock rcu_node_0 irq_context: 0 &xt[i].mutex rcu_read_lock &rq->__lock irq_context: 0 &xt[i].mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock &____s->seqcount#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &____s->seqcount#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &tb->tb6_lock rcu_read_lock &____s->seqcount#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &vlan_netdev_addr_lock_key/1 &____s->seqcount#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &vlan_netdev_addr_lock_key/1 &____s->seqcount irq_context: 0 rtnl_mutex &ndev->lock &____s->seqcount#2 irq_context: 0 rtnl_mutex &ndev->lock &tb->tb6_lock &____s->seqcount#2 irq_context: 0 &xt[i].mutex rcu_read_lock &rcu_state.gp_wq irq_context: 0 &xt[i].mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 &xt[i].mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 &xt[i].mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex lweventlist_lock &c->lock irq_context: 0 nf_nat_proto_mutex irq_context: 0 nf_nat_proto_mutex fs_reclaim irq_context: 0 nf_nat_proto_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 nf_nat_proto_mutex pool_lock#2 irq_context: 0 nf_nat_proto_mutex nf_hook_mutex irq_context: 0 nf_nat_proto_mutex nf_hook_mutex fs_reclaim irq_context: 0 nf_nat_proto_mutex nf_hook_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 nf_nat_proto_mutex nf_hook_mutex stock_lock irq_context: 0 nf_nat_proto_mutex nf_hook_mutex pool_lock#2 irq_context: 0 
nf_nat_proto_mutex cpu_hotplug_lock irq_context: 0 nf_nat_proto_mutex &obj_hash[i].lock irq_context: 0 nf_nat_proto_mutex stock_lock irq_context: 0 pcpu_alloc_mutex fs_reclaim irq_context: 0 pcpu_alloc_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 pcpu_alloc_mutex &____s->seqcount irq_context: 0 fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 pcpu_alloc_mutex rcu_node_0 irq_context: 0 (wq_completion)events pcpu_balance_work pcpu_alloc_mutex pcpu_alloc_mutex.wait_lock irq_context: 0 (wq_completion)events pcpu_balance_work pcpu_alloc_mutex &pool->lock irq_context: 0 (wq_completion)events pcpu_balance_work pcpu_alloc_mutex &pool->lock &p->pi_lock irq_context: 0 (wq_completion)events pcpu_balance_work pcpu_alloc_mutex &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events pcpu_balance_work pcpu_alloc_mutex &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events pcpu_balance_work pcpu_alloc_mutex &rq->__lock irq_context: 0 (wq_completion)events pcpu_balance_work pcpu_alloc_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock hwsim_radio_lock pool_lock#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock hwsim_radio_lock &list->lock#19 irq_context: 0 pcpu_alloc_mutex rcu_read_lock rcu_node_0 irq_context: 0 pcpu_alloc_mutex rcu_read_lock &rcu_state.gp_wq irq_context: 0 pcpu_alloc_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 pcpu_alloc_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock &cfs_rq->removed.lock irq_context: 0 pcpu_alloc_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 pcpu_alloc_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pcpu_alloc_mutex rcu_read_lock &rq->__lock irq_context: 0 pcpu_alloc_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex rcu_read_lock &____s->seqcount#2 irq_context: 0 rtnl_mutex rcu_read_lock &net->sctp.local_addr_lock &net->sctp.addr_wq_lock &____s->seqcount#2 irq_context: 0 &xt[i].mutex purge_vmap_area_lock &obj_hash[i].lock irq_context: 0 &xt[i].mutex purge_vmap_area_lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 clock-AF_INET6 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 slock-AF_INET6 &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 slock-AF_INET6 elock-AF_INET6 irq_context: 0 &pipe->mutex/1 stock_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &xa->xa_lock#12 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 stock_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss &____s->seqcount#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &xa->xa_lock#12 &c->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &xa->xa_lock#12 pool_lock#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &journal->j_state_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &journal->j_state_lock tk_core.seq.seqcount irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &journal->j_state_lock &obj_hash[i].lock irq_context: 0 sb_writers#4 
&type->i_mutex_dir_key#3/1 &journal->j_state_lock &base->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &journal->j_state_lock &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle stock_lock irq_context: 0 loop_validate_mutex irq_context: 0 loop_validate_mutex &lo->lo_mutex irq_context: 0 &fsnotify_mark_srcu fs_reclaim irq_context: 0 &fsnotify_mark_srcu fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &fsnotify_mark_srcu pool_lock#2 irq_context: 0 &fsnotify_mark_srcu &group->notification_lock irq_context: 0 &fsnotify_mark_srcu &group->notification_waitq irq_context: 0 &fsnotify_mark_srcu &group->notification_waitq &ep->lock irq_context: 0 &fsnotify_mark_srcu &group->notification_waitq &ep->lock &ep->wq irq_context: 0 &fsnotify_mark_srcu &group->notification_waitq &ep->lock &ep->wq &p->pi_lock irq_context: 0 &fsnotify_mark_srcu &group->notification_waitq &ep->lock &ep->wq &p->pi_lock &rq->__lock irq_context: 0 &fsnotify_mark_srcu &group->notification_waitq &ep->lock &ep->wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &fsnotify_mark_srcu &rq->__lock irq_context: 0 &fsnotify_mark_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &p->lock &of->mutex kn->active#5 &____s->seqcount#2 irq_context: 0 &type->i_mutex_dir_key#4 &____s->seqcount#2 irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_TUNNEL6#2 irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_TUNNEL6#2 rcu_read_lock &ndev->lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_TUNNEL6#2 &obj_hash[i].lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_TUNNEL6#2 pool_lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &macvlan_netdev_addr_lock_key/1 &____s->seqcount#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh fill_pool_map-wait-type-override pool_lock irq_context: softirq rcu_read_lock &____s->seqcount#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh rcu_read_lock &list->lock#5 irq_context: 0 sb_writers#8 kn->active#5 &c->lock irq_context: 0 sb_writers#8 kn->active#5 &____s->seqcount#2 irq_context: 0 sb_writers#8 kn->active#5 &____s->seqcount irq_context: 0 sb_writers#8 tomoyo_ss &____s->seqcount#2 irq_context: 0 sb_writers#8 tomoyo_ss &____s->seqcount irq_context: 0 sb_writers#8 &sb->s_type->i_mutex_key#13 &root->kernfs_iattr_rwsem iattr_mutex &rq->__lock irq_context: 0 rtnl_mutex &tb->tb6_lock &____s->seqcount#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf 
(work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh pool_lock#2 irq_context: 0 &xt[i].mutex init_mm.page_table_lock irq_context: 0 purge_vmap_area_lock &obj_hash[i].lock irq_context: 0 purge_vmap_area_lock pool_lock#2 irq_context: 0 &xt[i].mutex &____s->seqcount#2 irq_context: 0 &pipe->mutex/1 &mm->mmap_lock stock_lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 stock_lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &anon_vma->rwsem stock_lock irq_context: 0 &p->lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 &p->lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem key#19 irq_context: 0 rtnl_mutex team->team_lock_key#7 input_pool.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &wg->device_update_lock &rnp->exp_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &wg->device_update_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &wg->device_update_lock rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &wg->device_update_lock rcu_state.exp_mutex irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &wg->device_update_lock rcu_state.exp_mutex rcu_state.exp_mutex.wait_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &wg->device_update_lock rcu_state.exp_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &wg->device_update_lock rcu_state.exp_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem wq_mayday_lock irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock &anon_vma->rwsem &base->lock irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock &anon_vma->rwsem &base->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex team->team_lock_key#7 rcu_read_lock &ndev->lock irq_context: 0 rtnl_mutex team->team_lock_key#7 &c->lock irq_context: 0 rtnl_mutex team->team_lock_key#7 &obj_hash[i].lock irq_context: 0 rtnl_mutex team->team_lock_key#7 nl_table_lock irq_context: 0 rtnl_mutex team->team_lock_key#7 nl_table_wait.lock irq_context: 0 rtnl_mutex team->team_lock_key#7 rcu_read_lock &pool->lock/1 irq_context: 0 rtnl_mutex team->team_lock_key#7 rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 rtnl_mutex team->team_lock_key#7 rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 rtnl_mutex team->team_lock_key#7 rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 rtnl_mutex team->team_lock_key#7 rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex team->team_lock_key#7 &in_dev->mc_tomb_lock irq_context: 0 rtnl_mutex team->team_lock_key#7 &im->lock irq_context: 0 rtnl_mutex team->team_lock_key#7 _xmit_ETHER &c->lock irq_context: 0 rtnl_mutex team->team_lock_key#7 cbs_list_lock irq_context: 0 rtnl_mutex team->team_lock_key#7 &ndev->lock irq_context: softirq rcu_read_lock hwsim_radio_lock &n->list_lock irq_context: softirq rcu_read_lock hwsim_radio_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)events (work_completion)(&data->fib_event_work) &data->fib_lock 
&____s->seqcount#2 irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock &____s->seqcount#2 irq_context: 0 &group->notification_waitq &ep->lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 tomoyo_ss &____s->seqcount#2 irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 &sb->s_type->i_lock_key#5 irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &____s->seqcount#2 irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 tomoyo_ss &____s->seqcount#2 irq_context: 0 rtnl_mutex _xmit_ETHER &____s->seqcount#2 irq_context: 0 rtnl_mutex fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 rtnl_mutex dev_addr_sem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex team->team_lock_key#7 sysfs_symlink_target_lock irq_context: 0 rtnl_mutex team->team_lock_key#7 lock irq_context: 0 rtnl_mutex team->team_lock_key#7 lock kernfs_idr_lock irq_context: 0 rtnl_mutex team->team_lock_key#7 &root->kernfs_rwsem irq_context: 0 rtnl_mutex team->team_lock_key#7 &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 rtnl_mutex team->team_lock_key#7 &____s->seqcount#2 irq_context: 0 rtnl_mutex team->team_lock_key#7 &____s->seqcount irq_context: 0 rtnl_mutex team->team_lock_key#7 &n->list_lock irq_context: 0 rtnl_mutex team->team_lock_key#7 &n->list_lock &c->lock irq_context: 0 rtnl_mutex team->team_lock_key#7 lweventlist_lock irq_context: 0 rtnl_mutex team->team_lock_key#7 lweventlist_lock &dir->lock#2 irq_context: 0 rtnl_mutex team->team_lock_key#7 (console_sem).lock irq_context: 0 rtnl_mutex team->team_lock_key#7 console_lock console_srcu console_owner_lock irq_context: 0 rtnl_mutex team->team_lock_key#7 console_lock console_srcu console_owner irq_context: 0 rtnl_mutex team->team_lock_key#7 console_lock console_srcu console_owner &port_lock_key irq_context: 0 rtnl_mutex team->team_lock_key#7 console_lock console_srcu console_owner console_owner_lock irq_context: 0 rtnl_mutex team->team_lock_key#7 &rq->__lock irq_context: 0 &pipe->mutex/1 &pipe->rd_wait &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 rtnl_mutex &hard_iface->bat_iv.ogm_buff_mutex rcu_read_lock rcu_node_0 irq_context: 0 rtnl_mutex &hard_iface->bat_iv.ogm_buff_mutex rcu_read_lock &rq->__lock irq_context: 0 rtnl_mutex &sb->s_type->i_mutex_key#3 &c->lock irq_context: 0 rtnl_mutex &sb->s_type->i_mutex_key#3 batched_entropy_u8.lock irq_context: 0 rtnl_mutex &sb->s_type->i_mutex_key#3 kfence_freelist_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#7 irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#7 fs_reclaim irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#7 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#7 &c->lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#7 &____s->seqcount#2 irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#7 &____s->seqcount irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#7 rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#7 &obj_hash[i].lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#7 quarantine_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#7 nl_table_lock 
irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#7 nl_table_wait.lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#7 net_rwsem irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#7 net_rwsem &list->lock#2 irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#7 &tn->lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#7 remove_cache_srcu irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#7 remove_cache_srcu quarantine_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#7 remove_cache_srcu &c->lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#7 remove_cache_srcu &n->list_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#7 remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#7 remove_cache_srcu &obj_hash[i].lock irq_context: 0 rtnl_mutex &idev->mc_lock &dev_addr_list_lock_key#3 &c->lock irq_context: 0 rtnl_mutex &idev->mc_lock _xmit_ETHER &n->list_lock irq_context: 0 rtnl_mutex &idev->mc_lock _xmit_ETHER &n->list_lock &c->lock irq_context: 0 rtnl_mutex remove_cache_srcu &____s->seqcount irq_context: 0 sb_writers#8 &of->mutex kn->active#51 nsim_bus_dev_list_lock nsim_bus_dev_ids.xa_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#51 nsim_bus_dev_list_lock &k->list_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#51 nsim_bus_dev_list_lock &root->kernfs_rwsem irq_context: 0 sb_writers#8 &of->mutex kn->active#51 nsim_bus_dev_list_lock &root->kernfs_rwsem irq_context: 0 sb_writers#8 &of->mutex kn->active#51 nsim_bus_dev_list_lock dev_pm_qos_sysfs_mtx irq_context: 0 sb_writers#8 &of->mutex kn->active#51 nsim_bus_dev_list_lock dev_pm_qos_sysfs_mtx &root->kernfs_rwsem irq_context: 0 sb_writers#8 &of->mutex kn->active#51 nsim_bus_dev_list_lock dev_pm_qos_sysfs_mtx &root->kernfs_rwsem irq_context: 0 sb_writers#8 &of->mutex kn->active#51 nsim_bus_dev_list_lock dev_pm_qos_sysfs_mtx dev_pm_qos_mtx irq_context: 0 sb_writers#8 &of->mutex kn->active#51 nsim_bus_dev_list_lock &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 sb_writers#8 &of->mutex kn->active#51 nsim_bus_dev_list_lock &root->kernfs_rwsem kernfs_idr_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#51 nsim_bus_dev_list_lock &root->kernfs_rwsem &obj_hash[i].lock irq_context: 0 sb_writers#8 &of->mutex kn->active#51 nsim_bus_dev_list_lock &root->kernfs_rwsem pool_lock#2 irq_context: 0 sb_writers#8 &of->mutex kn->active#51 nsim_bus_dev_list_lock kernfs_idr_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#51 nsim_bus_dev_list_lock &obj_hash[i].lock irq_context: 0 sb_writers#8 &of->mutex kn->active#51 nsim_bus_dev_list_lock pool_lock#2 irq_context: 0 sb_writers#8 &of->mutex kn->active#51 nsim_bus_dev_list_lock subsys mutex#82 irq_context: 0 sb_writers#8 &of->mutex kn->active#51 nsim_bus_dev_list_lock &k->k_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#51 nsim_bus_dev_list_lock &k->k_lock klist_remove_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#51 nsim_bus_dev_list_lock &dev->mutex &dev->power.lock irq_context: 0 sb_writers#8 &of->mutex kn->active#51 nsim_bus_dev_list_lock &dev->mutex device_links_lock irq_context: 0 sb_writers#8 &of->mutex 
kn->active#51 nsim_bus_dev_list_lock &dev->mutex &root->kernfs_rwsem irq_context: 0 sb_writers#8 &of->mutex kn->active#51 nsim_bus_dev_list_lock &dev->mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 sb_writers#8 &of->mutex kn->active#51 nsim_bus_dev_list_lock &dev->mutex &root->kernfs_rwsem &obj_hash[i].lock irq_context: 0 sb_writers#8 &of->mutex kn->active#51 nsim_bus_dev_list_lock &dev->mutex &root->kernfs_rwsem pool_lock#2 irq_context: 0 sb_writers#8 &of->mutex kn->active#51 nsim_bus_dev_list_lock &dev->mutex &root->kernfs_rwsem kernfs_idr_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#51 nsim_bus_dev_list_lock &dev->mutex &k->list_lock irq_context: 0 (wq_completion)wg-kex-wg0#14 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)wg-kex-wg1#13 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)wg-kex-wg0#14 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg1#13 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &ul->lock irq_context: 0 (wq_completion)wg-kex-wg0#14 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: 0 (wq_completion)wg-kex-wg1#13 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh batched_entropy_u32.lock irq_context: 0 (wq_completion)wg-kex-wg0#14 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &rq->__lock irq_context: 0 (wq_completion)wg-kex-wg1#14 irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &tb->tb6_lock &____s->seqcount#2 irq_context: softirq fs/notify/mark.c:89 rcu_read_lock &pool->lock/1 irq_context: softirq fs/notify/mark.c:89 rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: softirq fs/notify/mark.c:89 rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)events_unbound (reaper_work).work &ACCESS_PRIVATE(ssp->srcu_sup, lock) &ACCESS_PRIVATE(sdp, lock) irq_context: 0 (wq_completion)events_unbound (reaper_work).work &ACCESS_PRIVATE(ssp->srcu_sup, lock) rcu_read_lock &pool->lock irq_context: 0 (wq_completion)events_unbound (reaper_work).work 
&ACCESS_PRIVATE(ssp->srcu_sup, lock) rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound (reaper_work).work &ACCESS_PRIVATE(ssp->srcu_sup, lock) rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 (wq_completion)wg-kex-wg1#14 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) irq_context: 0 (wq_completion)wg-kex-wg1#14 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &r->consumer_lock#2 irq_context: 0 (wq_completion)wg-kex-wg1#14 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock irq_context: 0 (wq_completion)wg-kex-wg1#14 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock irq_context: 0 (wq_completion)wg-kex-wg1#14 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock irq_context: 0 (wq_completion)wg-kex-wg1#14 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&(&ssp->srcu_sup->work)->work) &ssp->srcu_sup->srcu_cb_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)wg-kex-wg1#14 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock irq_context: 0 (wq_completion)wg-kex-wg1#14 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock fs_reclaim irq_context: 0 (wq_completion)wg-kex-wg1#14 (work_completion)(&({ do { const void 
*__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)wg-kex-wg1#14 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg1#14 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock rcu_read_lock_bh &peer->keypairs.keypair_update_lock irq_context: 0 (wq_completion)wg-kex-wg1#14 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock rcu_read_lock_bh &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg1#14 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg1#14 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh &base->lock irq_context: 0 (wq_completion)wg-kex-wg1#14 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh &base->lock &obj_hash[i].lock irq_context: 0 cgroup_threadgroup_rwsem freezer_mutex irq_context: 0 &vma->vm_lock->lock stock_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &____s->seqcount#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss quarantine_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss remove_cache_srcu irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss remove_cache_srcu quarantine_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss remove_cache_srcu &c->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss remove_cache_srcu &n->list_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 
tomoyo_ss remove_cache_srcu &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss remove_cache_srcu &rq->__lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 rtnl_mutex &wg->device_update_lock rcu_state.exp_mutex &rnp->exp_wq[2] irq_context: 0 rtnl_mutex nf_hook_mutex &n->list_lock irq_context: 0 rtnl_mutex nf_hook_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &wg->device_update_lock init_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &wg->device_update_lock &zone->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &wg->device_update_lock &zone->lock &____s->seqcount irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &wg->device_update_lock rcu_state.exp_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &anon_vma->rwsem rcu_read_lock &rcu_state.expedited_wq irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &anon_vma->rwsem rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &anon_vma->rwsem rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &anon_vma->rwsem rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-kex-wg1#14 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg1#14 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &base->lock irq_context: 0 (wq_completion)wg-kex-wg1#14 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &wg->device_update_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-kex-wg1#14 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg1#14 
(work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &list->lock#17 irq_context: 0 (wq_completion)wg-kex-wg1#14 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh &r->producer_lock#2 irq_context: 0 (wq_completion)wg-kex-wg1#14 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh rcu_read_lock &pool->lock irq_context: 0 (wq_completion)wg-kex-wg1#14 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg1#14 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 (wq_completion)wg-kex-wg1#14 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)wg-kex-wg1#14 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-kex-wg1#14 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &rq->__lock irq_context: 0 (wq_completion)wg-crypt-wg1#7 irq_context: 0 (wq_completion)wg-crypt-wg1#7 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) 
*)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) irq_context: 0 (wq_completion)wg-crypt-wg1#7 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &r->consumer_lock#2 irq_context: 0 (wq_completion)wg-crypt-wg1#7 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock irq_context: 0 (wq_completion)wg-crypt-wg1#7 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg1#7 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 (wq_completion)wg-kex-wg1#14 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_node_0 irq_context: 0 (wq_completion)wg-kex-wg1#14 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &rcu_state.expedited_wq irq_context: 0 (wq_completion)wg-crypt-wg1#7 (work_completion)(&peer->transmit_packet_work) irq_context: 0 (wq_completion)wg-kex-wg1#14 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)wg-crypt-wg1#7 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg1#14 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)wg-crypt-wg1#7 (work_completion)(&peer->transmit_packet_work) &obj_hash[i].lock irq_context: 0 
(wq_completion)wg-kex-wg1#14 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-crypt-wg1#7 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock irq_context: 0 (wq_completion)wg-kex-wg1#14 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 misc_mtx rfkill_global_mutex &data->mtx &____s->seqcount#2 irq_context: 0 misc_mtx rfkill_global_mutex &data->mtx &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &wg->device_update_lock pcpu_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &wg->device_update_lock &r->consumer_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &wg->device_update_lock rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &wg->device_update_lock rcu_state.barrier_mutex irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &wg->device_update_lock rcu_state.barrier_mutex rcu_state.barrier_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &wg->device_update_lock rcu_state.barrier_mutex rcu_state.barrier_lock &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &wg->device_update_lock rcu_state.barrier_mutex &x->wait#24 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &wg->device_update_lock rcu_state.barrier_mutex &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &wg->device_update_lock rcu_state.barrier_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 misc_mtx &n->list_lock irq_context: 0 misc_mtx &n->list_lock &c->lock irq_context: 0 (wq_completion)wg-crypt-wg1#7 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#7 irq_context: 0 (wq_completion)wg-crypt-wg1#7 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)wg-crypt-wg1#7 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-crypt-wg1#7 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: 0 (wq_completion)wg-crypt-wg1#7 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-crypt-wg0#7 irq_context: 0 (wq_completion)wg-crypt-wg0#7 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + 
(((__per_cpu_offset[(cpu)])))); }); })->work) irq_context: 0 (wq_completion)wg-crypt-wg0#7 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &r->consumer_lock#2 irq_context: 0 (wq_completion)wg-crypt-wg0#7 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) tk_core.seq.seqcount irq_context: softirq rcu_read_lock_bh rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)wg-crypt-wg0#7 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock irq_context: 0 (wq_completion)wg-crypt-wg0#7 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg0#7 (work_completion)(&peer->transmit_packet_work) irq_context: 0 (wq_completion)wg-crypt-wg0#7 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg0#7 (work_completion)(&peer->transmit_packet_work) &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg0#7 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock irq_context: 0 (wq_completion)wg-crypt-wg0#7 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg0#7 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#7 irq_context: 0 (wq_completion)wg-crypt-wg0#7 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)wg-crypt-wg0#7 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-crypt-wg0#7 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: 0 (wq_completion)wg-crypt-wg1#7 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg2#13 irq_context: 0 (wq_completion)wg-kex-wg2#13 (work_completion)(&peer->transmit_handshake_work) irq_context: 0 (wq_completion)wg-kex-wg2#13 
(work_completion)(&peer->transmit_handshake_work) tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg2#13 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock irq_context: 0 (wq_completion)wg-kex-wg2#13 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock irq_context: 0 (wq_completion)wg-kex-wg2#13 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock crngs.lock irq_context: 0 (wq_completion)wg-kex-wg2#13 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg2#13 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg2#13 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock rcu_read_lock_bh batched_entropy_u32.lock irq_context: 0 (wq_completion)wg-kex-wg2#13 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock rcu_read_lock_bh &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg2#13 (work_completion)(&peer->transmit_handshake_work) &cookie->lock irq_context: 0 (wq_completion)wg-kex-wg2#13 (work_completion)(&peer->transmit_handshake_work) &cookie->lock irq_context: 0 (wq_completion)wg-kex-wg2#13 (work_completion)(&peer->transmit_handshake_work) rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg2#13 (work_completion)(&peer->transmit_handshake_work) rcu_read_lock_bh &base->lock irq_context: 0 (wq_completion)wg-kex-wg2#13 (work_completion)(&peer->transmit_handshake_work) rcu_read_lock_bh &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg2#13 (work_completion)(&peer->transmit_handshake_work) &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg2#13 (work_completion)(&peer->transmit_handshake_work) &c->lock irq_context: 0 (wq_completion)wg-kex-wg2#13 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock irq_context: 0 (wq_completion)wg-kex-wg2#13 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 (wq_completion)wg-kex-wg2#13 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)wg-kex-wg2#13 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &ul->lock irq_context: 0 (wq_completion)wg-kex-wg2#13 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh batched_entropy_u32.lock irq_context: 0 (wq_completion)wg-kex-wg2#13 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#7 irq_context: 0 misc_mtx rfkill_global_mutex &data->mtx &n->list_lock irq_context: 0 cb_lock genl_mutex &____s->seqcount#2 irq_context: 0 cb_lock rcu_read_lock &n->list_lock irq_context: 0 cb_lock rcu_read_lock &n->list_lock &c->lock irq_context: 0 cb_lock genl_mutex uevent_sock_mutex &c->lock irq_context: 0 &type->i_mutex_dir_key#4 &root->kernfs_rwsem &____s->seqcount#2 irq_context: 0 cb_lock genl_mutex fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 cb_lock genl_mutex fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)wg-kex-wg2#13 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock 
&____s->seqcount#10 irq_context: 0 (wq_completion)wg-kex-wg2#13 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg2#13 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &____s->seqcount#2 irq_context: 0 (wq_completion)wg-kex-wg2#13 (work_completion)(&peer->transmit_handshake_work) &rq->__lock irq_context: 0 (wq_completion)wg-kex-wg1#14 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg1#14 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg1#14 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock crngs.lock irq_context: 0 (wq_completion)wg-kex-wg1#14 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg1#14 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock rcu_read_lock_bh batched_entropy_u32.lock irq_context: 0 (wq_completion)wg-kex-wg1#14 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock rcu_read_lock_bh &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg1#14 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &cookie->lock irq_context: 0 (wq_completion)wg-kex-wg1#14 (work_completion)(&({ 
do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &cookie->lock irq_context: 0 (wq_completion)wg-kex-wg1#14 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock irq_context: 0 (wq_completion)wg-kex-wg1#14 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg1#14 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#7 irq_context: 0 (wq_completion)wg-kex-wg1#14 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#10 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss remove_cache_srcu rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)wg-kex-wg1#14 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss remove_cache_srcu rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)wg-kex-wg1#14 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss remove_cache_srcu rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock &____s->seqcount#2 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex rcu_read_lock &rcu_state.gp_wq irq_context: 0 (wq_completion)bat_events 
(work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#9 &xa->xa_lock#12 irq_context: 0 &sb->s_type->i_mutex_key#9 &xa->xa_lock#12 pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#9 stock_lock irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 stock_lock irq_context: 0 &r->consumer_lock irq_context: 0 &mm->mmap_lock stock_lock irq_context: 0 cb_lock genl_mutex rfkill_global_mutex lock kernfs_idr_lock &c->lock irq_context: 0 cb_lock genl_mutex rfkill_global_mutex lock kernfs_idr_lock &____s->seqcount#2 irq_context: 0 cb_lock genl_mutex rfkill_global_mutex lock kernfs_idr_lock &____s->seqcount irq_context: 0 cb_lock genl_mutex rfkill_global_mutex lock kernfs_idr_lock pool_lock#2 irq_context: 0 cb_lock genl_mutex rfkill_global_mutex &____s->seqcount#2 irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 &____s->seqcount#2 irq_context: 0 cb_lock genl_mutex &sb->s_type->i_mutex_key#3 &____s->seqcount#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &dev_addr_list_lock_key#3/1 &c->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &dev_addr_list_lock_key#3/1 &____s->seqcount#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &dev_addr_list_lock_key#3/1 &____s->seqcount irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_read_lock &net->sctp.local_addr_lock &net->sctp.addr_wq_lock &____s->seqcount#2 irq_context: softirq rcu_read_lock &meta->lock irq_context: softirq rcu_read_lock kfence_freelist_lock irq_context: 0 cb_lock genl_mutex rcu_read_lock &rq->__lock irq_context: 0 cb_lock genl_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 &____s->seqcount#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 cb_lock genl_mutex &sb->s_type->i_mutex_key#3 rcu_read_lock rcu_node_0 irq_context: 0 cb_lock genl_mutex &sb->s_type->i_mutex_key#3 rcu_read_lock &rq->__lock irq_context: 0 cb_lock genl_mutex &sb->s_type->i_mutex_key#3 rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq 
(&peer->timer_persistent_keepalive) irq_context: softirq (&peer->timer_persistent_keepalive) &list->lock#17 irq_context: softirq (&peer->timer_persistent_keepalive) rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 cb_lock genl_mutex &sb->s_type->i_mutex_key#3 rcu_read_lock &rcu_state.gp_wq irq_context: 0 rtnl_mutex dev_addr_sem rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override &c->lock irq_context: 0 cb_lock genl_mutex &sb->s_type->i_mutex_key#3 rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 rtnl_mutex dev_addr_sem rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 rtnl_mutex dev_addr_sem rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 &mm->mmap_lock &xa->xa_lock#6 stock_lock irq_context: 0 cb_lock genl_mutex &tn->node_list_lock irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem nl_table_wait.lock &p->pi_lock &rq->__lock irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem nl_table_wait.lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sighand->siglock &p->pi_lock irq_context: 0 &sighand->siglock &p->pi_lock &rq->__lock irq_context: 0 &sighand->siglock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 &____s->seqcount#2 irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 &n->list_lock irq_context: 0 &mm->mmap_lock &anon_vma->rwsem stock_lock irq_context: 0 pcpu_lock stock_lock irq_context: 0 tasklist_lock stock_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 &ei->i_es_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 jbd2_handle &sbi->s_orphan_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 &dentry->d_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &fsnotify_mark_srcu irq_context: 0 sb_writers#4 &s->s_inode_list_lock irq_context: 0 sb_writers#4 sb_internal irq_context: 0 sb_writers#4 sb_internal mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#4 sb_internal pool_lock#2 irq_context: 0 sb_writers#4 sb_internal &journal->j_state_lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ei->i_raw_lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle &sbi->s_orphan_lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle &xa->xa_lock#6 irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ei->i_es_lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle &journal->j_wait_updates irq_context: 0 sb_writers#4 sb_internal &obj_hash[i].lock irq_context: 0 sb_writers#4 inode_hash_lock irq_context: 0 sb_writers#4 inode_hash_lock &sb->s_type->i_lock_key#22 irq_context: 0 sb_writers#4 &fsnotify_mark_srcu irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss &pcp->lock &zone->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss &pcp->lock &zone->lock &____s->seqcount irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 &ei->i_es_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 
&type->i_mutex_dir_key#3 mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 pool_lock#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 &journal->j_state_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 jbd2_handle irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 jbd2_handle &sbi->s_orphan_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 jbd2_handle &ei->i_raw_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 jbd2_handle tk_core.seq.seqcount irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 jbd2_handle &journal->j_wait_updates irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 rename_lock.seqcount irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 &dentry->d_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 &dentry->d_lock &dentry->d_lock/1 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 &dentry->d_lock &dentry->d_lock/1 &lru->node[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 &dentry->d_lock &dentry->d_lock &lru->node[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &wb->list_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &wb->list_lock &sb->s_type->i_lock_key#22 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal pool_lock#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal &journal->j_state_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_raw_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_data_sem irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_data_sem &ei->i_raw_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_data_sem &ei->i_es_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_data_sem &ei->i_es_lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_data_sem &ei->i_es_lock pool_lock#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_data_sem &ei->i_es_lock &sbi->s_es_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_data_sem mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_data_sem pool_lock#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_data_sem &ret->b_state_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_data_sem &ret->b_state_lock &journal->j_list_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_data_sem &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_data_sem 
&____s->seqcount#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_data_sem &pcp->lock &zone->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_data_sem &____s->seqcount irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_data_sem &c->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_data_sem &journal->j_revoke_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_data_sem &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_data_sem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_data_sem &bgl->locks[i].lock &sbi->s_md_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_data_sem &sb->s_type->i_lock_key#22 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle tk_core.seq.seqcount irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &sbi->s_orphan_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &xa->xa_lock#6 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_es_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &journal->j_wait_updates irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal &obj_hash[i].lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#8 &xattrs->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 rcu_read_lock rcu_node_0 irq_context: 0 vlan_ioctl_mutex &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 clock-AF_ROSE irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_ROSE irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_ROSE slock-AF_ROSE irq_context: 0 &sb->s_type->i_mutex_key#10 slock-AF_ROSE irq_context: 0 &sb->s_type->i_mutex_key#10 wlock-AF_ROSE irq_context: 0 &sb->s_type->i_mutex_key#10 &list->lock#20 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_ROSE rose_list_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_ROSE &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_ROSE wlock-AF_ROSE irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_ROSE &list->lock#20 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_ROSE rlock-AF_ROSE irq_context: 0 tasklist_lock &p->alloc_lock irq_context: 0 &type->i_mutex_dir_key#3 sb_writers#4 &c->lock irq_context: 0 sb_writers#4 &obj_hash[i].lock pool_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem &ei->i_es_lock &c->lock irq_context: 0 (wq_completion)events_unbound (reaper_work).work &ACCESS_PRIVATE(ssp->srcu_sup, lock) rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 (wq_completion)events_unbound (reaper_work).work &ACCESS_PRIVATE(ssp->srcu_sup, lock) rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events_unbound 
(reaper_work).work &ACCESS_PRIVATE(ssp->srcu_sup, lock) rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)rcu_gp (work_completion)(&(&ssp->srcu_sup->work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq &(&nsim_dev->trap_data->trap_report_dw)->timer rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 sb_writers#8 &of->mutex kn->active#5 &____s->seqcount#2 irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &rdev->bss_lock &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &rdev->bss_lock &c->lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &rdev->bss_lock &____s->seqcount#2 irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &rdev->bss_lock &pcp->lock &zone->lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &rdev->bss_lock &____s->seqcount irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx fill_pool_map-wait-type-override pool_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_es_lock key#2 irq_context: 0 (wq_completion)wg-crypt-wg1#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 (wq_completion)wg-crypt-wg1#8 (work_completion)(&peer->transmit_packet_work) irq_context: 0 (wq_completion)wg-crypt-wg1#8 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg1#8 (work_completion)(&peer->transmit_packet_work) &obj_hash[i].lock irq_context: 0 rtnl_mutex dev_addr_sem rcu_read_lock &cfs_rq->removed.lock irq_context: 0 &type->i_mutex_dir_key#3 &____s->seqcount#2 irq_context: 0 sb_writers#4 fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 sb_writers#4 fill_pool_map-wait-type-override pool_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_data_sem &bgl->locks[i].lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_data_sem &bgl->locks[i].lock pool_lock#2 irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh &obj_hash[i].lock pool_lock irq_context: softirq (&ndev->rs_timer) &ndev->lock fill_pool_map-wait-type-override pool_lock#2 irq_context: softirq (&ndev->rs_timer) &ndev->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: softirq (&ndev->rs_timer) &____s->seqcount#2 irq_context: 0 kn->active#5 &____s->seqcount#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &hsr->list_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem 
rtnl_mutex &bat_priv->tt.commit_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex ptype_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock hwsim_radio_lock &c->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock hwsim_radio_lock &____s->seqcount#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock hwsim_radio_lock &____s->seqcount irq_context: 0 (wq_completion)rcu_gp (work_completion)(&sdp->work) &x->wait#3 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &fsnotify_mark_srcu &rq->__lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock hwsim_radio_lock &c->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &macsec_netdev_addr_lock_key/1 _xmit_ETHER &c->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &macsec_netdev_addr_lock_key/1 _xmit_ETHER &____s->seqcount#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &macsec_netdev_addr_lock_key/1 _xmit_ETHER &____s->seqcount irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex &bat_priv->tt.commit_lock &bat_priv->softif_vlan_list_lock &c->lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex &bat_priv->tt.commit_lock &bat_priv->softif_vlan_list_lock &____s->seqcount#2 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex &bat_priv->tt.commit_lock &bat_priv->softif_vlan_list_lock &____s->seqcount irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock &tbl->lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock &n->lock irq_context: 0 &type->i_mutex_dir_key#3 sb_writers#4 &rq->__lock irq_context: 0 &type->i_mutex_dir_key#3 sb_writers#4 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss rcu_read_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 jbd2_handle &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &rq->__lock 
irq_context: 0 sb_writers#4 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 sb_internal jbd2_handle &sbi->s_orphan_lock &rq->__lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle &sbi->s_orphan_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 sb_internal jbd2_handle &rq->__lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 sb_internal jbd2_handle rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#4 sb_internal jbd2_handle rcu_read_lock &rq->__lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh rcu_read_lock &list->lock#5 irq_context: softirq (&ndev->rs_timer) &n->list_lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#8 tomoyo_ss &pcp->lock &zone->lock irq_context: 0 cb_lock &rdev->wiphy.mtx &p->pi_lock &rq->__lock irq_context: 0 cb_lock &rdev->wiphy.mtx &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_INET slock-AF_INET rcu_read_lock rcu_read_lock &____s->seqcount#7 irq_context: 0 sk_lock-AF_INET slock-AF_INET rcu_read_lock rcu_read_lock &ct->lock irq_context: 0 sk_lock-AF_INET slock-AF_INET rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 sk_lock-AF_INET slock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 irq_context: 0 sk_lock-AF_INET slock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock pool_lock#2 irq_context: 0 sk_lock-AF_INET slock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock tk_core.seq.seqcount irq_context: 0 sk_lock-AF_INET slock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET slock-AF_INET &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET slock-AF_INET &base->lock irq_context: 0 sk_lock-AF_INET slock-AF_INET &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 &mapping->private_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 jbd2_handle &mapping->private_lock irq_context: 
0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_data_sem &mapping->private_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx lweventlist_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx lweventlist_lock pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx lweventlist_lock &dir->lock#2 irq_context: 0 (wq_completion)cfg80211 (work_completion)(&rdev->event_work) &rdev->wiphy.mtx &wdev->mtx &____s->seqcount#2 irq_context: 0 &type->i_mutex_dir_key#5 &____s->seqcount#2 irq_context: 0 kn->active#5 &kernfs_locks->open_file_mutex[count] &____s->seqcount#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh key#21 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &entry->crc_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &____s->seqcount irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &____s->seqcount#2 irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) rcu_read_lock_bh rcu_read_lock &list->lock#5 irq_context: 0 &p->lock &____s->seqcount#2 irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx batched_entropy_u32.lock crngs.lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx fill_pool_map-wait-type-override &c->lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &idev->mc_lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &idev->mc_lock fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &idev->mc_lock fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &idev->mc_lock fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &idev->mc_lock fill_pool_map-wait-type-override pool_lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &c->lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &____s->seqcount#2 irq_context: 0 cb_lock rtnl_mutex 
&rdev->wiphy.mtx &____s->seqcount irq_context: 0 rtnl_mutex dev_addr_sem rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq fs/notify/mark.c:89 rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: softirq fs/notify/mark.c:89 rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq (&ndev->rs_timer) &n->list_lock &c->lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->tx_work) &data->read_wait irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->tx_work) &list->lock#9 irq_context: 0 sb_writers#4 sb_internal &c->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 &ei->i_es_lock key#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem &c->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem &____s->seqcount#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem &____s->seqcount irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET slock-AF_INET fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET slock-AF_INET fill_pool_map-wait-type-override pool_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle rcu_read_lock &rq->__lock irq_context: 0 &mm->mmap_lock fill_pool_map-wait-type-override rcu_read_lock rcu_node_0 irq_context: 0 &mm->mmap_lock fill_pool_map-wait-type-override rcu_read_lock &rq->__lock irq_context: 0 &kernfs_locks->open_file_mutex[count] fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 sb_writers#8 kn->active#5 &kernfs_locks->open_file_mutex[count] &____s->seqcount#2 irq_context: 0 &group->notification_waitq &ep->lock &ep->wq irq_context: 0 &group->notification_waitq &ep->lock &ep->wq &p->pi_lock irq_context: 0 &group->notification_waitq &ep->lock &ep->wq &p->pi_lock &rq->__lock irq_context: 0 &group->notification_waitq &ep->lock &ep->wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sbinfo->stat_lock irq_context: 0 &net->xfrm.xfrm_cfg_mutex irq_context: 0 &net->xfrm.xfrm_cfg_mutex &net->xfrm.xfrm_policy_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &obj_hash[i].lock pool_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 fill_pool_map-wait-type-override pool_lock irq_context: 0 &group->mark_mutex &____s->seqcount#2 irq_context: 0 (wq_completion)events_unbound connector_reaper_work &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)events_unbound connector_reaper_work fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)events_unbound connector_reaper_work fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)events_unbound connector_reaper_work fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 (wq_completion)events_unbound connector_reaper_work fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)events_unbound connector_reaper_work fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)ipv6_addrconf 
(work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &ul->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock batched_entropy_u32.lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &____s->seqcount#7 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock &____s->seqcount#7 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock once_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock once_lock crngs.lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock &pool->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &nf_nat_locks[i] irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &nf_conntrack_locks[i] irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 batched_entropy_u8.lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &nf_conntrack_locks[i]/1 
irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock &____s->seqcount#7 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock once_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &nf_nat_locks[i] irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &ul->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock batched_entropy_u32.lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &____s->seqcount#7 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx &wdev->mtx rcu_state.exp_mutex &rnp->exp_wq[3] irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx &wdev->mtx rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx &wdev->mtx rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 
(wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock batched_entropy_u32.lock crngs.lock irq_context: 0 rtnl_mutex rcu_read_lock console_owner_lock irq_context: 0 rtnl_mutex rcu_read_lock console_owner irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx console_owner_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx console_owner irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx rcu_read_lock &sta->rate_ctrl_lock &c->lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx rcu_read_lock &sta->rate_ctrl_lock &____s->seqcount#2 irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx rcu_read_lock &sta->rate_ctrl_lock &____s->seqcount irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 &____s->seqcount#2 irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 &n->list_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &c->lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx rcu_read_lock &sta->lock &c->lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx rcu_read_lock &sta->lock &____s->seqcount#2 irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx rcu_read_lock &sta->lock &____s->seqcount irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh pool_lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock#2 rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock#2 rcu_read_lock_bh pool_lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock#2 rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock#2 rcu_read_lock_bh rcu_read_lock &list->lock#5 irq_context: softirq (&peer->timer_persistent_keepalive) pool_lock#2 irq_context: softirq (&peer->timer_persistent_keepalive) tk_core.seq.seqcount irq_context: softirq (&peer->timer_persistent_keepalive) rcu_read_lock_bh &r->producer_lock#2 irq_context: softirq (&peer->timer_persistent_keepalive) rcu_read_lock_bh rcu_read_lock &pool->lock irq_context: softirq 
(&peer->timer_persistent_keepalive) rcu_read_lock_bh rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: softirq (&peer->timer_persistent_keepalive) rcu_read_lock_bh rcu_read_lock &pool->lock &p->pi_lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock rcu_read_lock &ndev->lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock &obj_hash[i].lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock pool_lock#2 irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock quarantine_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh quarantine_lock irq_context: 0 rtnl_mutex &wg->device_update_lock &obj_hash[i].lock pool_lock irq_context: 0 &type->s_umount_key#46/1 &c->lock irq_context: 0 &type->s_umount_key#46/1 &____s->seqcount#2 irq_context: 0 &type->s_umount_key#46/1 &____s->seqcount irq_context: 0 rtnl_mutex &wg->device_update_lock rcu_state.exp_mutex rcu_read_lock &rq->__lock irq_context: 0 rtnl_mutex &wg->device_update_lock rcu_state.exp_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6/1 tomoyo_ss &____s->seqcount#2 irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6/1 cgroup_mutex lock kernfs_idr_lock pool_lock#2 irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6/1 cgroup_mutex &____s->seqcount#2 irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6/1 cgroup_mutex &obj_hash[i].lock pool_lock irq_context: 0 sb_writers#10 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem &rnp->exp_lock irq_context: 0 sb_writers#10 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem rcu_state.exp_mutex irq_context: 0 sb_writers#10 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem rcu_state.exp_mutex rcu_state.exp_mutex.wait_lock irq_context: 0 sb_writers#10 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem rcu_state.exp_mutex rcu_read_lock &rq->__lock irq_context: 0 sb_writers#10 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem rcu_state.exp_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &wg->device_update_lock rcu_state.exp_mutex.wait_lock irq_context: 0 rtnl_mutex &wg->device_update_lock &p->pi_lock irq_context: 0 rtnl_mutex &wg->device_update_lock &p->pi_lock &rq->__lock irq_context: 0 rtnl_mutex &wg->device_update_lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#11 nl_table_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#11 nl_table_wait.lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#11 net_rwsem irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#11 net_rwsem &list->lock#2 irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#11 &tn->lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#11 &c->lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#11 
&____s->seqcount#2 irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#11 &n->list_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#11 &n->list_lock &c->lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#11 remove_cache_srcu irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#11 remove_cache_srcu quarantine_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#11 remove_cache_srcu &c->lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#11 remove_cache_srcu &n->list_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#11 remove_cache_srcu &obj_hash[i].lock irq_context: 0 rtnl_mutex rcu_state.exp_mutex fill_pool_map-wait-type-override &n->list_lock irq_context: 0 rtnl_mutex rcu_state.exp_mutex fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 rtnl_mutex rcu_state.exp_mutex fill_pool_map-wait-type-override &rq->__lock irq_context: 0 (wq_completion)wg-kex-wg2#22 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)wg-kex-wg2#22 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-crypt-wg2#11 irq_context: 0 (wq_completion)wg-kex-wg2#22 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &rq->__lock irq_context: 0 (wq_completion)wg-crypt-wg2#11 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) irq_context: 0 (wq_completion)wg-crypt-wg2#11 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &r->consumer_lock#2 irq_context: 0 (wq_completion)wg-crypt-wg2#11 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) 
rcu_read_lock &pool->lock irq_context: 0 (wq_completion)wg-crypt-wg2#11 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg2#11 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 (wq_completion)wg-crypt-wg2#11 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)wg-crypt-wg2#11 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-crypt-wg2#11 (work_completion)(&peer->transmit_packet_work) irq_context: 0 (wq_completion)wg-crypt-wg2#11 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg2#11 (work_completion)(&peer->transmit_packet_work) &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg2#11 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock irq_context: 0 (wq_completion)wg-crypt-wg2#11 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#7 irq_context: 0 (wq_completion)wg-crypt-wg2#11 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)wg-crypt-wg2#11 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-crypt-wg2#11 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: 0 sb_writers#10 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem rcu_state.exp_mutex &rnp->exp_wq[1] irq_context: 0 sb_writers#10 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem &p->pi_lock &rq->__lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex cpu_hotplug_lock cpuset_rwsem rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex cpu_hotplug_lock cpuset_rwsem rcu_state.exp_mutex &rnp->exp_wq[2] irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex cpu_hotplug_lock cpuset_rwsem rcu_state.exp_mutex 
&rq->__lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 (wq_completion)events_unbound (work_completion)(&(&kfence_timer)->work) cpu_hotplug_lock jump_label_mutex text_mutex rcu_node_0 irq_context: 0 (wq_completion)events_unbound (work_completion)(&(&kfence_timer)->work) cpu_hotplug_lock jump_label_mutex text_mutex &rcu_state.expedited_wq irq_context: 0 (wq_completion)events_unbound (work_completion)(&(&kfence_timer)->work) cpu_hotplug_lock jump_label_mutex text_mutex &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&(&kfence_timer)->work) cpu_hotplug_lock jump_label_mutex text_mutex &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&(&kfence_timer)->work) cpu_hotplug_lock jump_label_mutex text_mutex &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex shrinker_rwsem &____s->seqcount irq_context: 0 kn->active#56 &c->lock irq_context: 0 kn->active#56 &____s->seqcount#2 irq_context: 0 kn->active#56 &n->list_lock irq_context: 0 kn->active#56 &n->list_lock &c->lock irq_context: 0 kn->active#56 &rq->__lock irq_context: 0 kn->active#56 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock _xmit_ETHER rcu_read_lock &pool->lock/1 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex rcu_read_lock &n->list_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex rcu_read_lock &n->list_lock &c->lock irq_context: 0 nf_nat_proto_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#10 &nsim_trap_data->trap_lock quarantine_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &c->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 fill_pool_map-wait-type-override &rq->__lock irq_context: 0 (wq_completion)wg-kex-wg0#6 irq_context: 0 (wq_completion)wg-kex-wg0#6 (work_completion)(&peer->transmit_handshake_work) irq_context: 0 (wq_completion)wg-kex-wg0#6 (work_completion)(&peer->transmit_handshake_work) tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg0#6 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock irq_context: 0 (wq_completion)wg-kex-wg0#6 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock irq_context: 0 (wq_completion)wg-kex-wg0#6 (work_completion)(&peer->transmit_handshake_work) 
&wg->static_identity.lock &handshake->lock crngs.lock irq_context: 0 (wq_completion)wg-kex-wg0#6 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg0#6 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg0#6 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock rcu_read_lock_bh batched_entropy_u32.lock irq_context: 0 (wq_completion)wg-kex-wg0#6 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock rcu_read_lock_bh &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg0#6 (work_completion)(&peer->transmit_handshake_work) &cookie->lock irq_context: 0 (wq_completion)wg-kex-wg0#6 (work_completion)(&peer->transmit_handshake_work) &cookie->lock irq_context: 0 (wq_completion)wg-kex-wg0#6 (work_completion)(&peer->transmit_handshake_work) rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg0#6 (work_completion)(&peer->transmit_handshake_work) rcu_read_lock_bh &base->lock irq_context: 0 (wq_completion)wg-kex-wg0#6 (work_completion)(&peer->transmit_handshake_work) rcu_read_lock_bh &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg0#6 (work_completion)(&peer->transmit_handshake_work) &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg0#6 (work_completion)(&peer->transmit_handshake_work) &c->lock irq_context: 0 (wq_completion)wg-kex-wg0#6 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock irq_context: 0 (wq_completion)wg-kex-wg0#6 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem rcu_state.exp_mutex rcu_node_0 irq_context: 0 (wq_completion)wg-kex-wg0#6 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#7 irq_context: 0 sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem rcu_state.exp_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg0#6 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &nf_conntrack_locks[i] irq_context: 0 sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem rcu_state.exp_mutex rcu_read_lock &pool->lock irq_context: 0 (wq_completion)wg-kex-wg0#6 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 irq_context: 0 sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem rcu_state.exp_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg0#6 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 batched_entropy_u8.lock irq_context: 0 sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 (wq_completion)wg-kex-wg0#6 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem 
rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)wg-kex-wg0#6 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &dir->lock#2 irq_context: 0 sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-kex-wg0#6 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &tbl->lock irq_context: 0 sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem rcu_state.exp_mutex &rnp->exp_wq[0] irq_context: 0 (wq_completion)wg-kex-wg0#6 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &n->lock irq_context: 0 sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem rcu_state.exp_mutex &rq->__lock irq_context: 0 (wq_completion)wg-kex-wg0#6 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#9 irq_context: 0 sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem rcu_state.exp_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-kex-wg0#6 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg0#6 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: 0 (wq_completion)wg-kex-wg0#6 (work_completion)(&peer->transmit_handshake_work) batched_entropy_u8.lock irq_context: 0 (wq_completion)wg-kex-wg0#6 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock &rq->__lock irq_context: 0 (wq_completion)wg-kex-wg0#6 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh crngs.lock irq_context: 0 (wq_completion)wg-kex-wg0#6 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh batched_entropy_u32.lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &tb->tb6_lock quarantine_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_node_0 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &rcu_state.expedited_wq irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)rcu_gp (work_completion)(&rew->rew_work) rcu_state.exp_wake_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem &obj_hash[i].lock irq_context: 0 sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem.rss.gp_wait.lock &obj_hash[i].lock irq_context: 0 ebt_mutex &c->lock irq_context: 0 ebt_mutex &____s->seqcount#2 irq_context: 0 ebt_mutex &____s->seqcount 
irq_context: 0 &xt[i].mutex &n->list_lock &c->lock irq_context: 0 nf_nat_proto_mutex nf_hook_mutex &c->lock irq_context: 0 nf_nat_proto_mutex nf_hook_mutex &____s->seqcount#2 irq_context: 0 nf_nat_proto_mutex nf_hook_mutex &____s->seqcount irq_context: 0 nf_hook_mutex &rq->__lock irq_context: 0 nf_hook_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 nf_nat_proto_mutex &c->lock irq_context: 0 nf_nat_proto_mutex &____s->seqcount#2 irq_context: 0 nf_nat_proto_mutex &____s->seqcount irq_context: 0 (wq_completion)wg-crypt-wg2#11 (work_completion)(&peer->transmit_packet_work) &rq->__lock irq_context: 0 (wq_completion)wg-crypt-wg2#11 (work_completion)(&peer->transmit_packet_work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-crypt-wg2#11 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-crypt-wg1#11 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh batched_entropy_u32.lock irq_context: 0 (wq_completion)wg-crypt-wg2#11 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) tk_core.seq.seqcount irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &idev->mc_lock _xmit_ETHER &c->lock irq_context: 0 cb_lock genl_mutex &sb->s_type->i_mutex_key#3 batched_entropy_u8.lock irq_context: 0 cb_lock genl_mutex &sb->s_type->i_mutex_key#3 kfence_freelist_lock irq_context: 0 (wq_completion)phy27 irq_context: 0 (wq_completion)phy27 (work_completion)(&local->reconfig_filter) irq_context: 0 (wq_completion)phy27 (work_completion)(&local->reconfig_filter) &local->filter_lock irq_context: 0 cb_lock quarantine_lock irq_context: 0 (wq_completion)phy28 irq_context: 0 (wq_completion)phy28 (work_completion)(&local->reconfig_filter) irq_context: 0 (wq_completion)phy28 (work_completion)(&local->reconfig_filter) &local->filter_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh fill_pool_map-wait-type-override &n->list_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 kn->active#53 &kernfs_locks->open_file_mutex[count] &c->lock irq_context: 0 kn->active#55 &kernfs_locks->open_file_mutex[count] &n->list_lock irq_context: 0 kn->active#55 &kernfs_locks->open_file_mutex[count] &n->list_lock &c->lock irq_context: 0 ebt_mutex &n->list_lock irq_context: 0 ebt_mutex &n->list_lock &c->lock irq_context: 0 nf_nat_proto_mutex nf_hook_mutex &n->list_lock irq_context: 0 nf_nat_proto_mutex nf_hook_mutex &n->list_lock &c->lock irq_context: 0 nf_nat_proto_mutex nf_hook_mutex &rq->__lock irq_context: 0 nf_nat_proto_mutex nf_hook_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh 
dev->qdisc_tx_busylock ?: &qdisc_tx_busylock#2 rcu_read_lock_bh &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &tb->tb6_lock rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &mm->mmap_lock &rcu_state.expedited_wq irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &mm->mmap_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 &____s->seqcount#2 irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &mm->mmap_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &mm->mmap_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh &hsr->seqnr_lock rcu_read_lock &____s->seqcount#2 irq_context: 0 cb_lock nlk_cb_mutex-GENERIC genl_mutex key#21 irq_context: 0 &iint->mutex mapping.invalidate_lock stock_lock irq_context: 0 &iint->mutex mapping.invalidate_lock &xa->xa_lock#6 stock_lock irq_context: 0 &iint->mutex &p->alloc_lock irq_context: 0 &iint->mutex &list->lock irq_context: 0 &iint->mutex kauditd_wait.lock irq_context: 0 cb_lock nlk_cb_mutex-GENERIC genl_mutex key#21 &entry->crc_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &mm->mmap_lock rcu_read_lock &cfs_rq->removed.lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &mm->mmap_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &mm->mmap_lock rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&nlk->work) vmap_area_lock irq_context: 0 (wq_completion)events (work_completion)(&nlk->work) purge_vmap_area_lock irq_context: 0 (wq_completion)events (work_completion)(&nlk->work) rcu_read_lock pool_lock#2 irq_context: 0 &iint->mutex mapping.invalidate_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &iint->mutex rcu_read_lock &rcu_state.expedited_wq irq_context: 0 &iint->mutex rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 &iint->mutex rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 &iint->mutex rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &iint->mutex rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)wg-crypt-wg0#11 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh &base->lock irq_context: 0 (wq_completion)wg-crypt-wg0#11 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh &base->lock &obj_hash[i].lock irq_context: 0 &pipe->mutex/1 &mm->mmap_lock &sem->wait_lock irq_context: 0 rtnl_mutex &xs->mutex rcu_read_lock pool_lock#2 irq_context: 0 rtnl_mutex &xs->mutex &obj_hash[i].lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex 
rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 &smc->clcsock_release_lock k-sk_lock-AF_INET k-slock-AF_INET key#24 irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#11 &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#11 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-crypt-wg0#11 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)wg-crypt-wg0#11 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &xs->mutex purge_vmap_area_lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_TUNNEL6#2 &data->lock &obj_hash[i].lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_TUNNEL6#2 &data->lock &base->lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_TUNNEL6#2 &data->lock &base->lock &obj_hash[i].lock irq_context: 0 &xs->mutex &mm->mmap_lock fs_reclaim mmu_notifier_invalidate_range_start &cfs_rq->removed.lock irq_context: 0 &xs->mutex &mm->mmap_lock fs_reclaim mmu_notifier_invalidate_range_start &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq (&br->hello_timer) irq_context: softirq (&br->hello_timer) &br->lock irq_context: softirq (&br->hello_timer) &br->lock &obj_hash[i].lock irq_context: softirq (&br->hello_timer) &br->lock &base->lock irq_context: softirq (&br->hello_timer) &br->lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&nlk->work) purge_vmap_area_lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&nlk->work) purge_vmap_area_lock pool_lock#2 irq_context: 0 sk_lock-AF_X25 x25_route_list_lock irq_context: 0 sk_lock-AF_X25 x25_list_lock irq_context: 0 sb_writers &type->i_mutex_dir_key#2 tomoyo_ss &n->list_lock irq_context: 0 sb_writers &type->i_mutex_dir_key#2 tomoyo_ss &n->list_lock &c->lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock &____s->seqcount#2 irq_context: 0 (wq_completion)wg-crypt-wg1#11 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) pool_lock#2 irq_context: 0 (wq_completion)wg-crypt-wg1#11 
(work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg1#11 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 (wq_completion)wg-crypt-wg1#11 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)wg-crypt-wg1#11 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &ul->lock irq_context: 0 (wq_completion)wg-crypt-wg1#11 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-crypt-wg1#11 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rlock-AF_PACKET irq_context: 0 (wq_completion)wg-crypt-wg1#11 (work_completion)(&peer->transmit_packet_work) batched_entropy_u8.lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &bat_priv->softif_vlan_list_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &bat_priv->softif_vlan_list_lock pool_lock#2 irq_context: 0 vlan_ioctl_mutex rtnl_mutex key#16 irq_context: 0 vlan_ioctl_mutex rtnl_mutex &bat_priv->tt.changes_list_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &batadv_netdev_addr_lock_key/1 irq_context: 0 vlan_ioctl_mutex rtnl_mutex fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 vlan_ioctl_mutex rtnl_mutex fill_pool_map-wait-type-override pool_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &root->kernfs_rwsem &sem->wait_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex remove_cache_srcu rcu_read_lock &____s->seqcount irq_context: 0 vlan_ioctl_mutex rtnl_mutex remove_cache_srcu rcu_read_lock rcu_node_0 irq_context: 0 vlan_ioctl_mutex rtnl_mutex remove_cache_srcu rcu_read_lock &rq->__lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex remove_cache_srcu rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-kex-wg2#17 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)wg-kex-wg2#17 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount irq_context: 0 &vma->vm_lock->lock ptlock_ptr(page)#2 lock#4 &lruvec->lru_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 nfnl_subsys_nftables &nft_net->commit_mutex irq_context: 0 &nft_net->commit_mutex irq_context: 0 &nft_net->commit_mutex fs_reclaim irq_context: 0 &nft_net->commit_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &nft_net->commit_mutex pool_lock#2 irq_context: 0 &nft_net->commit_mutex rcu_state.exp_mutex rcu_node_0 irq_context: 0 &nft_net->commit_mutex rcu_state.exp_mutex &obj_hash[i].lock irq_context: 0 &nft_net->commit_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock irq_context: 0 &nft_net->commit_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 &nft_net->commit_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 &nft_net->commit_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: softirq rcu_callback per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 &nft_net->commit_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock 
&p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &nft_net->commit_mutex rcu_state.exp_mutex &rq->__lock irq_context: 0 &nft_net->commit_mutex rcu_state.exp_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-kex-wg2#17 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)wg-crypt-wg0#8 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 sk_lock-AF_PACKET fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 sk_lock-AF_PACKET fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_PACKET remove_cache_srcu irq_context: 0 sk_lock-AF_PACKET remove_cache_srcu &rq->__lock irq_context: 0 sk_lock-AF_PACKET remove_cache_srcu quarantine_lock irq_context: 0 sk_lock-AF_PACKET rcu_state.exp_mutex &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#10 &nsim_trap_data->trap_lock &base->lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#10 &nsim_trap_data->trap_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#11 &nsim_trap_data->trap_lock &pcp->lock &zone->lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#11 &nsim_trap_data->trap_lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 (wq_completion)wg-crypt-wg2#11 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh &base->lock irq_context: 0 (wq_completion)wg-crypt-wg2#11 (work_completion)(&peer->transmit_packet_work) &base->lock irq_context: 0 (wq_completion)wg-crypt-wg2#11 (work_completion)(&peer->transmit_packet_work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg2#11 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg2#11 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh batched_entropy_u32.lock irq_context: 0 (wq_completion)wg-crypt-wg2#11 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 (wq_completion)wg-crypt-wg2#11 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-crypt-wg2#11 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rlock-AF_PACKET irq_context: 0 (wq_completion)wg-crypt-wg2#11 (work_completion)(&peer->transmit_packet_work) batched_entropy_u8.lock irq_context: 0 &nft_net->commit_mutex rcu_state.exp_mutex &rnp->exp_wq[2] irq_context: 0 &nft_net->commit_mutex rcu_state.exp_mutex rcu_read_lock &rq->__lock irq_context: 0 &nft_net->commit_mutex rcu_state.exp_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex rcu_read_lock &rcu_state.expedited_wq irq_context: 0 rtnl_mutex rcu_read_lock 
&rcu_state.expedited_wq &p->pi_lock irq_context: 0 rtnl_mutex rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 rtnl_mutex rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &nft_net->commit_mutex &obj_hash[i].lock irq_context: 0 rtnl_mutex &tb->tb6_lock nl_table_wait.lock &p->pi_lock irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem &q->instances_lock irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem &log->instances_lock irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem &nft_net->commit_mutex irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &c->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_data_sem mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_data_sem mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-crypt-wg2#11 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh &base->lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_PACKET &po->pg_vec_lock &data->lock irq_context: 0 sk_lock-AF_PACKET &po->pg_vec_lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_PACKET &po->pg_vec_lock pool_lock#2 irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock rcu_read_lock &c->lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock &c->lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock rlock-AF_PACKET irq_context: 0 (wq_completion)wg-crypt-wg0#11 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-crypt-wg0#11 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rlock-AF_PACKET irq_context: 0 (wq_completion)wg-crypt-wg0#11 (work_completion)(&peer->transmit_packet_work) batched_entropy_u8.lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 fill_pool_map-wait-type-override rcu_read_lock rcu_node_0 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 fill_pool_map-wait-type-override rcu_read_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 fill_pool_map-wait-type-override rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_PACKET &rnp->exp_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_PACKET rcu_state.exp_mutex irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_PACKET rcu_state.exp_mutex rcu_state.exp_mutex.wait_lock irq_context: 0 rtnl_mutex &wg->device_update_lock &pcp->lock &zone->lock irq_context: 0 rtnl_mutex &wg->device_update_lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_PACKET &pcp->lock &zone->lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_PACKET &pcp->lock &zone->lock &____s->seqcount irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rlock-AF_PACKET 
irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex rcu_read_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 tomoyo_ss rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->orig_work)->work) &base->lock fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->orig_work)->work) &base->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 &sig->cred_guard_mutex &tsk->futex_exit_mutex &mm->mmap_lock &rq->__lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &idev->mc_lock krc.lock &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &idev->mc_lock krc.lock &base->lock irq_context: 0 cb_lock genl_mutex rcu_read_lock &c->lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &idev->mc_lock krc.lock &base->lock &obj_hash[i].lock irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock rcu_node_0 irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &rcu_state.expedited_wq irq_context: 0 (wq_completion)wg-crypt-wg0#11 (work_completion)(&peer->transmit_packet_work) &rq->__lock irq_context: 0 &sig->cred_guard_mutex &tsk->futex_exit_mutex &mm->mmap_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)wg-crypt-wg0#10 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 pernet_ops_rwsem &root->kernfs_rwsem pgd_lock irq_context: 0 pernet_ops_rwsem &root->kernfs_rwsem key irq_context: 0 pernet_ops_rwsem &root->kernfs_rwsem pcpu_lock irq_context: 0 pernet_ops_rwsem &root->kernfs_rwsem percpu_counters_lock irq_context: 0 pernet_ops_rwsem &root->kernfs_rwsem &base->lock irq_context: 0 pernet_ops_rwsem &root->kernfs_rwsem &base->lock &obj_hash[i].lock irq_context: 0 &root->kernfs_rwsem rcu_read_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 pernet_ops_rwsem dev_pm_qos_sysfs_mtx &root->kernfs_rwsem &rq->__lock &cfs_rq->removed.lock irq_context: 0 pernet_ops_rwsem dev_pm_qos_sysfs_mtx &root->kernfs_rwsem rcu_read_lock &rq->__lock irq_context: 0 pernet_ops_rwsem dev_pm_qos_sysfs_mtx &root->kernfs_rwsem rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#11 &nsim_trap_data->trap_lock batched_entropy_u8.lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#11 &nsim_trap_data->trap_lock kfence_freelist_lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#11 &nsim_trap_data->trap_lock &meta->lock irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 rcu_read_lock &cfs_rq->removed.lock irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 rcu_read_lock &obj_hash[i].lock irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)wg-crypt-wg0#11 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock 
&p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#11 &nsim_trap_data->trap_lock &base->lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#11 &nsim_trap_data->trap_lock &base->lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#8 kauditd_wait.lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 &sb->s_type->i_lock_key#5 &xa->xa_lock#6 &obj_hash[i].lock pool_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &bat_priv->softif_vlan_list_lock &c->lock irq_context: 0 pernet_ops_rwsem fill_pool_map-wait-type-override rcu_read_lock rcu_node_0 irq_context: 0 pernet_ops_rwsem fill_pool_map-wait-type-override rcu_read_lock &rq->__lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock batched_entropy_u8.lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock kfence_freelist_lock irq_context: 0 sk_lock-AF_INET6 &mm->mmap_lock sb_pagefaults &journal->j_state_lock irq_context: 0 sk_lock-AF_INET6 &mm->mmap_lock sb_pagefaults &journal->j_state_lock tk_core.seq.seqcount irq_context: 0 sk_lock-AF_INET6 &mm->mmap_lock sb_pagefaults &journal->j_state_lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET6 &mm->mmap_lock sb_pagefaults &journal->j_state_lock &base->lock irq_context: 0 sk_lock-AF_INET6 &mm->mmap_lock sb_pagefaults &journal->j_state_lock &base->lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET6 &mm->mmap_lock sb_pagefaults &rq->__lock irq_context: 0 sk_lock-AF_INET6 &mm->mmap_lock sb_pagefaults &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_INET6 &mm->mmap_lock sb_pagefaults jbd2_handle &ret->b_state_lock irq_context: 0 sk_lock-AF_INET6 &mm->mmap_lock sb_pagefaults jbd2_handle &ret->b_state_lock &journal->j_list_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 (wq_completion)wg-kex-wg1#7 irq_context: 0 (wq_completion)wg-kex-wg1#7 (work_completion)(&peer->transmit_handshake_work) irq_context: 0 (wq_completion)wg-kex-wg1#7 (work_completion)(&peer->transmit_handshake_work) tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg1#7 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock irq_context: 0 (wq_completion)wg-kex-wg1#7 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock irq_context: 0 (wq_completion)wg-kex-wg1#7 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock crngs.lock irq_context: 0 (wq_completion)wg-kex-wg1#7 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg1#7 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock &table->lock#2 irq_context: 0 (wq_completion)wg-crypt-wg0#4 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock batched_entropy_u8.lock irq_context: 0 (wq_completion)wg-kex-wg1#7 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock rcu_read_lock_bh batched_entropy_u32.lock irq_context: 0 (wq_completion)wg-crypt-wg0#4 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock 
kfence_freelist_lock irq_context: 0 (wq_completion)wg-kex-wg1#7 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock rcu_read_lock_bh &table->lock#2 irq_context: 0 (wq_completion)wg-crypt-wg0#4 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &meta->lock irq_context: 0 (wq_completion)wg-kex-wg1#7 (work_completion)(&peer->transmit_handshake_work) &cookie->lock irq_context: 0 (wq_completion)wg-kex-wg1#7 (work_completion)(&peer->transmit_handshake_work) &cookie->lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_PACKET &base->lock irq_context: 0 (wq_completion)wg-kex-wg1#7 (work_completion)(&peer->transmit_handshake_work) rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_PACKET &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg1#7 (work_completion)(&peer->transmit_handshake_work) rcu_read_lock_bh &base->lock irq_context: 0 (wq_completion)wg-crypt-wg1#11 (work_completion)(&peer->transmit_packet_work) &base->lock irq_context: 0 (wq_completion)wg-kex-wg1#7 (work_completion)(&peer->transmit_handshake_work) rcu_read_lock_bh &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg1#7 (work_completion)(&peer->transmit_handshake_work) &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg1#7 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock irq_context: 0 (wq_completion)wg-crypt-wg1#11 (work_completion)(&peer->transmit_packet_work) &base->lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET6 &zone->lock &____s->seqcount irq_context: 0 (wq_completion)wg-kex-wg1#7 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#7 irq_context: 0 (wq_completion)wg-crypt-wg1#4 (work_completion)(&peer->transmit_packet_work) rcu_node_0 irq_context: 0 (wq_completion)wg-kex-wg0#17 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock &rq->__lock irq_context: 0 (wq_completion)wg-kex-wg1#7 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &nf_conntrack_locks[i] irq_context: 0 (wq_completion)wg-kex-wg0#17 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-kex-wg1#7 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 irq_context: 0 (wq_completion)wg-kex-wg1#7 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 batched_entropy_u8.lock irq_context: 0 pernet_ops_rwsem wq_pool_mutex &wq->mutex &rq->__lock irq_context: 0 (wq_completion)wg-kex-wg1#7 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &nf_conntrack_locks[i]/1 irq_context: softirq rcu_read_lock &local->rx_path_lock rcu_read_lock &pool->lock fill_pool_map-wait-type-override &n->list_lock irq_context: 0 (wq_completion)wg-kex-wg1#7 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#10 irq_context: softirq rcu_read_lock &local->rx_path_lock rcu_read_lock &pool->lock fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 (wq_completion)wg-kex-wg1#7 
(work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-crypt-wg1#4 (work_completion)(&peer->transmit_packet_work) &rcu_state.expedited_wq irq_context: 0 (wq_completion)wg-kex-wg1#7 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: 0 (wq_completion)wg-crypt-wg1#4 (work_completion)(&peer->transmit_packet_work) &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)wg-kex-wg1#7 (work_completion)(&peer->transmit_handshake_work) batched_entropy_u8.lock irq_context: 0 (wq_completion)wg-crypt-wg1#4 (work_completion)(&peer->transmit_packet_work) &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)wg-kex-wg0#8 irq_context: 0 (wq_completion)wg-kex-wg0#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) irq_context: 0 (wq_completion)wg-kex-wg0#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &r->consumer_lock#2 irq_context: 0 (wq_completion)wg-kex-wg1#7 (work_completion)(&peer->transmit_handshake_work) &c->lock irq_context: 0 (wq_completion)wg-kex-wg0#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock irq_context: 0 (wq_completion)wg-kex-wg1#7 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)wg-kex-wg1#7 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &ul->lock irq_context: 0 (wq_completion)wg-kex-wg1#7 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh batched_entropy_u32.lock irq_context: 0 (wq_completion)wg-kex-wg1#7 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 (wq_completion)wg-kex-wg1#7 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#2 irq_context: 0 (wq_completion)wg-kex-wg0#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock irq_context: 0 (wq_completion)wg-kex-wg1#7 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount irq_context: 0 (wq_completion)wg-kex-wg0#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void 
*)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg1#7 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)wg-kex-wg0#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock irq_context: 0 (wq_completion)wg-kex-wg0#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg0#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock irq_context: 0 (wq_completion)wg-kex-wg0#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg0#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock crngs.lock irq_context: 0 (wq_completion)wg-kex-wg0#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg0#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock rcu_read_lock_bh batched_entropy_u32.lock irq_context: 0 (wq_completion)wg-kex-wg0#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); 
(typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock rcu_read_lock_bh &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg0#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &cookie->lock irq_context: 0 (wq_completion)wg-kex-wg0#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &cookie->lock irq_context: 0 (wq_completion)wg-kex-wg0#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock irq_context: 0 (wq_completion)wg-kex-wg0#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock fs_reclaim irq_context: 0 (wq_completion)wg-kex-wg0#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)wg-kex-wg0#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg0#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock rcu_read_lock_bh &peer->keypairs.keypair_update_lock irq_context: 0 (wq_completion)wg-kex-wg0#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock rcu_read_lock_bh &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg0#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : 
"0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg0#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh &base->lock irq_context: 0 (wq_completion)wg-kex-wg0#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg0#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg0#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock irq_context: 0 (wq_completion)wg-kex-wg0#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)wg-kex-wg0#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#7 irq_context: 0 (wq_completion)wg-kex-wg0#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)wg-kex-wg0#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg0#8 (work_completion)(&({ do { const void 
*__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: 0 (wq_completion)wg-kex-wg1#8 irq_context: 0 (wq_completion)wg-kex-wg1#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) irq_context: 0 (wq_completion)wg-kex-wg1#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &r->consumer_lock#2 irq_context: 0 (wq_completion)wg-kex-wg1#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock irq_context: 0 (wq_completion)wg-kex-wg1#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock irq_context: 0 (wq_completion)wg-kex-wg1#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock irq_context: 0 (wq_completion)wg-kex-wg1#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock irq_context: 0 (wq_completion)wg-kex-wg1#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock irq_context: 0 (wq_completion)wg-kex-wg1#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock fs_reclaim irq_context: 0 (wq_completion)wg-kex-wg1#8 (work_completion)(&({ do { const void *__vpp_verify = 
(typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)wg-kex-wg1#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg1#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock rcu_read_lock_bh &peer->keypairs.keypair_update_lock irq_context: 0 (wq_completion)wg-kex-wg1#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock rcu_read_lock_bh &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg1#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg1#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh &base->lock irq_context: 0 (wq_completion)wg-kex-wg1#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg1#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &rq->__lock irq_context: 0 (wq_completion)wg-crypt-wg1#4 (work_completion)(&peer->transmit_packet_work) &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-kex-wg1#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); 
(typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg1#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &base->lock irq_context: 0 (wq_completion)wg-kex-wg1#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg1#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg1#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &list->lock#17 irq_context: 0 (wq_completion)wg-kex-wg1#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh &r->producer_lock#2 irq_context: 0 (wq_completion)wg-kex-wg1#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh rcu_read_lock &pool->lock irq_context: 0 (wq_completion)wg-kex-wg1#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 cb_lock genl_mutex fill_pool_map-wait-type-override &c->lock irq_context: 0 cb_lock genl_mutex fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 cb_lock genl_mutex fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 cb_lock genl_mutex cpu_hotplug_lock wq_pool_mutex fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 cb_lock genl_mutex cpu_hotplug_lock wq_pool_mutex fill_pool_map-wait-type-override pool_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 kn->active#5 rcu_read_lock pool_lock#2 irq_context: 0 kn->active#5 &obj_hash[i].lock irq_context: softirq 
(&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &list->lock#5 irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &list->lock#5 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex rcu_read_lock &____s->seqcount#2 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex rcu_read_lock &pcp->lock &zone->lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex rcu_read_lock &____s->seqcount irq_context: 0 cb_lock genl_mutex rfkill_global_mutex &n->list_lock irq_context: 0 (wq_completion)wg-crypt-wg1#4 (work_completion)(&peer->transmit_packet_work) &rq->__lock irq_context: 0 (wq_completion)wg-crypt-wg1#4 irq_context: 0 (wq_completion)wg-crypt-wg1#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) irq_context: 0 (wq_completion)wg-crypt-wg1#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &r->consumer_lock#2 irq_context: 0 (wq_completion)wg-crypt-wg1#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock irq_context: 0 (wq_completion)wg-crypt-wg1#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg1#4 (work_completion)(&peer->transmit_packet_work) irq_context: 0 (wq_completion)wg-crypt-wg1#4 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg1#4 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh &base->lock irq_context: 0 (wq_completion)wg-crypt-wg1#4 (work_completion)(&peer->transmit_packet_work) &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg1#4 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock irq_context: 0 (wq_completion)wg-crypt-wg1#4 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#7 irq_context: 0 (wq_completion)wg-crypt-wg1#4 
(work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)wg-crypt-wg1#4 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-crypt-wg1#4 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: 0 (wq_completion)wg-crypt-wg1#4 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-crypt-wg0#4 irq_context: 0 (wq_completion)wg-crypt-wg0#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) irq_context: 0 (wq_completion)wg-crypt-wg0#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &r->consumer_lock#2 irq_context: 0 (wq_completion)wg-crypt-wg0#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-crypt-wg0#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &rq->__lock irq_context: 0 (wq_completion)wg-crypt-wg0#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock irq_context: 0 (wq_completion)wg-crypt-wg0#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg0#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 (wq_completion)wg-crypt-wg0#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : 
"0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)wg-crypt-wg0#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-crypt-wg0#4 (work_completion)(&peer->transmit_packet_work) irq_context: 0 (wq_completion)wg-crypt-wg0#4 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg0#4 (work_completion)(&peer->transmit_packet_work) &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg0#4 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock irq_context: 0 (wq_completion)wg-crypt-wg0#4 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg0#4 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#7 irq_context: 0 (wq_completion)wg-crypt-wg0#4 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)wg-crypt-wg0#4 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-crypt-wg0#4 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: 0 (wq_completion)wg-crypt-wg1#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg2#7 irq_context: 0 (wq_completion)wg-kex-wg2#7 (work_completion)(&peer->transmit_handshake_work) irq_context: 0 (wq_completion)wg-kex-wg2#7 (work_completion)(&peer->transmit_handshake_work) tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg2#7 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock irq_context: 0 (wq_completion)wg-kex-wg2#7 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock irq_context: 0 (wq_completion)wg-kex-wg2#7 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock crngs.lock irq_context: 0 (wq_completion)wg-kex-wg2#7 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg2#7 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg2#7 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock rcu_read_lock_bh batched_entropy_u32.lock irq_context: 0 (wq_completion)wg-kex-wg2#7 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock 
&handshake->lock rcu_read_lock_bh &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg2#7 (work_completion)(&peer->transmit_handshake_work) &cookie->lock irq_context: 0 (wq_completion)wg-kex-wg2#7 (work_completion)(&peer->transmit_handshake_work) &cookie->lock irq_context: 0 (wq_completion)wg-kex-wg2#7 (work_completion)(&peer->transmit_handshake_work) rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg2#7 (work_completion)(&peer->transmit_handshake_work) rcu_read_lock_bh &base->lock irq_context: 0 (wq_completion)wg-kex-wg2#7 (work_completion)(&peer->transmit_handshake_work) rcu_read_lock_bh &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg2#7 (work_completion)(&peer->transmit_handshake_work) &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg2#7 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock irq_context: 0 (wq_completion)wg-kex-wg2#7 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#7 irq_context: 0 (wq_completion)wg-kex-wg2#7 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)wg-kex-wg2#7 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg2#7 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: 0 (wq_completion)wg-kex-wg2#7 (work_completion)(&peer->transmit_handshake_work) batched_entropy_u8.lock irq_context: 0 (wq_completion)wg-kex-wg2#8 irq_context: 0 (wq_completion)wg-kex-wg2#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) irq_context: 0 (wq_completion)wg-kex-wg2#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &r->consumer_lock#2 irq_context: 0 (wq_completion)wg-kex-wg2#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock irq_context: 0 (wq_completion)wg-kex-wg2#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock irq_context: 0 (wq_completion)wg-kex-wg2#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + 
(((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock irq_context: 0 (wq_completion)wg-kex-wg2#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock irq_context: 0 (wq_completion)wg-kex-wg2#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock irq_context: 0 (wq_completion)wg-kex-wg2#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock fs_reclaim irq_context: 0 (wq_completion)wg-kex-wg2#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)wg-kex-wg2#7 (work_completion)(&peer->transmit_handshake_work) &c->lock irq_context: 0 (wq_completion)wg-kex-wg2#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg2#7 (work_completion)(&peer->transmit_handshake_work) &n->list_lock irq_context: 0 (wq_completion)wg-kex-wg2#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock rcu_read_lock_bh &peer->keypairs.keypair_update_lock irq_context: 0 (wq_completion)wg-kex-wg2#7 (work_completion)(&peer->transmit_handshake_work) &n->list_lock &c->lock irq_context: 0 (wq_completion)wg-kex-wg2#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock rcu_read_lock_bh &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg2#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); 
})->work) rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg2#7 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)wg-kex-wg2#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh &base->lock irq_context: 0 (wq_completion)wg-kex-wg2#7 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &ul->lock irq_context: 0 (wq_completion)wg-kex-wg2#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg2#7 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh batched_entropy_u32.lock irq_context: 0 (wq_completion)wg-kex-wg2#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg2#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &base->lock irq_context: 0 (wq_completion)wg-kex-wg2#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg2#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg2#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &list->lock#17 irq_context: 0 (wq_completion)wg-kex-wg2#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) 
rcu_read_lock_bh &r->producer_lock#2 irq_context: 0 (wq_completion)wg-kex-wg2#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh rcu_read_lock &pool->lock irq_context: 0 (wq_completion)wg-kex-wg2#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg2#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 jbd2_handle &journal->j_wait_updates &p->pi_lock &cfs_rq->removed.lock irq_context: 0 rtnl_mutex &net->ipv4.ra_mutex irq_context: 0 &sb->s_type->i_mutex_key#10 rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)wg-kex-wg1#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg1#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg1#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock crngs.lock irq_context: 0 (wq_completion)wg-kex-wg1#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg1#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); 
})->work) &wg->static_identity.lock &handshake->lock rcu_read_lock_bh batched_entropy_u32.lock irq_context: 0 (wq_completion)wg-kex-wg1#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock rcu_read_lock_bh &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg1#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &cookie->lock irq_context: 0 (wq_completion)wg-kex-wg1#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &cookie->lock irq_context: 0 (wq_completion)wg-kex-wg1#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock &c->lock irq_context: 0 (wq_completion)wg-kex-wg1#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock irq_context: 0 (wq_completion)wg-kex-wg1#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg1#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#7 irq_context: 0 (wq_completion)wg-kex-wg1#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)wg-kex-wg1#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) 
*)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg1#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: 0 (wq_completion)wg-kex-wg2#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock &c->lock irq_context: 0 (wq_completion)wg-kex-wg2#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock &____s->seqcount#2 irq_context: 0 (wq_completion)wg-kex-wg2#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock &____s->seqcount irq_context: 0 (wq_completion)wg-crypt-wg2#4 irq_context: 0 (wq_completion)wg-crypt-wg2#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) irq_context: 0 (wq_completion)wg-crypt-wg2#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &r->consumer_lock#2 irq_context: 0 (wq_completion)wg-crypt-wg2#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock irq_context: 0 (wq_completion)wg-crypt-wg2#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg2#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ 
unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 (wq_completion)wg-crypt-wg2#4 (work_completion)(&peer->transmit_packet_work) irq_context: 0 (wq_completion)wg-crypt-wg2#4 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg2#4 (work_completion)(&peer->transmit_packet_work) &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg2#4 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock irq_context: 0 (wq_completion)wg-crypt-wg2#4 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#7 irq_context: 0 (wq_completion)wg-crypt-wg2#4 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)wg-crypt-wg2#4 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)wg-crypt-wg2#4 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-crypt-wg2#4 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: 0 (wq_completion)wg-crypt-wg2#4 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-crypt-wg2#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) tk_core.seq.seqcount irq_context: 0 misc_mtx &____s->seqcount#2 irq_context: 0 rtnl_mutex team->team_lock_key#10 remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 rtnl_mutex team->team_lock_key#10 remove_cache_srcu &obj_hash[i].lock irq_context: 0 rtnl_mutex team->team_lock_key#10 remove_cache_srcu pool_lock#2 irq_context: 0 rtnl_mutex team->team_lock_key#10 &n->list_lock irq_context: 0 rtnl_mutex team->team_lock_key#10 &n->list_lock &c->lock irq_context: 0 rtnl_mutex team->team_lock_key#10 lweventlist_lock irq_context: 0 rtnl_mutex team->team_lock_key#10 lweventlist_lock &dir->lock#2 irq_context: 0 rtnl_mutex team->team_lock_key#10 (console_sem).lock irq_context: 0 rtnl_mutex team->team_lock_key#10 console_lock console_srcu console_owner_lock irq_context: 0 rtnl_mutex team->team_lock_key#10 console_lock console_srcu console_owner irq_context: 0 rtnl_mutex team->team_lock_key#10 console_lock console_srcu console_owner &port_lock_key irq_context: 0 rtnl_mutex team->team_lock_key#10 console_lock console_srcu console_owner console_owner_lock irq_context: 0 rtnl_mutex team->team_lock_key#10 rcu_read_lock rcu_node_0 irq_context: 0 rtnl_mutex team->team_lock_key#10 rcu_read_lock &rq->__lock irq_context: 0 rtnl_mutex team->team_lock_key#10 rcu_read_lock &rcu_state.gp_wq irq_context: 0 rtnl_mutex team->team_lock_key#10 rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 rtnl_mutex team->team_lock_key#10 rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 rtnl_mutex team->team_lock_key#10 rcu_read_lock 
&rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &hard_iface->bat_iv.ogm_buff_mutex batched_entropy_u8.lock crngs.lock irq_context: 0 rtnl_mutex &hard_iface->bat_iv.ogm_buff_mutex &n->list_lock irq_context: 0 rtnl_mutex &hard_iface->bat_iv.ogm_buff_mutex &n->list_lock &c->lock irq_context: 0 kn->active#51 &____s->seqcount#2 irq_context: 0 kn->active#51 &____s->seqcount irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 fill_pool_map-wait-type-override &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock rcu_read_lock &c->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock rcu_read_lock tk_core.seq.seqcount irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &idev->mc_lock &c->lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &idev->mc_lock &____s->seqcount#2 irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &idev->mc_lock &obj_hash[i].lock pool_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &idev->mc_lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &idev->mc_lock fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &fq->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->active_txq_lock[i] irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex 
&idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock &local->active_txq_lock[i] irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock &local->queue_stop_reason_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock &fq->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock &fq->lock tk_core.seq.seqcount irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock hwsim_radio_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock hwsim_radio_lock pool_lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock hwsim_radio_lock &list->lock#19 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock &list->lock#19 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh 
rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh rcu_read_lock &n->list_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh rcu_read_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)wg-crypt-wg1#8 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 (wq_completion)wg-crypt-wg1#8 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &n->list_lock irq_context: 0 (wq_completion)wg-crypt-wg1#8 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &n->list_lock &c->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 remove_cache_srcu &c->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 remove_cache_srcu &n->list_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 remove_cache_srcu &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 remove_cache_srcu &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &hsr->seqnr_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &hsr->seqnr_lock rcu_read_lock rcu_read_lock_bh 
rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &hsr->seqnr_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock#2 rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock#2 rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock#2 rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock#2 rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock#2 rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 &type->i_mutex_dir_key#3 sb_writers#4 jbd2_handle rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#10 &nsim_trap_data->trap_lock batched_entropy_u8.lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#10 &nsim_trap_data->trap_lock kfence_freelist_lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#10 &nsim_trap_data->trap_lock &meta->lock irq_context: 0 (wq_completion)wg-kex-wg1#7 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)wg-kex-wg1#7 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg1#7 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg0#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 (wq_completion)wg-kex-wg0#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)wg-kex-wg0#8 (work_completion)(&({ do { const void 
*__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg0#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock hwsim_radio_lock &n->list_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock hwsim_radio_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh rcu_read_lock &____s->seqcount irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh rcu_read_lock rcu_read_lock pool_lock#2 irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock rcu_read_lock &c->lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &n->list_lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)wg-crypt-wg0#10 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh &base->lock irq_context: 0 (wq_completion)wg-crypt-wg0#10 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg0#10 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount 
irq_context: 0 (wq_completion)wg-crypt-wg0#10 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh rcu_read_lock &n->list_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh rcu_read_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 (wq_completion)events (work_completion)(&data->dm_alert_work) remove_cache_srcu rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)events (work_completion)(&data->dm_alert_work) remove_cache_srcu rcu_read_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 remove_cache_srcu &pcp->lock &zone->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 remove_cache_srcu &pcp->lock &zone->lock &____s->seqcount irq_context: 0 (wq_completion)wg-kex-wg2#18 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock &c->lock irq_context: 0 (wq_completion)wg-kex-wg2#18 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock &n->list_lock irq_context: 0 (wq_completion)wg-kex-wg2#18 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock &n->list_lock &c->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock hwsim_radio_lock &n->list_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock hwsim_radio_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock 
rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &c->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg0#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)wg-crypt-wg0#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock &anon_vma->rwsem &cfs_rq->removed.lock irq_context: 0 (wq_completion)wg-kex-wg2#15 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock &rq->__lock irq_context: 0 (wq_completion)wg-kex-wg2#15 (work_completion)(&peer->transmit_handshake_work) &c->lock irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-crypt-wg0#10 (work_completion)(&peer->transmit_packet_work) batched_entropy_u8.lock irq_context: 0 (wq_completion)wg-crypt-wg1#10 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg1#10 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)wg-crypt-wg1#10 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &ul->lock irq_context: 0 (wq_completion)wg-crypt-wg1#10 (work_completion)(&peer->transmit_packet_work) batched_entropy_u8.lock irq_context: 0 (wq_completion)wg-crypt-wg2#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)wg-crypt-wg2#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-crypt-wg2#10 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh &base->lock irq_context: 0 (wq_completion)wg-crypt-wg2#10 (work_completion)(&peer->transmit_packet_work) &base->lock irq_context: 0 (wq_completion)wg-crypt-wg2#10 (work_completion)(&peer->transmit_packet_work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg2#10 
(work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg2#10 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh batched_entropy_u32.lock irq_context: 0 (wq_completion)wg-crypt-wg2#10 (work_completion)(&peer->transmit_packet_work) batched_entropy_u8.lock irq_context: 0 (wq_completion)wg-crypt-wg2#10 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh &base->lock &obj_hash[i].lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock tk_core.seq.seqcount irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock &obj_hash[i].lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock#2 rcu_read_lock &c->lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock#2 rcu_read_lock tk_core.seq.seqcount irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock#2 rcu_read_lock pool_lock#2 irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock#2 rcu_read_lock rlock-AF_PACKET irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock#2 rcu_read_lock_bh rcu_read_lock &c->lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock#2 rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 &pcp->lock &zone->lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 &pcp->lock &zone->lock &____s->seqcount irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock#2 rcu_read_lock_bh rcu_read_lock rlock-AF_PACKET irq_context: 0 (wq_completion)wg-kex-wg1#19 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg1#19 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg0#20 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg0#20 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh 
rcu_read_lock &obj_hash[i].lock irq_context: 0 rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_SLIP#2 rcu_read_lock tk_core.seq.seqcount irq_context: 0 rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_SLIP#2 rcu_read_lock &obj_hash[i].lock irq_context: 0 rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_NETROM rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg1#20 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &base->lock irq_context: 0 rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_NETROM rcu_read_lock &obj_hash[i].lock irq_context: 0 cb_lock &____s->seqcount#2 irq_context: 0 (wq_completion)wg-kex-wg1#20 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg1#20 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &c->lock irq_context: 0 (wq_completion)wg-kex-wg1#20 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &n->list_lock irq_context: 0 (wq_completion)wg-kex-wg1#20 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &n->list_lock &c->lock irq_context: 0 rcu_read_lock_bh &nr_netdev_xmit_lock_key rcu_read_lock tk_core.seq.seqcount irq_context: 0 rcu_read_lock_bh &nr_netdev_xmit_lock_key rcu_read_lock &obj_hash[i].lock irq_context: 0 rcu_read_lock_bh _xmit_X25#2 rcu_read_lock tk_core.seq.seqcount irq_context: 0 rcu_read_lock_bh _xmit_X25#2 rcu_read_lock &obj_hash[i].lock irq_context: 0 rcu_read_lock_bh _xmit_X25#2 rcu_read_lock &c->lock irq_context: 0 (wq_completion)wg-crypt-wg1#10 (work_completion)(&peer->transmit_packet_work) fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)wg-crypt-wg1#10 (work_completion)(&peer->transmit_packet_work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)wg-crypt-wg1#10 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 (wq_completion)wg-crypt-wg1#10 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &n->list_lock irq_context: 0 (wq_completion)wg-crypt-wg1#10 
(work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)wg-crypt-wg1#10 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-crypt-wg1#10 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock remove_cache_srcu rcu_read_lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock remove_cache_srcu rcu_read_lock &obj_hash[i].lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &table->lock#3 irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem ip6_fl_lock &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem ip6_fl_lock &base->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem ip6_fl_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem ip6_fl_lock pool_lock#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem __ip_vs_mutex (&tbl->periodic_timer) irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem __ip_vs_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem __ip_vs_mutex &base->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem __ip_vs_mutex &base->lock &obj_hash[i].lock irq_context: softirq rcu_callback proc_inum_ida.xa_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem __ip_vs_mutex &svc->sched_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem __ip_vs_mutex krc.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem __ip_vs_mutex pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem ipvs->est_mutex (console_sem).lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem ipvs->est_mutex console_lock console_srcu console_owner_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem ipvs->est_mutex console_lock console_srcu console_owner irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem ipvs->est_mutex console_lock console_srcu console_owner &port_lock_key irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem ipvs->est_mutex console_lock console_srcu console_owner console_owner_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem ipvs->est_mutex &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem 
ipvs->est_mutex &p->pi_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem ipvs->est_mutex &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem ipvs->est_mutex &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem ipvs->est_mutex &x->wait irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem nf_conntrack_mutex &n->list_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem nf_conntrack_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &tn->idrinfo->lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &tn->idrinfo->lock#2 &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &tn->idrinfo->lock#2 pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex prog_idr_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex bpf_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem tcp_metrics_lock &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem tcp_metrics_lock pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem tcp_metrics_lock krc.lock irq_context: 0 (wq_completion)wg-crypt-wg2#8 (work_completion)(&peer->transmit_packet_work) &base->lock irq_context: 0 (wq_completion)wg-crypt-wg2#8 (work_completion)(&peer->transmit_packet_work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg1#4 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem rcu_read_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&rew->rew_work) &pool->lock &p->pi_lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&rew->rew_work) &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&rew->rew_work) &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET fill_pool_map-wait-type-override pool_lock irq_context: 0 __ip_vs_mutex ip_vs_sched_mutex irq_context: 0 __ip_vs_mutex cpu_hotplug_lock irq_context: 0 __ip_vs_mutex cpu_hotplug_lock &rq->__lock irq_context: 0 __ip_vs_mutex cpu_hotplug_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 __ip_vs_mutex pcpu_alloc_mutex irq_context: 0 __ip_vs_mutex pcpu_alloc_mutex pcpu_lock irq_context: 0 __ip_vs_mutex &c->lock irq_context: 0 __ip_vs_mutex rcu_read_lock &pool->lock irq_context: 0 __ip_vs_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 __ip_vs_mutex rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 __ip_vs_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 __ip_vs_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 __ip_vs_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_long (work_completion)(&(&ipvs->est_reload_work)->work) ipvs->est_mutex &c->lock irq_context: 0 (wq_completion)gid-cache-wq (work_completion)(&ndev_work->work) &rq->__lock 
&per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem fill_pool_map-wait-type-override &c->lock irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem fill_pool_map-wait-type-override &n->list_lock irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 (wq_completion)wg-kex-wg2#19 (work_completion)(&peer->transmit_handshake_work) &base->lock irq_context: 0 (wq_completion)wg-kex-wg2#19 (work_completion)(&peer->transmit_handshake_work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg0#20 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &base->lock irq_context: 0 (wq_completion)wg-kex-wg0#20 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &base->lock &obj_hash[i].lock irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock ptlock_ptr(page)#2 rcu_read_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock fill_pool_map-wait-type-override &n->list_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount irq_context: hardirq hrtimer_bases.lock fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 cb_lock &p->pi_lock &cfs_rq->removed.lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 remove_cache_srcu rcu_read_lock &rcu_state.gp_wq irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 remove_cache_srcu rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 remove_cache_srcu rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 remove_cache_srcu rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_PACKET &rnp->exp_wq[2] irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock quarantine_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 fill_pool_map-wait-type-override rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 rcu_read_lock rcu_read_lock &pool->lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 rcu_read_lock rcu_read_lock &pool->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &n->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex &wg->device_update_lock rcu_state.exp_mutex &rq->__lock 
&cfs_rq->removed.lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &n->lock &base->lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &n->lock &base->lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &n->lock pool_lock#2 irq_context: softirq (&n->timer) rcu_read_lock_bh &data->lock irq_context: softirq (&n->timer) rcu_read_lock_bh &obj_hash[i].lock irq_context: softirq (&n->timer) rcu_read_lock_bh pool_lock#2 irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 batched_entropy_u8.lock crngs.lock irq_context: softirq (&n->timer) k-slock-AF_INET rcu_read_lock pool_lock#2 irq_context: softirq (&n->timer) k-slock-AF_INET rcu_read_lock &dir->lock#2 irq_context: softirq (&n->timer) k-slock-AF_INET rcu_read_lock &ul->lock irq_context: softirq (&n->timer) k-slock-AF_INET rcu_read_lock rcu_read_lock pool_lock#2 irq_context: softirq (&n->timer) k-slock-AF_INET rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: softirq (&n->timer) k-slock-AF_INET pool_lock#2 irq_context: softirq (&n->timer) k-slock-AF_INET &c->lock irq_context: softirq (&n->timer) k-slock-AF_INET batched_entropy_u32.lock irq_context: softirq (&n->timer) k-slock-AF_INET &obj_hash[i].lock irq_context: softirq (&n->timer) k-slock-AF_INET rcu_read_lock &____s->seqcount#10 irq_context: softirq (&n->timer) k-slock-AF_INET rcu_read_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: softirq (&n->timer) k-slock-AF_INET rcu_read_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: softirq (&n->timer) k-slock-AF_INET rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: softirq (&n->timer) k-slock-AF_INET rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: softirq (&n->timer) k-slock-AF_INET rcu_read_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: softirq (&n->timer) k-slock-AF_INET rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: softirq (&n->timer) &data->lock irq_context: softirq (&n->timer) quarantine_lock irq_context: softirq (&n->timer) k-slock-AF_INET rcu_read_lock rcu_read_lock_bh rcu_read_lock &n->list_lock irq_context: softirq (&n->timer) k-slock-AF_INET rcu_read_lock rcu_read_lock_bh rcu_read_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)wg-crypt-wg0#10 (work_completion)(&peer->transmit_packet_work) &rq->__lock irq_context: 0 (wq_completion)events pcpu_balance_work pcpu_alloc_mutex fs_reclaim &rq->__lock irq_context: 0 nlk_cb_mutex-GENERIC rcu_read_lock &c->lock irq_context: 0 nlk_cb_mutex-GENERIC rcu_read_lock &n->list_lock irq_context: 0 nlk_cb_mutex-GENERIC rcu_read_lock &n->list_lock &c->lock irq_context: softirq (&in_dev->mr_ifc_timer) fill_pool_map-wait-type-override &c->lock irq_context: 0 &mm->mmap_lock fill_pool_map-wait-type-override rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-crypt-wg2#10 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)wg-crypt-wg2#10 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &ul->lock irq_context: 0 (wq_completion)wg-crypt-wg2#10 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-crypt-wg2#10 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock 
rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg1#10 (work_completion)(&peer->transmit_packet_work) &base->lock irq_context: 0 (wq_completion)wg-crypt-wg1#10 (work_completion)(&peer->transmit_packet_work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg1#10 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock nl_table_wait.lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock nl_table_wait.lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 jbd2_handle &rq->__lock &cfs_rq->removed.lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock#2 rcu_read_lock &obj_hash[i].lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock#2 rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &n->list_lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)wg-kex-wg2#17 (work_completion)(&peer->transmit_handshake_work) &n->list_lock irq_context: 0 (wq_completion)wg-kex-wg2#17 (work_completion)(&peer->transmit_handshake_work) &n->list_lock &c->lock irq_context: 0 (wq_completion)wg-kex-wg1#17 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)wg-kex-wg1#17 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)wg-kex-wg1#17 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg1#17 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rlock-AF_PACKET irq_context: 0 cb_lock genl_mutex fill_pool_map-wait-type-override rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)wg-kex-wg1#17 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 cb_lock genl_mutex fill_pool_map-wait-type-override &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg1#17 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)wg-kex-wg1#17 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rlock-AF_PACKET irq_context: 0 (wq_completion)wg-kex-wg1#17 (work_completion)(&peer->transmit_handshake_work) 
&peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg2#18 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &____s->seqcount#2 irq_context: 0 (wq_completion)wg-kex-wg2#18 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &____s->seqcount irq_context: 0 (wq_completion)wg-kex-wg2#18 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) pool_lock#2 irq_context: 0 &mm->mmap_lock remove_cache_srcu rcu_node_0 irq_context: 0 &mm->mmap_lock remove_cache_srcu &rcu_state.expedited_wq irq_context: 0 &mm->mmap_lock remove_cache_srcu &rcu_state.expedited_wq &p->pi_lock irq_context: 0 &mm->mmap_lock remove_cache_srcu &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 &mm->mmap_lock remove_cache_srcu &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock genl_mutex uevent_sock_mutex &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &fsnotify_mark_srcu &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &fsnotify_mark_srcu &obj_hash[i].lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle &sbi->s_orphan_lock rcu_node_0 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &sbi->s_orphan_lock rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &sbi->s_orphan_lock rcu_read_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &sbi->s_orphan_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#8 &nsim_trap_data->trap_lock &pcp->lock &zone->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_data_sem pgd_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_data_sem key irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_data_sem pcpu_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_data_sem percpu_counters_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &ei->i_es_lock key#8 irq_context: 0 sb_writers#5 &sb->s_type->i_mutex_key#12 fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 sb_writers#5 &sb->s_type->i_mutex_key#12 fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_unbound (work_completion)(&port->bc_work) rcu_read_lock rcu_node_0 irq_context: 0 
(wq_completion)events_unbound (work_completion)(&port->bc_work) rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&port->bc_work) rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &vma->vm_lock->lock mmu_notifier_invalidate_range_start pgd_lock irq_context: 0 &vma->vm_lock->lock mmu_notifier_invalidate_range_start key irq_context: 0 &vma->vm_lock->lock mmu_notifier_invalidate_range_start pcpu_lock irq_context: 0 &vma->vm_lock->lock mmu_notifier_invalidate_range_start percpu_counters_lock irq_context: 0 &ep->mtx pgd_lock irq_context: 0 &ep->mtx key irq_context: 0 &ep->mtx pcpu_lock irq_context: 0 &ep->mtx percpu_counters_lock irq_context: 0 &fsnotify_mark_srcu remove_cache_srcu &____s->seqcount irq_context: 0 (wq_completion)rcu_gp &cfs_rq->removed.lock irq_context: 0 rtnl_mutex &mm->mmap_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 tomoyo_ss pool_lock#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &journal->j_wait_transaction_locked irq_context: 0 sb_writers#8 &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#8 &nsim_trap_data->trap_lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 sb_writers#4 tomoyo_ss &pcp->lock &zone->lock &____s->seqcount irq_context: 0 (wq_completion)events_unbound (work_completion)(&pool->idle_cull_work) wq_pool_attach_mutex &pool->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&pool->idle_cull_work) wq_pool_attach_mutex &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&pool->idle_cull_work) wq_pool_attach_mutex &pool->lock &base->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&pool->idle_cull_work) wq_pool_attach_mutex &pool->lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex batched_entropy_u8.lock crngs.lock base_crng.lock irq_context: 0 &journal->j_barrier &journal->j_checkpoint_mutex &cfs_rq->removed.lock irq_context: 0 &type->i_mutex_dir_key#3 sb_writers#4 jbd2_handle &c->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock &obj_hash[i].lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock &base->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&rew->rew_work) rcu_state.exp_wake_mutex &cfs_rq->removed.lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&rew->rew_work) rcu_state.exp_wake_mutex &obj_hash[i].lock irq_context: 0 &rq->__lock cid_lock irq_context: 0 sb_writers#5 &sb->s_type->i_mutex_key#12 &cfs_rq->removed.lock irq_context: 0 &pool->lock/1 &p->pi_lock &rq->__lock &obj_hash[i].lock irq_context: 0 &pool->lock/1 &p->pi_lock &rq->__lock &base->lock irq_context: 0 &pool->lock/1 &p->pi_lock &rq->__lock &base->lock &obj_hash[i].lock irq_context: 0 &f->f_pos_lock tk_core.seq.seqcount irq_context: 0 cb_lock genl_mutex rtnl_mutex &tipc_net(net)->bclock irq_context: 0 
&sb->s_type->i_mutex_key#10 sk_lock-AF_CAIF &rnp->exp_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_CAIF rcu_state.exp_mutex irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_CAIF rcu_state.exp_mutex rcu_state.exp_mutex.wait_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 tomoyo_ss rcu_node_0 irq_context: 0 &type->i_mutex_dir_key#3 sb_writers#4 &rq->__lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle rcu_read_lock &____s->seqcount irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &sbi->s_orphan_lock &mapping->private_lock irq_context: 0 (wq_completion)wg-crypt-wg2#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &rq->__lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle rcu_read_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 pgd_lock irq_context: 0 sb_writers#4 key irq_context: 0 sb_writers#4 pcpu_lock irq_context: 0 sb_writers#4 percpu_counters_lock irq_context: 0 sb_writers#4 pcpu_lock stock_lock irq_context: 0 cb_lock genl_mutex &pn->l2tp_tunnel_idr_lock irq_context: 0 cb_lock genl_mutex &pn->l2tp_tunnel_idr_lock pool_lock#2 irq_context: 0 cb_lock genl_mutex l2tp_ip_lock irq_context: 0 cb_lock genl_mutex k-sk_lock-AF_INET irq_context: 0 cb_lock genl_mutex k-sk_lock-AF_INET k-slock-AF_INET irq_context: 0 cb_lock genl_mutex k-sk_lock-AF_INET l2tp_ip_lock irq_context: 0 cb_lock genl_mutex k-slock-AF_INET irq_context: 0 cb_lock genl_mutex k-sk_lock-AF_INET rcu_read_lock pool_lock#2 irq_context: 0 cb_lock genl_mutex k-sk_lock-AF_INET rcu_read_lock &dir->lock#2 irq_context: 0 cb_lock genl_mutex k-sk_lock-AF_INET rcu_read_lock &ul->lock irq_context: 0 cb_lock genl_mutex k-sk_lock-AF_INET &obj_hash[i].lock irq_context: 0 cb_lock genl_mutex k-sk_lock-AF_INET pool_lock#2 irq_context: 0 cb_lock genl_mutex k-sk_lock-AF_INET &rq->__lock irq_context: 0 cb_lock genl_mutex k-sk_lock-AF_INET &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock genl_mutex k-sk_lock-AF_INET rcu_read_lock &obj_hash[i].lock irq_context: 0 cb_lock genl_mutex k-sk_lock-AF_INET batched_entropy_u32.lock irq_context: 0 cb_lock genl_mutex k-sk_lock-AF_INET batched_entropy_u16.lock irq_context: 0 cb_lock genl_mutex k-sk_lock-AF_INET batched_entropy_u16.lock crngs.lock irq_context: 0 cb_lock genl_mutex k-sk_lock-AF_INET k-clock-AF_INET irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &ei->i_es_lock batched_entropy_u8.lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &ei->i_es_lock kfence_freelist_lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ei->i_data_sem &ei->i_es_lock &meta->lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ei->i_data_sem &ei->i_es_lock kfence_freelist_lock irq_context: 0 sb_writers#4 tomoyo_ss remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock batched_entropy_u8.lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock kfence_freelist_lock irq_context: 0 (wq_completion)events_long 
(work_completion)(&br->mcast_gc_work) &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&pool->idle_cull_work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&pool->idle_cull_work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_unbound (work_completion)(&pool->idle_cull_work) &rq->__lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&pool->idle_cull_work) &cfs_rq->removed.lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&pool->idle_cull_work) &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg1#4 (work_completion)(&peer->transmit_packet_work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&pool->idle_cull_work) pool_lock#2 irq_context: softirq &(&br->gc_work)->timer rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 rcu_read_lock &sighand->siglock &____s->seqcount#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 rcu_read_lock &rcu_state.expedited_wq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_data_sem fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_data_sem fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)events_long (work_completion)(&(&br->gc_work)->work) &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_data_sem fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)wg-crypt-wg0#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &rq->__lock irq_context: 0 (wq_completion)rcu_gp &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET &dccp_hashinfo.bhash[i].lock stock_lock irq_context: 0 &type->i_mutex_dir_key#7 rcu_read_lock rcu_node_0 irq_context: 0 sk_lock-AF_INET &dccp_hashinfo.bhash[i].lock pool_lock#2 irq_context: 0 sk_lock-AF_INET &dccp_hashinfo.bhash[i].lock &dccp_hashinfo.bhash2[i].lock batched_entropy_u8.lock irq_context: 0 sk_lock-AF_INET &dccp_hashinfo.bhash[i].lock &dccp_hashinfo.bhash2[i].lock &hashinfo->ehash_locks[i] irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh pool_lock#2 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->orig_work)->work) &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)wg-crypt-wg1#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } 
while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 slock-AF_INET &data->lock irq_context: 0 &sb->s_type->i_mutex_key#10 slock-AF_INET &data->lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 slock-AF_INET &data->lock &base->lock irq_context: 0 &sb->s_type->i_mutex_key#10 slock-AF_INET &data->lock &base->lock &obj_hash[i].lock irq_context: 0 &type->i_mutex_dir_key#7 rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock remove_cache_srcu &pcp->lock &zone->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock remove_cache_srcu &pcp->lock &zone->lock &____s->seqcount irq_context: 0 sk_lock-AF_INET6 lock sctp_assocs_id_lock &c->lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &rq->__lock irq_context: 0 cb_lock genl_mutex cpu_hotplug_lock wq_pool_mutex &rq->__lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh fill_pool_map-wait-type-override pool_lock irq_context: 0 sk_lock-AF_INET6 lock sctp_assocs_id_lock &n->list_lock irq_context: 0 sk_lock-AF_INET6 lock sctp_assocs_id_lock &n->list_lock &c->lock irq_context: 0 sb_writers#3 &cfs_rq->removed.lock irq_context: softirq net/core/link_watch.c:31 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET &dccp_hashinfo.bhash[i].lock stock_lock rcu_read_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 sb_writers#8 &type->i_mutex_dir_key#4 rcu_read_lock &cfs_rq->removed.lock irq_context: 0 sb_writers#8 &type->i_mutex_dir_key#4 rcu_read_lock &obj_hash[i].lock irq_context: 0 &mm->mmap_lock fs_reclaim &cfs_rq->removed.lock irq_context: 0 &mm->mmap_lock fs_reclaim &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 jbd2_handle rcu_read_lock pgd_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 jbd2_handle rcu_read_lock stock_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 jbd2_handle rcu_read_lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 jbd2_handle rcu_read_lock key irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 jbd2_handle rcu_read_lock pcpu_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 jbd2_handle rcu_read_lock percpu_counters_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 jbd2_handle rcu_read_lock pcpu_lock stock_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 jbd2_handle rcu_read_lock &cfs_rq->removed.lock irq_context: 0 &type->i_mutex_dir_key#3 sb_writers#4 quarantine_lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &obj_hash[i].lock pool_lock irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock 
rcu_read_lock_bh &obj_hash[i].lock pool_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem lock#4 &lruvec->lru_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &sbi->s_orphan_lock mmu_notifier_invalidate_range_start irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &sbi->s_orphan_lock &____s->seqcount irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &sbi->s_orphan_lock pool_lock#2 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &sbi->s_orphan_lock bit_wait_table + i irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &sbi->s_orphan_lock rcu_read_lock pool_lock#2 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &sbi->s_orphan_lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &____s->seqcount#2 irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &____s->seqcount irq_context: 0 tomoyo_ss remove_cache_srcu &cfs_rq->removed.lock irq_context: 0 &f->f_pos_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET slock-AF_INET &data->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem &ei->i_es_lock &meta->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem &ei->i_es_lock kfence_freelist_lock irq_context: 0 cb_lock genl_mutex fs_reclaim &rq->__lock irq_context: 0 cb_lock genl_mutex fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq &(&bat_priv->nc.work)->timer rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &obj_hash[i].lock irq_context: softirq &(&bat_priv->nc.work)->timer rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &base->lock irq_context: softirq &(&bat_priv->nc.work)->timer rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &base->lock &obj_hash[i].lock irq_context: 0 &vma->vm_lock->lock pcpu_lock stock_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &root->kernfs_rwsem &meta->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &root->kernfs_rwsem kfence_freelist_lock irq_context: 0 (wq_completion)events_unbound connector_reaper_work fill_pool_map-wait-type-override &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &root->kernfs_rwsem &base->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal remove_cache_srcu pool_lock#2 irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock &ndev->lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 &obj_hash[i].lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 pool_lock#2 irq_context: 0 (wq_completion)wg-crypt-wg2#8 (work_completion)(&peer->transmit_packet_work) &rq->__lock irq_context: 0 &mm->mmap_lock remove_cache_srcu rcu_read_lock 
&rcu_state.gp_wq irq_context: 0 &mm->mmap_lock remove_cache_srcu rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 &mm->mmap_lock remove_cache_srcu rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 &mm->mmap_lock remove_cache_srcu rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#8 &sb->s_type->i_mutex_key#13 &cfs_rq->removed.lock irq_context: 0 sb_writers#8 &sb->s_type->i_mutex_key#13 &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &lg->lg_mutex &____s->seqcount#2 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &lg->lg_mutex &____s->seqcount irq_context: softirq rcu_read_lock_bh rcu_read_lock &pool->lock/1 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)wg-kex-wg2#7 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg2#7 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh pool_lock#2 irq_context: 0 (wq_completion)wg-kex-wg1#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock &n->list_lock irq_context: 0 (wq_completion)wg-kex-wg1#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock &n->list_lock &c->lock irq_context: 0 pcpu_lock rcu_read_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock &cfs_rq->removed.lock irq_context: 0 sb_writers#8 tomoyo_ss &rq->__lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &sbi->s_orphan_lock &lock->wait_lock irq_context: 0 sb_writers#4 sb_writers#4 remove_cache_srcu rcu_read_lock &____s->seqcount irq_context: 0 (wq_completion)bond1 &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#8 rcu_node_0 irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#8 &rcu_state.expedited_wq irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#8 &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#8 &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#8 &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &ei->i_es_lock &n->list_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &ei->i_es_lock &n->list_lock &c->lock irq_context: 0 sb_writers#4 tomoyo_ss rcu_read_lock 
&rq->__lock &cfs_rq->removed.lock irq_context: softirq &fq->mq_flush_lock fill_pool_map-wait-type-override &c->lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle remove_cache_srcu &pcp->lock &zone->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle remove_cache_srcu &pcp->lock &zone->lock &____s->seqcount irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 remove_cache_srcu &pcp->lock &zone->lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 remove_cache_srcu &pcp->lock &zone->lock &____s->seqcount irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 raw_lock irq_context: 0 &sb->s_type->i_mutex_key#10 raw_lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 &sem->wait_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 rcu_read_lock &dentry->d_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_data_sem quarantine_lock irq_context: 0 sk_lock-AF_PACKET rcu_state.exp_mutex &cfs_rq->removed.lock irq_context: 0 sk_lock-AF_PACKET rcu_state.exp_mutex pool_lock#2 irq_context: 0 &smc->clcsock_release_lock ipvs->sync_mutex irq_context: 0 nl_table_wait.lock &p->pi_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx rcu_read_lock &pool->lock/1 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)cfg80211 (work_completion)(&rdev->event_work) &lock->wait_lock irq_context: 0 (wq_completion)cfg80211 (work_completion)(&rdev->event_work) &p->pi_lock irq_context: 0 (wq_completion)cfg80211 (work_completion)(&rdev->event_work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)cfg80211 (work_completion)(&rdev->event_work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &type->i_mutex_dir_key#6 &c->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &sbi->s_orphan_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &____s->seqcount#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &____s->seqcount irq_context: 0 (wq_completion)phy8 irq_context: 0 (wq_completion)phy8 (work_completion)(&local->reconfig_filter) irq_context: 0 (wq_completion)phy8 (work_completion)(&local->reconfig_filter) &local->filter_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &pcp->lock &zone->lock irq_context: 0 
(wq_completion)wg-crypt-wg0#4 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh &base->lock irq_context: 0 (wq_completion)wg-crypt-wg0#4 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg0#4 (work_completion)(&peer->transmit_packet_work) batched_entropy_u8.lock irq_context: softirq (&peer->hb_timer) irq_context: softirq (&peer->hb_timer) slock-AF_INET6 irq_context: softirq (&peer->hb_timer) slock-AF_INET6 pool_lock#2 irq_context: softirq (&peer->hb_timer) slock-AF_INET6 batched_entropy_u32.lock irq_context: softirq (&peer->hb_timer) slock-AF_INET6 &obj_hash[i].lock irq_context: softirq (&peer->hb_timer) slock-AF_INET6 &base->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &sbi->s_orphan_lock rcu_read_lock &rq->__lock irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem &dir->lock irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem rcu_read_lock &pool->lock irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem &cfs_rq->removed.lock irq_context: softirq (&peer->hb_timer) slock-AF_INET6 &base->lock &obj_hash[i].lock irq_context: softirq (&peer->hb_timer) slock-AF_INET6 rcu_read_lock rcu_read_lock &____s->seqcount#7 irq_context: 0 (wq_completion)bond0#11 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond0#11 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond0#11 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &rq->__lock irq_context: 0 (wq_completion)bond0#11 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: softirq (&peer->hb_timer) slock-AF_INET6 rcu_read_lock rcu_read_lock pool_lock#2 irq_context: softirq (&peer->hb_timer) slock-AF_INET6 rcu_read_lock rcu_read_lock &ct->lock irq_context: softirq (&peer->hb_timer) slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock &____s->seqcount#7 irq_context: 0 cb_lock genl_mutex lock kernfs_idr_lock pool_lock#2 irq_context: 0 rtnl_mutex net_rwsem pool_lock#2 irq_context: 0 rtnl_mutex net_rwsem &obj_hash[i].lock irq_context: 0 rtnl_mutex net_rwsem nl_table_lock irq_context: 0 rtnl_mutex net_rwsem nl_table_wait.lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &____s->seqcount#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &____s->seqcount irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: 
&qdisc_tx_busylock#2 rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock#2 rcu_read_lock_bh pool_lock#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock#2 rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock#2 rcu_read_lock_bh rcu_read_lock &list->lock#5 irq_context: 0 kn->active#15 &kernfs_locks->open_file_mutex[count] &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &br->hash_lock rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &br->hash_lock &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->mtx &local->chanctx_mtx &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->mtx &local->chanctx_mtx batched_entropy_u8.lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->mtx &local->chanctx_mtx kfence_freelist_lock irq_context: 0 (wq_completion)cfg80211 (work_completion)(&rdev->event_work) &rdev->wiphy.mtx &wdev->mtx &pcp->lock &zone->lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx fs_reclaim &rq->__lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx remove_cache_srcu irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx remove_cache_srcu quarantine_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx remove_cache_srcu &c->lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx remove_cache_srcu &n->list_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx remove_cache_srcu &obj_hash[i].lock irq_context: 0 cb_lock genl_mutex rfkill_global_mutex &obj_hash[i].lock pool_lock irq_context: 0 cb_lock genl_mutex rfkill_global_mutex fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 cb_lock genl_mutex rfkill_global_mutex fill_pool_map-wait-type-override pool_lock irq_context: 0 cb_lock genl_mutex &sb->s_type->i_mutex_key#3 fs_reclaim &rq->__lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &____s->seqcount#2 irq_context: 0 (wq_completion)wg-crypt-wg1#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } 
while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 (wq_completion)wg-crypt-wg1#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)wg-crypt-wg1#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-crypt-wg1#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &rq->__lock irq_context: 0 (wq_completion)wg-crypt-wg1#4 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg1#4 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)wg-crypt-wg1#4 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &ul->lock irq_context: 0 (wq_completion)wg-crypt-wg1#4 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh batched_entropy_u32.lock irq_context: 0 (wq_completion)wg-crypt-wg1#4 (work_completion)(&peer->transmit_packet_work) batched_entropy_u8.lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock &tbl->lock rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock &tbl->lock &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock &tbl->lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock &tbl->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->nc.work)->work) fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) 
&idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock &tbl->lock fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock &tbl->lock fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock &tbl->lock fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock &tbl->lock &____s->seqcount#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock &tbl->lock krc.lock &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock &tbl->lock krc.lock &base->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock &tbl->lock krc.lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &n->list_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &n->list_lock &c->lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex &bat_priv->tvlv.container_list_lock &c->lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex &bat_priv->tvlv.container_list_lock &____s->seqcount#2 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex &bat_priv->tvlv.container_list_lock &n->list_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex &bat_priv->tvlv.container_list_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh rcu_read_lock &pool->lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh rcu_read_lock &pool->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override pool_lock irq_context: 0 &type->i_mutex_dir_key#7 rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex sk_lock-AF_INET &n->list_lock irq_context: 0 rtnl_mutex sk_lock-AF_INET &n->list_lock &c->lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ei->i_data_sem &meta->lock 
irq_context: 0 &type->i_mutex_dir_key#4 fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 &type->i_mutex_dir_key#4 fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)wg-kex-wg0#17 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock crngs.lock base_crng.lock irq_context: 0 (wq_completion)wg-crypt-wg2#4 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh &base->lock irq_context: 0 (wq_completion)wg-crypt-wg2#4 (work_completion)(&peer->transmit_packet_work) &base->lock irq_context: 0 (wq_completion)wg-crypt-wg2#4 (work_completion)(&peer->transmit_packet_work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg2#4 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg2#4 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh batched_entropy_u32.lock irq_context: 0 (wq_completion)wg-crypt-wg2#4 (work_completion)(&peer->transmit_packet_work) batched_entropy_u8.lock irq_context: 0 (wq_completion)wg-crypt-wg2#4 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)wg-crypt-wg2#4 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh &base->lock &obj_hash[i].lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock hwsim_radio_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)wg-kex-wg0#17 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#2 irq_context: 0 (wq_completion)wg-kex-wg0#17 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount irq_context: 0 (wq_completion)wg-kex-wg0#17 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: 0 rtnl_mutex _xmit_ETHER &local->filter_lock &____s->seqcount#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &root->kernfs_rwsem &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &dev->mutex &n->list_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &dev->mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)mm_percpu_wq (work_completion)(work) &rq->__lock irq_context: 0 (wq_completion)mm_percpu_wq (work_completion)(work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-crypt-wg0#10 (work_completion)(&peer->transmit_packet_work) rcu_node_0 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &fq->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->active_txq_lock[i] irq_context: 0 (wq_completion)mld 
(work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock &local->active_txq_lock[i] irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock &local->queue_stop_reason_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock &fq->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock &fq->lock tk_core.seq.seqcount irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock hwsim_radio_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock hwsim_radio_lock &c->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock hwsim_radio_lock pool_lock#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock hwsim_radio_lock &list->lock#19 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock &list->lock#19 irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx fill_pool_map-wait-type-override &rq->__lock irq_context: 0 (wq_completion)wg-crypt-wg0#10 (work_completion)(&peer->transmit_packet_work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx lock kernfs_idr_lock &____s->seqcount#2 irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx lock kernfs_idr_lock &____s->seqcount irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &n->list_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &n->list_lock &c->lock irq_context: 0 cb_lock genl_mutex uevent_sock_mutex &n->list_lock irq_context: 0 cb_lock genl_mutex uevent_sock_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 &pcp->lock &zone->lock irq_context: 0 
(wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 &pcp->lock &zone->lock &____s->seqcount irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 sb_writers#4 tomoyo_ss &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6/1 tomoyo_ss &pcp->lock &zone->lock irq_context: 0 sb_writers#10 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem rcu_state.exp_mutex &rnp->exp_wq[0] irq_context: 0 cb_lock genl_mutex rtnl_mutex console_owner_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex console_owner irq_context: 0 cb_lock genl_mutex rtnl_mutex console_lock console_srcu console_owner_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex console_lock console_srcu console_owner irq_context: 0 cb_lock genl_mutex rtnl_mutex console_lock console_srcu console_owner console_owner_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx rcu_node_0 irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &rcu_state.expedited_wq irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 &rq->__lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 remove_cache_srcu irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 remove_cache_srcu quarantine_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_read_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 sb_writers#10 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem &c->lock irq_context: 0 sb_writers#10 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem &____s->seqcount#2 irq_context: 0 sb_writers#10 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem &____s->seqcount irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 tomoyo_ss &c->lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 tomoyo_ss &____s->seqcount#2 irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 tomoyo_ss &____s->seqcount irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex cpu_hotplug_lock cpuset_rwsem rcu_state.exp_mutex 
&rnp->exp_wq[1] irq_context: 0 kn->active#55 &c->lock irq_context: 0 cb_lock genl_mutex cpu_hotplug_lock wq_pool_mutex &____s->seqcount#2 irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 &n->list_lock irq_context: 0 (wq_completion)phy12 irq_context: 0 (wq_completion)phy12 &rq->__lock irq_context: 0 (wq_completion)phy12 (work_completion)(&local->reconfig_filter) irq_context: 0 (wq_completion)phy12 (work_completion)(&local->reconfig_filter) &local->filter_lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock &c->lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock &____s->seqcount#2 irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock &____s->seqcount irq_context: softirq rcu_read_lock &table->lock#3 irq_context: softirq rcu_read_lock rcu_read_lock &table->lock#3 irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock &table->lock#3 irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &obj_hash[i].lock pool_lock irq_context: softirq (&ndev->rs_timer) batched_entropy_u8.lock irq_context: softirq (&ndev->rs_timer) kfence_freelist_lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh &meta->lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh kfence_freelist_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx uevent_sock_mutex &c->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx uevent_sock_mutex &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &br->hash_lock batched_entropy_u8.lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock &obj_hash[i].lock pool_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx uevent_sock_mutex &rq->__lock irq_context: softirq rcu_read_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 cb_lock rcu_read_lock rcu_node_0 irq_context: 0 cb_lock rcu_read_lock &rq->__lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 &pcp->lock &zone->lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 &pcp->lock &zone->lock &____s->seqcount irq_context: softirq fs/notify/mark.c:89 rcu_read_lock &pool->lock/1 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 &sb->s_type->i_mutex_key#9 &____s->seqcount#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx &rnp->exp_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx rcu_state.exp_mutex irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx rcu_state.exp_mutex rcu_state.exp_mutex.wait_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 fs_reclaim irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 stock_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 &dentry->d_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 rcu_read_lock rename_lock.seqcount irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 &ei->i_es_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 &dentry->d_lock &wq#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 jbd2_handle &rq->__lock irq_context: 0 sb_writers#4 &dentry->d_lock irq_context: 0 sb_writers#4 
tomoyo_ss &____s->seqcount#2 irq_context: 0 sb_writers#4 tomoyo_ss &pcp->lock &zone->lock irq_context: 0 sb_writers#4 tomoyo_ss &____s->seqcount irq_context: 0 sb_writers#4 rcu_read_lock &dentry->d_lock irq_context: 0 sb_writers#4 &ei->xattr_sem irq_context: 0 sb_writers#4 &ei->xattr_sem &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 tk_core.seq.seqcount irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &ei->xattr_sem irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mmu_notifier_invalidate_range_start irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 pool_lock#2 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &journal->j_state_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_raw_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &journal->j_wait_updates irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &sbi->s_orphan_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 fs_reclaim irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_es_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem mmu_notifier_invalidate_range_start irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem pool_lock#2 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &ei->i_es_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &ei->i_es_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &ei->i_es_lock pool_lock#2 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &ei->i_es_lock &sbi->s_es_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem mmu_notifier_invalidate_range_start irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem pool_lock#2 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &sb->s_type->i_lock_key#22 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &ei->i_raw_lock irq_context: 0 &f->f_pos_lock 
sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &lg->lg_mutex irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &lg->lg_mutex &ei->i_prealloc_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &lg->lg_mutex rcu_read_lock &pa->pa_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &lg->lg_mutex &mapping->private_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &lg->lg_mutex &ret->b_state_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &lg->lg_mutex &ret->b_state_lock &journal->j_list_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &lg->lg_mutex &pa->pa_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &lg->lg_mutex &lg->lg_prealloc_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &lg->lg_mutex &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &ei->i_es_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &ei->i_es_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &ei->i_es_lock pool_lock#2 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &journal->j_list_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock rcu_read_lock &q->queue_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock rcu_read_lock &q->queue_lock pool_lock#2 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock rcu_read_lock &q->queue_lock pcpu_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock rcu_read_lock &q->queue_lock &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock rcu_read_lock &q->queue_lock percpu_counters_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock rcu_read_lock &q->queue_lock &blkcg->lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock rcu_read_lock &q->queue_lock &blkcg->lock pool_lock#2 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &mm->mmap_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &mm->mmap_lock ptlock_ptr(page)#2 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &dd->lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock &dd->lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock pool_lock#2 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock tk_core.seq.seqcount irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock &virtscsi_vq->vq_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &base->lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &base->lock 
&obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->mcast.work)->work) rcu_read_lock &____s->seqcount#2 irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex net_rwsem pool_lock#2 irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex net_rwsem &obj_hash[i].lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex net_rwsem nl_table_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex net_rwsem nl_table_wait.lock irq_context: 0 &type->s_umount_key#46/1 &sb->s_type->i_mutex_key#20 &c->lock irq_context: 0 &type->s_umount_key#46/1 &sb->s_type->i_mutex_key#20 &____s->seqcount#2 irq_context: 0 &type->s_umount_key#46/1 &sb->s_type->i_mutex_key#20 &____s->seqcount irq_context: 0 &type->i_mutex_dir_key#6 &sem->wait_lock irq_context: 0 &type->i_mutex_dir_key#6 &rq->__lock irq_context: 0 &type->i_mutex_dir_key#6 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#10 &sem->wait_lock irq_context: 0 sb_writers#10 &p->pi_lock irq_context: 0 sb_writers#10 &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&sdp->work) &x->wait#3 &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 sb_writers#10 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#10 &rq->__lock irq_context: 0 kn->active#53 &c->lock irq_context: 0 kn->active#53 &____s->seqcount irq_context: 0 kn->active#53 pool_lock#2 irq_context: 0 sb_writers#10 &of->mutex cgroup_mutex cgroup_mutex.wait_lock irq_context: 0 sb_writers#10 &of->mutex cgroup_mutex &rq->__lock irq_context: 0 sb_writers#10 &of->mutex cgroup_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq rcu_read_lock &ifibss->incomplete_lock irq_context: softirq rcu_read_lock &rdev->wiphy_work_lock irq_context: softirq rcu_read_lock &local->rx_path_lock &obj_hash[i].lock irq_context: softirq rcu_read_lock &local->rx_path_lock pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 (&timer.timer) irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &journal->j_state_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &journal->j_state_lock &journal->j_wait_commit irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &journal->j_state_lock &journal->j_wait_commit &p->pi_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &journal->j_state_lock &journal->j_wait_commit &p->pi_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &journal->j_state_lock &journal->j_wait_commit &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &journal->j_wait_commit irq_context: 0 &f->f_pos_lock sb_writers#4 
&sb->s_type->i_mutex_key#8 &journal->j_wait_done_commit irq_context: 0 sb_writers#10 &of->mutex cgroup_mutex.wait_lock irq_context: 0 sb_writers#10 &of->mutex &p->pi_lock irq_context: 0 sb_writers#10 &of->mutex &p->pi_lock &rq->__lock irq_context: 0 sb_writers#10 &of->mutex &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &journal->j_state_lock tk_core.seq.seqcount irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &journal->j_state_lock &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &journal->j_state_lock &base->lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &journal->j_state_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)cfg80211 (work_completion)(&rdev->event_work) &rdev->wiphy.mtx &wdev->mtx quarantine_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx remove_cache_srcu irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx remove_cache_srcu quarantine_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx remove_cache_srcu &c->lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx remove_cache_srcu &n->list_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx remove_cache_srcu &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock remove_cache_srcu &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 &rq->__lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex.wait_lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 &p->pi_lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 &p->pi_lock &rq->__lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#11 &of->mutex cgroup_mutex cgroup_mutex.wait_lock irq_context: 0 sb_writers#11 &of->mutex cgroup_mutex &rq->__lock irq_context: 0 sb_writers#11 &of->mutex cgroup_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &rq->__lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 tomoyo_ss quarantine_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 remove_cache_srcu irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 remove_cache_srcu quarantine_lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex remove_cache_srcu irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 remove_cache_srcu &c->lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex remove_cache_srcu quarantine_lock irq_context: 0 (wq_completion)events 
(work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 remove_cache_srcu &n->list_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 remove_cache_srcu &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 remove_cache_srcu &rq->__lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 remove_cache_srcu rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 remove_cache_srcu rcu_read_lock &rq->__lock irq_context: 0 sb_writers#11 &of->mutex cgroup_mutex.wait_lock irq_context: 0 sb_writers#11 &of->mutex &p->pi_lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7 &root->kernfs_rwsem &rq->__lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7 &root->kernfs_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 ebt_mutex &rq->__lock irq_context: 0 ebt_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ret->b_state_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ret->b_state_lock &journal->j_list_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &sbi->s_orphan_lock &ret->b_state_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &sbi->s_orphan_lock &ret->b_state_lock &journal->j_list_lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 &c->lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 &____s->seqcount#2 irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 &____s->seqcount irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &____s->seqcount#2 irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &____s->seqcount irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx rcu_read_lock rcu_read_lock &sta->rate_ctrl_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx rcu_read_lock rcu_read_lock &sta->rate_ctrl_lock pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx rcu_read_lock rcu_read_lock &sta->rate_ctrl_lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx rcu_read_lock rcu_read_lock &sta->rate_ctrl_lock krc.lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock &n->list_lock irq_context: 0 nf_nat_proto_mutex &n->list_lock irq_context: 0 nf_nat_proto_mutex &obj_hash[i].lock pool_lock irq_context: 0 kn->active#55 &kernfs_locks->open_file_mutex[count] &c->lock irq_context: 0 kn->active#55 &kernfs_locks->open_file_mutex[count] 
&____s->seqcount#2 irq_context: 0 kn->active#55 &kernfs_locks->open_file_mutex[count] &____s->seqcount irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &obj_hash[i].lock pool_lock irq_context: 0 nf_hook_mutex nf_hook_mutex.wait_lock irq_context: 0 kn->active#56 &____s->seqcount irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 tomoyo_ss &n->list_lock irq_context: 0 nf_hook_mutex.wait_lock irq_context: 0 &type->i_mutex_dir_key#7 &root->kernfs_rwsem rcu_read_lock rcu_node_0 irq_context: 0 &type->i_mutex_dir_key#7 &root->kernfs_rwsem rcu_read_lock &rq->__lock irq_context: 0 &type->i_mutex_dir_key#7 &root->kernfs_rwsem rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 nf_nat_proto_mutex &n->list_lock &c->lock irq_context: 0 nf_nat_proto_mutex nf_hook_mutex.wait_lock irq_context: 0 nf_nat_proto_mutex &p->pi_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 rcu_read_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 nf_hook_mutex rcu_read_lock rcu_node_0 irq_context: 0 nf_hook_mutex rcu_read_lock &rq->__lock irq_context: 0 nf_hook_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 rcu_read_lock &rcu_state.gp_wq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 &obj_hash[i].lock pool_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &____s->seqcount irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 stock_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &xa->xa_lock#6 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 lock#4 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &mapping->private_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &ei->i_es_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &ei->i_data_sem irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &ei->i_data_sem mmu_notifier_invalidate_range_start irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &ei->i_data_sem &ei->i_es_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &ei->i_data_sem &ei->i_es_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &ei->i_data_sem &ei->i_es_lock &sbi->s_es_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &ei->i_data_sem &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &ei->i_data_sem &sb->s_type->i_lock_key#22 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &ei->i_data_sem &(ei->i_block_reservation_lock) irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock &memcg->move_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock &xa->xa_lock#6 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_lock_key#22 irq_context: 0 
&f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &wb->list_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &wb->list_lock &sb->s_type->i_lock_key#22 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &xa->xa_lock#6 pool_lock#2 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &xa->xa_lock#6 &xa->xa_lock#12 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &xa->xa_lock#6 &xa->xa_lock#12 pool_lock#2 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &xa->xa_lock#6 &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &xa->xa_lock#6 stock_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &xa->xa_lock#6 &c->lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &xa->xa_lock#6 &____s->seqcount#2 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &xa->xa_lock#6 &____s->seqcount irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 lock#4 &lruvec->lru_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock &xa->xa_lock#6 key#10 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &c->lock irq_context: 0 nf_hook_mutex remove_cache_srcu irq_context: 0 &xt[i].mutex remove_cache_srcu irq_context: 0 nf_hook_mutex remove_cache_srcu quarantine_lock irq_context: 0 &xt[i].mutex remove_cache_srcu quarantine_lock irq_context: 0 nf_hook_mutex remove_cache_srcu &c->lock irq_context: 0 nf_hook_mutex remove_cache_srcu &n->list_lock irq_context: 0 nf_hook_mutex remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 nf_hook_mutex remove_cache_srcu &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle rcu_read_lock rcu_node_0 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle rcu_read_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &ei->i_es_lock key#2 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &mm->mmap_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &ei->i_data_sem &(ei->i_block_reservation_lock) key#15 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &____s->seqcount#2 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &pcp->lock &zone->lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock rcu_node_0 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &pipe->mutex/1 &mm->mmap_lock ptlock_ptr(page)#2 lock#4 &lruvec->lru_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem remove_cache_srcu irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem remove_cache_srcu quarantine_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle 
&ei->i_data_sem remove_cache_srcu &c->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem remove_cache_srcu &n->list_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem remove_cache_srcu &obj_hash[i].lock irq_context: 0 &xt[i].mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 &xt[i].mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock sb_writers#4 mount_lock irq_context: 0 &mm->mmap_lock sb_writers#4 tk_core.seq.seqcount irq_context: 0 &mm->mmap_lock sb_writers#4 mmu_notifier_invalidate_range_start irq_context: 0 &mm->mmap_lock sb_writers#4 pool_lock#2 irq_context: 0 &mm->mmap_lock sb_writers#4 &journal->j_state_lock irq_context: 0 &mm->mmap_lock sb_writers#4 jbd2_handle irq_context: 0 &mm->mmap_lock sb_writers#4 jbd2_handle &ei->i_raw_lock irq_context: 0 &mm->mmap_lock sb_writers#4 jbd2_handle &journal->j_wait_updates irq_context: 0 &mm->mmap_lock sb_writers#4 &obj_hash[i].lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) batched_entropy_u8.lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) kfence_freelist_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &____s->seqcount#2 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &____s->seqcount irq_context: 0 sk_lock-AF_INET6 rcu_read_lock &____s->seqcount#7 irq_context: 0 sk_lock-AF_INET6 rcu_read_lock &____s->seqcount#2 irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &____s->seqcount#7 irq_context: 0 sk_lock-AF_INET6 rcu_read_lock &nf_nat_locks[i] irq_context: 0 sk_lock-AF_INET6 rcu_read_lock &nf_conntrack_locks[i] irq_context: 0 sk_lock-AF_INET6 rcu_read_lock &nf_conntrack_locks[i] batched_entropy_u8.lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: 0 sk_lock-AF_INET6 rcu_read_lock &rq->__lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_INET6 rcu_read_lock &rcu_state.gp_wq irq_context: 0 sk_lock-AF_INET6 rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_INET6 remove_cache_srcu irq_context: 0 sk_lock-AF_INET6 remove_cache_srcu quarantine_lock irq_context: 0 sk_lock-AF_INET6 remove_cache_srcu &c->lock irq_context: 0 sk_lock-AF_INET6 remove_cache_srcu &n->list_lock irq_context: 0 sk_lock-AF_INET6 remove_cache_srcu rcu_read_lock &____s->seqcount irq_context: 0 sk_lock-AF_INET6 remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 sk_lock-AF_INET6 remove_cache_srcu &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &mm->mmap_lock rcu_read_lock rcu_read_lock ptlock_ptr(page)#2 irq_context: 0 l2tp_ip6_lock irq_context: 0 &sb->s_type->i_mutex_key#10 l2tp_ip6_lock irq_context: 0 rcu_read_lock rcu_read_lock &p->alloc_lock irq_context: 0 sb_writers#3 
&sb->s_type->i_mutex_key#9 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock genl_mutex rtnl_mutex &sdata->sec_mtx irq_context: 0 &mm->mmap_lock ptlock_ptr(page)#2 &____s->seqcount irq_context: 0 &mm->mmap_lock ptlock_ptr(page)#2 pool_lock#2 irq_context: 0 mapping.invalidate_lock irq_context: 0 mapping.invalidate_lock mmu_notifier_invalidate_range_start irq_context: 0 mapping.invalidate_lock &____s->seqcount irq_context: 0 mapping.invalidate_lock pool_lock#2 irq_context: 0 mapping.invalidate_lock stock_lock irq_context: 0 mapping.invalidate_lock &xa->xa_lock#6 irq_context: 0 mapping.invalidate_lock lock#4 irq_context: 0 mapping.invalidate_lock &xa->xa_lock#6 stock_lock irq_context: 0 sb_writers#4 stock_lock irq_context: 0 mapping.invalidate_lock &xa->xa_lock#6 pool_lock#2 irq_context: 0 mapping.invalidate_lock &rq->__lock irq_context: 0 mapping.invalidate_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 mapping.invalidate_lock &ei->i_es_lock irq_context: 0 mapping.invalidate_lock tk_core.seq.seqcount irq_context: 0 mapping.invalidate_lock &dd->lock irq_context: 0 mapping.invalidate_lock rcu_read_lock &dd->lock irq_context: 0 mapping.invalidate_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 mapping.invalidate_lock rcu_read_lock pool_lock#2 irq_context: 0 mapping.invalidate_lock rcu_read_lock tk_core.seq.seqcount irq_context: 0 mapping.invalidate_lock rcu_read_lock &virtscsi_vq->vq_lock irq_context: 0 mapping.invalidate_lock &folio_wait_table[i] irq_context: 0 &p->lock &of->mutex kn->active#5 &cfs_rq->removed.lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh &hsr->seqnr_lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh &hsr->seqnr_lock rcu_read_lock &new_node->seq_out_lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh &hsr->seqnr_lock rcu_read_lock pool_lock#2 irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh &hsr->seqnr_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh &hsr->seqnr_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &list->lock#5 irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh &hsr->seqnr_lock &obj_hash[i].lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh &hsr->seqnr_lock pool_lock#2 irq_context: 0 sb_writers#4 inode_hash_lock &s->s_inode_list_lock irq_context: 0 sb_writers#4 &mapping->private_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 mapping.invalidate_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 mapping.invalidate_lock &sem->wait_lock irq_context: 0 (wq_completion)events_power_efficient (gc_work).work &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 mapping.invalidate_lock &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 mapping.invalidate_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &pipe->mutex/1 &mm->mmap_lock rcu_read_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 &c->lock irq_context: 0 sb_writers#4 
&type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem &____s->seqcount#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem &pcp->lock &zone->lock irq_context: 0 &xt[i].mutex &rq->__lock irq_context: 0 &xt[i].mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock sb_pagefaults irq_context: 0 &mm->mmap_lock sb_pagefaults tk_core.seq.seqcount irq_context: 0 &mm->mmap_lock sb_pagefaults mmu_notifier_invalidate_range_start irq_context: 0 &mm->mmap_lock sb_pagefaults pool_lock#2 irq_context: 0 &mm->mmap_lock sb_pagefaults &journal->j_state_lock irq_context: 0 &mm->mmap_lock sb_pagefaults jbd2_handle irq_context: 0 &mm->mmap_lock sb_pagefaults jbd2_handle &ei->i_raw_lock irq_context: 0 &mm->mmap_lock sb_pagefaults jbd2_handle &journal->j_wait_updates irq_context: 0 &mm->mmap_lock sb_pagefaults &obj_hash[i].lock irq_context: 0 &mm->mmap_lock sb_pagefaults mapping.invalidate_lock irq_context: 0 &mm->mmap_lock sb_pagefaults mapping.invalidate_lock &sem->wait_lock irq_context: 0 &mm->mmap_lock sb_pagefaults mapping.invalidate_lock mmu_notifier_invalidate_range_start irq_context: 0 &mm->mmap_lock sb_pagefaults mapping.invalidate_lock stock_lock irq_context: 0 &mm->mmap_lock sb_pagefaults mapping.invalidate_lock &c->lock irq_context: 0 &mm->mmap_lock sb_pagefaults mapping.invalidate_lock pool_lock#2 irq_context: 0 &mm->mmap_lock sb_pagefaults mapping.invalidate_lock &mapping->private_lock irq_context: 0 &mm->mmap_lock sb_pagefaults mapping.invalidate_lock &ei->i_es_lock irq_context: 0 &mm->mmap_lock sb_pagefaults mapping.invalidate_lock rcu_read_lock &memcg->move_lock irq_context: 0 &mm->mmap_lock sb_pagefaults mapping.invalidate_lock rcu_read_lock &xa->xa_lock#6 irq_context: 0 &mm->mmap_lock sb_pagefaults mapping.invalidate_lock &mapping->private_lock rcu_read_lock &memcg->move_lock irq_context: 0 &mm->mmap_lock sb_pagefaults &sem->wait_lock irq_context: 0 &mm->mmap_lock sb_pagefaults &p->pi_lock irq_context: 0 &mm->mmap_lock &mapping->private_lock irq_context: 0 &mm->mmap_lock &mapping->private_lock rcu_read_lock &memcg->move_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 mapping.invalidate_lock &sb->s_type->i_lock_key#22 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 mapping.invalidate_lock &wb->list_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 mapping.invalidate_lock &sbi->s_writepages_rwsem irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 mapping.invalidate_lock &sbi->s_writepages_rwsem &xa->xa_lock#6 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 mapping.invalidate_lock &sbi->s_writepages_rwsem mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 mapping.invalidate_lock &sbi->s_writepages_rwsem pool_lock#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 mapping.invalidate_lock &sbi->s_writepages_rwsem &mapping->i_mmap_rwsem mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 mapping.invalidate_lock &sbi->s_writepages_rwsem &mapping->i_mmap_rwsem ptlock_ptr(page)#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 mapping.invalidate_lock &sbi->s_writepages_rwsem &mapping->private_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 
&sb->s_type->i_mutex_key#8/4 mapping.invalidate_lock &sbi->s_writepages_rwsem &mapping->private_lock rcu_read_lock &memcg->move_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 mapping.invalidate_lock &sbi->s_writepages_rwsem rcu_read_lock &memcg->move_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 mapping.invalidate_lock &sbi->s_writepages_rwsem rcu_read_lock &xa->xa_lock#6 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 mapping.invalidate_lock &sbi->s_writepages_rwsem rcu_read_lock &xa->xa_lock#6 &s->s_inode_wblist_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 mapping.invalidate_lock &sbi->s_writepages_rwsem rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 mapping.invalidate_lock &sbi->s_writepages_rwsem rcu_read_lock &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 mapping.invalidate_lock &sbi->s_writepages_rwsem lock#4 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 mapping.invalidate_lock &sbi->s_writepages_rwsem lock#5 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 mapping.invalidate_lock &sbi->s_writepages_rwsem tk_core.seq.seqcount irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 mapping.invalidate_lock &sbi->s_writepages_rwsem &dd->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 mapping.invalidate_lock &sbi->s_writepages_rwsem rcu_read_lock &dd->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 mapping.invalidate_lock &sbi->s_writepages_rwsem rcu_read_lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 mapping.invalidate_lock &sbi->s_writepages_rwsem rcu_read_lock tk_core.seq.seqcount irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 mapping.invalidate_lock &sbi->s_writepages_rwsem rcu_read_lock &virtscsi_vq->vq_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 mapping.invalidate_lock &folio_wait_table[i] irq_context: 0 sk_lock-AF_INET6 rcu_read_lock &cfs_rq->removed.lock irq_context: 0 &mm->mmap_lock sb_pagefaults mapping.invalidate_lock &rq->__lock irq_context: 0 &mm->mmap_lock sb_pagefaults mapping.invalidate_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 mapping.invalidate_lock lock#4 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 mapping.invalidate_lock lock#4 &lruvec->lru_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 mapping.invalidate_lock lock#5 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 mapping.invalidate_lock &mapping->i_mmap_rwsem irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 mapping.invalidate_lock &mapping->i_mmap_rwsem &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 mapping.invalidate_lock &mapping->i_mmap_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 mapping.invalidate_lock &mapping->i_mmap_rwsem lock#4 
irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 mapping.invalidate_lock &mapping->i_mmap_rwsem lock#5 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 mapping.invalidate_lock &mapping->i_mmap_rwsem mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 mapping.invalidate_lock &mapping->i_mmap_rwsem ptlock_ptr(page)#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 mapping.invalidate_lock &mapping->private_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 mapping.invalidate_lock stock_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 mapping.invalidate_lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 mapping.invalidate_lock pool_lock#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 mapping.invalidate_lock &sb->s_type->i_lock_key#22 &xa->xa_lock#6 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 mapping.invalidate_lock &sb->s_type->i_lock_key#22 &xa->xa_lock#6 &obj_hash[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 mapping.invalidate_lock &sb->s_type->i_lock_key#22 &xa->xa_lock#6 pool_lock#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 mapping.invalidate_lock &lruvec->lru_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 mapping.invalidate_lock rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 mapping.invalidate_lock mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 mapping.invalidate_lock &journal->j_state_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 mapping.invalidate_lock jbd2_handle irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 mapping.invalidate_lock jbd2_handle &ei->i_data_sem irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 mapping.invalidate_lock jbd2_handle &ei->i_data_sem &ei->i_data_sem/1 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 mapping.invalidate_lock jbd2_handle &ei->i_data_sem &ei->i_data_sem/1 mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 mapping.invalidate_lock jbd2_handle &ei->i_data_sem &ei->i_data_sem/1 pool_lock#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 mapping.invalidate_lock jbd2_handle &ei->i_data_sem &ei->i_data_sem/1 &ret->b_state_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 mapping.invalidate_lock jbd2_handle &ei->i_data_sem &ei->i_data_sem/1 &ret->b_state_lock &journal->j_list_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 mapping.invalidate_lock jbd2_handle &ei->i_data_sem &ei->i_data_sem/1 &ei->i_raw_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 mapping.invalidate_lock jbd2_handle &ei->i_data_sem &ei->i_data_sem/1 &ei->i_es_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 mapping.invalidate_lock 
jbd2_handle &ei->i_data_sem &ei->i_data_sem/1 &ei->i_es_lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 mapping.invalidate_lock jbd2_handle &ei->i_data_sem &ei->i_data_sem/1 &ei->i_es_lock pool_lock#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 mapping.invalidate_lock jbd2_handle &ei->i_data_sem &ei->i_data_sem/1 &ei->i_es_lock &sbi->s_es_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 mapping.invalidate_lock jbd2_handle &ei->i_data_sem &ei->i_data_sem/1 &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 mapping.invalidate_lock jbd2_handle &ei->i_data_sem &ei->i_data_sem/1 tk_core.seq.seqcount irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 mapping.invalidate_lock jbd2_handle &ei->i_data_sem &ei->i_data_sem/1 batched_entropy_u32.lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 mapping.invalidate_lock jbd2_handle &ei->i_data_sem &ei->i_data_sem/1 &ei->i_prealloc_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 mapping.invalidate_lock jbd2_handle &ei->i_data_sem &ei->i_data_sem/1 &sb->s_type->i_lock_key#22 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 mapping.invalidate_lock jbd2_handle &ei->i_data_sem &ei->i_data_sem/1 &journal->j_wait_updates irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 mapping.invalidate_lock &ei->i_data_sem irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 mapping.invalidate_lock &ei->i_data_sem &ei->i_data_sem/1 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 mapping.invalidate_lock &ei->i_data_sem &ei->i_data_sem/1 &obj_hash[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 mapping.invalidate_lock &ei->i_data_sem &ei->i_data_sem/1 pool_lock#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 &sem->wait_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 &p->pi_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 &rq->__lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 alg_types_sem irq_context: 0 sk_lock-AF_ALG irq_context: 0 sk_lock-AF_ALG slock-AF_ALG irq_context: 0 slock-AF_ALG irq_context: 0 sk_lock-AF_ALG fs_reclaim irq_context: 0 sk_lock-AF_ALG fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sk_lock-AF_ALG pool_lock#2 irq_context: 0 sk_lock-AF_ALG &dir->lock irq_context: 0 sk_lock-AF_ALG &obj_hash[i].lock irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock irq_context: 0 sb_writers#4 &xa->xa_lock#6 irq_context: 0 sb_writers#4 &ei->i_prealloc_lock irq_context: 0 sb_writers#4 &ei->i_es_lock irq_context: 0 &sighand->siglock stock_lock irq_context: 0 sb_writers#8 tomoyo_ss &n->list_lock irq_context: 0 
sb_writers#8 tomoyo_ss &n->list_lock &c->lock irq_context: softirq (&peer->timer_persistent_keepalive) &c->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock hwsim_radio_lock &____s->seqcount#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock hwsim_radio_lock &____s->seqcount irq_context: 0 &sb->s_type->i_mutex_key#10 &hashinfo->lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 slock-AF_INET6 &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 slock-AF_INET6 pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 slock-AF_INET6 elock-AF_INET6 irq_context: 0 &sb->s_type->i_mutex_key#10 &net->ipv4.ra_mutex irq_context: 0 &sb->s_type->i_mutex_key#10 &hashinfo->lock irq_context: 0 &sighand->siglock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->nc.work)->work) rcu_read_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->nc.work)->work) rcu_read_lock &obj_hash[i].lock irq_context: softirq rcu_read_lock hwsim_radio_lock &____s->seqcount#2 irq_context: softirq rcu_read_lock hwsim_radio_lock &____s->seqcount irq_context: softirq rcu_read_lock rcu_read_lock &____s->seqcount#2 irq_context: 0 sb_writers#8 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 jbd2_handle &ret->b_state_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 jbd2_handle &ret->b_state_lock &journal->j_list_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 jbd2_handle &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 jbd2_handle &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock &anon_vma->rwsem &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss rcu_read_lock &rcu_state.gp_wq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss rcu_read_lock &rcu_state.gp_wq &p->pi_lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &net->xdp.lock irq_context: 0 &xs->mutex irq_context: 0 &xs->mutex fs_reclaim irq_context: 0 &xs->mutex fs_reclaim &rq->__lock irq_context: 0 &xs->mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &xs->mutex pool_lock#2 irq_context: 0 &xs->mutex umem_ida.xa_lock irq_context: 0 &xs->mutex &c->lock irq_context: 0 &xs->mutex &mm->mmap_lock irq_context: 0 &xs->mutex &mm->mmap_lock ptlock_ptr(page)#2 irq_context: 0 &xs->mutex &mm->mmap_lock fs_reclaim irq_context: 0 &xs->mutex &mm->mmap_lock fs_reclaim mmu_notifier_invalidate_range_start 
irq_context: 0 &xs->mutex &mm->mmap_lock &____s->seqcount irq_context: 0 &xs->mutex &mm->mmap_lock pool_lock#2 irq_context: 0 &xs->mutex &mm->mmap_lock stock_lock irq_context: 0 &xs->mutex &mm->mmap_lock ptlock_ptr(page)#2 lock#4 irq_context: 0 &xs->mutex &mm->mmap_lock &rq->__lock irq_context: 0 &xs->mutex &mm->mmap_lock ptlock_ptr(page)#2 lock#4 &lruvec->lru_lock irq_context: 0 &xs->mutex &mm->mmap_lock ptlock_ptr(page)#2 key irq_context: 0 &xs->mutex &mm->mmap_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 jbd2_handle rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 jbd2_handle rcu_read_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 jbd2_handle rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#9 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 &obj_hash[i].lock pool_lock irq_context: 0 sb_writers#4 &mapping->private_lock rcu_read_lock &memcg->move_lock irq_context: 0 sb_writers#4 &sb->s_type->i_lock_key#22 &xa->xa_lock#6 irq_context: 0 &xs->mutex &mm->mmap_lock rcu_read_lock rcu_node_0 irq_context: 0 &xs->mutex &mm->mmap_lock rcu_read_lock &rq->__lock irq_context: 0 sb_writers#4 lock#4 irq_context: 0 sb_writers#4 lock#4 &lruvec->lru_lock irq_context: 0 sb_writers#4 lock#5 irq_context: 0 sb_writers#4 &lruvec->lru_lock irq_context: 0 sb_writers#4 &mapping->private_lock rcu_read_lock rcu_read_lock key#10 irq_context: 0 sb_writers#4 rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#4 rcu_read_lock &rq->__lock irq_context: 0 sb_writers#4 rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_lock_key#22 &xa->xa_lock#6 &obj_hash[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_lock_key#22 &xa->xa_lock#6 pool_lock#2 irq_context: 0 &nft_net->commit_mutex rcu_state.exp_mutex &rnp->exp_wq[0] irq_context: 0 sb_writers#4 rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#4 rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 sb_writers#4 rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ei->i_prealloc_lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ei->i_es_lock &obj_hash[i].lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ei->i_es_lock pool_lock#2 irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ei->i_es_lock &sbi->s_es_lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle &(ei->i_block_reservation_lock) irq_context: 0 sb_writers#4 sb_internal jbd2_handle &sb->s_type->i_lock_key#22 irq_context: 0 sb_writers#4 sb_internal jbd2_handle &journal->j_list_lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle &obj_hash[i].lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle pool_lock#2 irq_context: 0 sb_writers#4 integrity_iint_lock irq_context: 0 sb_writers#8 tomoyo_ss &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events 
(work_completion)(&data->fib_event_work) &data->fib_lock &obj_hash[i].lock pool_lock irq_context: 0 &xs->mutex &mm->mmap_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &xs->mutex &sem->wait_lock irq_context: 0 &xs->mutex &p->pi_lock irq_context: 0 &xt[i].mutex &mm->mmap_lock &rq->__lock irq_context: 0 &xs->mutex &p->pi_lock &rq->__lock irq_context: 0 &xt[i].mutex &mm->mmap_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &xs->mutex &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &xs->mutex &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 &ei->i_es_lock key#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 &n->list_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 &n->list_lock &c->lock irq_context: 0 &type->i_mutex_dir_key#4 &root->kernfs_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &xs->mutex free_vmap_area_lock irq_context: 0 &xs->mutex vmap_area_lock irq_context: 0 &xs->mutex &____s->seqcount irq_context: 0 &xs->mutex init_mm.page_table_lock irq_context: 0 rtnl_mutex &xs->mutex irq_context: 0 rtnl_mutex &xs->mutex fs_reclaim irq_context: 0 rtnl_mutex &xs->mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 rtnl_mutex &xs->mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 rtnl_mutex &xs->mutex pool_lock#2 irq_context: 0 rtnl_mutex &xs->mutex &zone->lock irq_context: 0 rtnl_mutex &xs->mutex &____s->seqcount irq_context: 0 lock link_idr_lock irq_context: 0 lock link_idr_lock pool_lock#2 irq_context: 0 tracepoints_mutex irq_context: 0 tracepoints_mutex fs_reclaim irq_context: 0 tracepoints_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 tracepoints_mutex pool_lock#2 irq_context: 0 tracepoints_mutex cpu_hotplug_lock irq_context: 0 tracepoints_mutex cpu_hotplug_lock static_call_mutex irq_context: 0 tracepoints_mutex cpu_hotplug_lock static_call_mutex text_mutex irq_context: 0 tracepoints_mutex cpu_hotplug_lock static_call_mutex text_mutex &rq->__lock irq_context: 0 tracepoints_mutex cpu_hotplug_lock static_call_mutex text_mutex ptlock_ptr(page)#2 irq_context: 0 tracepoints_mutex cpu_hotplug_lock jump_label_mutex irq_context: 0 tracepoints_mutex cpu_hotplug_lock jump_label_mutex text_mutex irq_context: 0 tracepoints_mutex cpu_hotplug_lock jump_label_mutex text_mutex ptlock_ptr(page)#2 irq_context: 0 link_idr_lock irq_context: 0 &sb->s_type->i_mutex_key#10 &net->xdp.lock irq_context: 0 &sb->s_type->i_mutex_key#10 &xs->map_list_lock irq_context: 0 &sb->s_type->i_mutex_key#10 &xs->mutex irq_context: 0 &sb->s_type->i_mutex_key#10 &xs->mutex rcu_state.exp_mutex rcu_node_0 irq_context: 0 &sb->s_type->i_mutex_key#10 &xs->mutex rcu_state.exp_mutex &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 &xs->mutex rcu_state.exp_mutex rcu_read_lock &pool->lock irq_context: 0 &sb->s_type->i_mutex_key#10 &xs->mutex rcu_state.exp_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 &xs->mutex rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 &sb->s_type->i_mutex_key#10 &xs->mutex rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 &xs->mutex rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 &xs->mutex 
rcu_state.exp_mutex &rnp->exp_wq[1] irq_context: 0 &sb->s_type->i_mutex_key#10 &xs->mutex rcu_state.exp_mutex &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 &xs->mutex rcu_state.exp_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 &xs->mutex &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 vmap_area_lock irq_context: 0 &sb->s_type->i_mutex_key#10 purge_vmap_area_lock irq_context: 0 &sb->s_type->i_mutex_key#10 purge_vmap_area_lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 purge_vmap_area_lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 clock-AF_XDP irq_context: 0 link_idr_lock &obj_hash[i].lock irq_context: 0 link_idr_lock pool_lock#2 irq_context: 0 tracepoints_mutex tracepoint_srcu_srcu_usage.lock irq_context: 0 tracepoints_mutex tracepoint_srcu_srcu_usage.lock &obj_hash[i].lock irq_context: 0 tracepoints_mutex tracepoint_srcu_srcu_usage.lock &ACCESS_PRIVATE(sdp, lock) irq_context: 0 tracepoints_mutex tracepoint_srcu_srcu_usage.lock &base->lock irq_context: 0 tracepoints_mutex tracepoint_srcu_srcu_usage.lock &base->lock &obj_hash[i].lock irq_context: 0 tracepoints_mutex &rq->__lock irq_context: 0 tracepoints_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&(&ssp->srcu_sup->work)->work) &ssp->srcu_sup->srcu_gp_mutex tracepoint_srcu_srcu_usage.lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&(&ssp->srcu_sup->work)->work) &ssp->srcu_sup->srcu_gp_mutex &ssp->srcu_sup->srcu_cb_mutex tracepoint_srcu_srcu_usage.lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&(&ssp->srcu_sup->work)->work) &ssp->srcu_sup->srcu_cb_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&(&ssp->srcu_sup->work)->work) &ssp->srcu_sup->srcu_cb_mutex &base->lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&(&ssp->srcu_sup->work)->work) &ssp->srcu_sup->srcu_cb_mutex &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&(&ssp->srcu_sup->work)->work) tracepoint_srcu_srcu_usage.lock irq_context: softirq (&sdp->delay_work) irq_context: softirq (&sdp->delay_work) rcu_read_lock &pool->lock irq_context: softirq (&sdp->delay_work) rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: softirq (&sdp->delay_work) rcu_read_lock &pool->lock &p->pi_lock irq_context: softirq (&sdp->delay_work) rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: softirq (&sdp->delay_work) rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq rcu_callback tracepoint_srcu_srcu_usage.lock &ACCESS_PRIVATE(sdp, lock) irq_context: softirq rcu_callback tracepoint_srcu_srcu_usage.lock &obj_hash[i].lock irq_context: softirq rcu_callback tracepoint_srcu_srcu_usage.lock &base->lock irq_context: softirq rcu_callback tracepoint_srcu_srcu_usage.lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&pool->work) irq_context: 0 (wq_completion)events (work_completion)(&pool->work) &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&pool->work) rtnl_mutex irq_context: 0 (wq_completion)events (work_completion)(&pool->work) vmap_area_lock irq_context: 0 (wq_completion)events (work_completion)(&pool->work) &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&pool->work) purge_vmap_area_lock irq_context: 0 (wq_completion)events (work_completion)(&pool->work) rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)events 
(work_completion)(&pool->work) pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&pool->work) purge_vmap_area_lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&pool->work) purge_vmap_area_lock pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&pool->work) umem_ida.xa_lock irq_context: 0 (wq_completion)events (work_completion)(&pool->work) &lruvec->lru_lock irq_context: 0 (wq_completion)events (work_completion)(&pool->work) &zone->lock irq_context: 0 (wq_completion)events (work_completion)(&pool->work) &zone->lock &____s->seqcount irq_context: 0 (wq_completion)events (work_completion)(&aux->work) stock_lock irq_context: 0 (wq_completion)events (work_completion)(&aux->work) pcpu_lock stock_lock irq_context: 0 (wq_completion)events (work_completion)(&aux->work) purge_vmap_area_lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&aux->work) purge_vmap_area_lock pool_lock#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock &____s->seqcount#7 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &nf_nat_locks[i] irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &nf_conntrack_locks[i] irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 batched_entropy_u8.lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &nf_conntrack_locks[i]/1 irq_context: 0 (wq_completion)rcu_gp (work_completion)(&sdp->work) pool_lock#2 irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem &ipvlan->addrs_lock &c->lock irq_context: 0 (wq_completion)events_power_efficient (check_lifetime_work).work rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &rdev->bss_lock &n->list_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &rdev->bss_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bat_events &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 jbd2_handle &ei->i_es_lock key#2 irq_context: 0 sk_lock-AF_NETROM irq_context: 0 sk_lock-AF_NETROM &rq->__lock irq_context: 0 sk_lock-AF_NETROM &rq->__lock 
&per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_NETROM slock-AF_NETROM irq_context: 0 slock-AF_NETROM irq_context: 0 pernet_ops_rwsem stock_lock irq_context: 0 pernet_ops_rwsem rcu_read_lock rcu_node_0 irq_context: 0 pernet_ops_rwsem rcu_read_lock &rq->__lock irq_context: 0 pernet_ops_rwsem rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 tomoyo_ss &n->list_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &sbi->s_orphan_lock &ei->i_raw_lock irq_context: 0 pernet_ops_rwsem &____s->seqcount#2 irq_context: 0 pernet_ops_rwsem nf_hook_mutex stock_lock irq_context: 0 &mm->mmap_lock &____s->seqcount#2 irq_context: softirq rcu_read_lock rcu_read_lock &obj_hash[i].lock pool_lock irq_context: 0 pernet_ops_rwsem fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem __ip_vs_app_mutex &c->lock irq_context: 0 pernet_ops_rwsem __ip_vs_app_mutex &____s->seqcount#2 irq_context: 0 pernet_ops_rwsem __ip_vs_app_mutex &____s->seqcount irq_context: 0 &sb->s_type->i_mutex_key#8 &____s->seqcount#2 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex &cfs_rq->removed.lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &journal->j_wait_updates &p->pi_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &journal->j_wait_updates &p->pi_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &journal->j_wait_updates &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &xs->mutex &mm->mmap_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &sbi->s_orphan_lock &ei->i_raw_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &lg->lg_mutex key#3 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &ei->i_es_lock &c->lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock rcu_read_lock &q->queue_lock &c->lock irq_context: softirq (&peer->hb_timer) slock-AF_INET6 rcu_read_lock rcu_read_lock &nf_nat_locks[i] irq_context: 0 text_mutex &rq->__lock irq_context: 0 text_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 text_mutex.wait_lock irq_context: 0 lock link_idr_lock batched_entropy_u8.lock irq_context: 0 lock link_idr_lock kfence_freelist_lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ret->b_state_lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ret->b_state_lock &journal->j_list_lock irq_context: softirq &(&bat_priv->dat.work)->timer irq_context: softirq &(&bat_priv->dat.work)->timer rcu_read_lock &pool->lock/1 irq_context: softirq &(&bat_priv->dat.work)->timer rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: softirq &(&bat_priv->dat.work)->timer rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: softirq &(&bat_priv->dat.work)->timer rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: softirq 
&(&bat_priv->dat.work)->timer rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq &(&bat_priv->bla.work)->timer irq_context: softirq &(&bat_priv->bla.work)->timer rcu_read_lock &pool->lock/1 irq_context: softirq &(&bat_priv->bla.work)->timer rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: softirq &(&bat_priv->bla.work)->timer rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh &hsr->seqnr_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->dat.work)->work) irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->dat.work)->work) &hash->list_locks[i] irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->dat.work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->dat.work)->work) &base->lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->dat.work)->work) &base->lock &obj_hash[i].lock irq_context: 0 &sig->cred_guard_mutex tomoyo_ss &____s->seqcount#2 irq_context: 0 &sig->cred_guard_mutex tomoyo_ss &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->bla.work)->work) irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->bla.work)->work) key#21 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->bla.work)->work) rcu_read_lock &entry->crc_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->bla.work)->work) rcu_read_lock &c->lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->bla.work)->work) rcu_read_lock &n->list_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->bla.work)->work) rcu_read_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->bla.work)->work) rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->bla.work)->work) rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->bla.work)->work) rcu_read_lock &list->lock#5 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->bla.work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->bla.work)->work) &base->lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->bla.work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->bla.work)->work) &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 &mapping->private_lock irq_context: 0 &fsnotify_mark_srcu &cfs_rq->removed.lock irq_context: 0 &fsnotify_mark_srcu &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 tomoyo_ss &____s->seqcount#2 irq_context: 0 rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rcu_read_lock (console_sem).lock irq_context: 0 rcu_read_lock console_lock console_srcu console_owner_lock irq_context: 0 rcu_read_lock console_lock console_srcu console_owner irq_context: 0 rcu_read_lock console_lock console_srcu console_owner &port_lock_key irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &ei->i_data_sem &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &ei->i_data_sem &rq->__lock 
&per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rcu_read_lock console_lock console_srcu console_owner console_owner_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex stock_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &____s->seqcount#2 irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 remove_cache_srcu &rq->__lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &mm->mmap_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq (&peer->timer_persistent_keepalive) rcu_read_lock_bh rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: softirq (&peer->timer_persistent_keepalive) rcu_read_lock_bh rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq &(&bat_priv->tt.work)->timer rcu_read_lock &pool->lock/1 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem key#15 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &ei->i_es_lock key#8 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle bit_wait_table + i irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 fs_reclaim &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &n->list_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &n->list_lock &c->lock irq_context: 0 pernet_ops_rwsem rtnl_mutex lock kernfs_idr_lock &____s->seqcount#2 irq_context: 0 pernet_ops_rwsem rtnl_mutex lock kernfs_idr_lock &____s->seqcount irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex rcu_read_lock &obj_hash[i].lock pool_lock irq_context: 0 sk_lock-AF_INET rcu_read_lock &c->lock irq_context: 0 sk_lock-AF_INET rcu_read_lock &____s->seqcount#2 irq_context: 0 sk_lock-AF_INET rcu_read_lock &____s->seqcount irq_context: 0 sk_lock-AF_INET rcu_read_lock &dir->lock#2 irq_context: 0 sk_lock-AF_INET rcu_read_lock &ul->lock irq_context: 0 sk_lock-AF_INET batched_entropy_u16.lock crngs.lock irq_context: 0 &mm->mmap_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rcu_read_lock &dir->lock#2 irq_context: 0 rcu_read_lock &ul->lock irq_context: 0 sk_lock-AF_INET rcu_read_lock &____s->seqcount#7 irq_context: 0 sk_lock-AF_INET rcu_read_lock &nf_nat_locks[i] irq_context: 0 sk_lock-AF_INET rcu_read_lock &nf_conntrack_locks[i] irq_context: 0 sk_lock-AF_INET rcu_read_lock &nf_conntrack_locks[i] batched_entropy_u8.lock irq_context: 0 sk_lock-AF_INET rcu_read_lock &____s->seqcount#10 irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET rcu_read_lock pool_lock#2 irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET rcu_read_lock &c->lock irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET rcu_read_lock &dir->lock#2 irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET rcu_read_lock &ul->lock irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET rcu_read_lock &n->list_lock irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET rcu_read_lock &n->list_lock &c->lock irq_context: softirq 
rcu_read_lock rcu_read_lock k-slock-AF_INET rcu_read_lock key#22 irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET rcu_read_lock &____s->seqcount#2 irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET rcu_read_lock &____s->seqcount irq_context: 0 sk_lock-AF_INET rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh &list->lock#17 irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh &r->producer_lock#2 irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh rcu_read_lock &pool->lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh rcu_read_lock &pool->lock &p->pi_lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 rtnl_mutex nl_table_wait.lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 batched_entropy_u8.lock crngs.lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx fill_pool_map-wait-type-override &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 &pcp->lock &zone->lock irq_context: 0 &nft_net->commit_mutex &c->lock irq_context: 0 &nft_net->commit_mutex &n->list_lock irq_context: 0 &nft_net->commit_mutex &n->list_lock &c->lock irq_context: 0 &nft_net->commit_mutex &rq->__lock irq_context: 0 &nft_net->commit_mutex rcu_state.exp_mutex &rnp->exp_wq[3] irq_context: 0 sk_lock-AF_INET rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 sk_lock-AF_INET rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 jbd2_handle key#4 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &ei->i_es_lock key#6 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock hwsim_radio_lock &c->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock hwsim_radio_lock &n->list_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock hwsim_radio_lock &n->list_lock &c->lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &rq->__lock &cfs_rq->removed.lock irq_context: 0 sk_lock-AF_INET6 remove_cache_srcu rcu_read_lock rcu_node_0 irq_context: 
0 sk_lock-AF_INET6 remove_cache_srcu rcu_read_lock &rq->__lock irq_context: 0 sk_lock-AF_INET6 remove_cache_srcu rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_es_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem pool_lock#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &ei->i_es_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &ei->i_es_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &ei->i_es_lock pool_lock#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &ei->i_es_lock &sbi->s_es_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &obj_hash[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem pool_lock#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem key#15 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &sb->s_type->i_lock_key#22 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &ei->i_raw_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &ei->i_prealloc_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &sbi->s_md_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &bgl->locks[i].lock &sbi->s_md_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &obj_hash[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &mapping->private_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem remove_cache_srcu irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem remove_cache_srcu &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem remove_cache_srcu quarantine_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &ret->b_state_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &ret->b_state_lock &journal->j_list_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem key#3 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &ei->i_es_lock 
irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &ei->i_es_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &ei->i_es_lock pool_lock#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &ei->i_es_lock key#7 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle tk_core.seq.seqcount irq_context: 0 sk_lock-AF_INET6 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &c->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &____s->seqcount#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &____s->seqcount irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6 rcu_read_lock &____s->seqcount#2 irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET &____s->seqcount#2 irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6 rcu_read_lock &____s->seqcount irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET &____s->seqcount irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_es_lock key#2 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &mm->mmap_lock &sem->wait_lock irq_context: 0 sk_lock-AF_INET6 &____s->seqcount#2 irq_context: softirq rcu_callback key#22 irq_context: 0 &ei->i_data_sem irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sem->wait_lock irq_context: 0 &sb->s_type->i_mutex_key#10 &cfs_rq->removed.lock irq_context: 0 &xt[i].mutex fs_reclaim &rq->__lock irq_context: 0 &xt[i].mutex fs_reclaim &rq->__lock &cfs_rq->removed.lock irq_context: 0 &xt[i].mutex fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &journal->j_revoke_lock irq_context: 0 &xt[i].mutex rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 &xt[i].mutex rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 &xt[i].mutex rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 mapping.invalidate_lock jbd2_handle &ei->i_data_sem &ei->i_data_sem/1 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 mapping.invalidate_lock &ei->i_data_sem/1 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8/4 irq_context: 0 sb_writers#4 &sb->s_type->i_lock_key#22 &lru->node[i].lock irq_context: 0 sk_lock-AF_INET6 remove_cache_srcu &rq->__lock irq_context: 0 sk_lock-AF_INET6 remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 mapping.invalidate_lock lock#4 &lruvec->lru_lock irq_context: 0 mapping.invalidate_lock &ei->i_data_sem irq_context: 0 mapping.invalidate_lock &ei->i_data_sem mmu_notifier_invalidate_range_start irq_context: 0 mapping.invalidate_lock &ei->i_data_sem mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 mapping.invalidate_lock &ei->i_data_sem mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &xs->mutex &c->lock irq_context: 0 &sb->s_type->i_mutex_key#10 &xs->mutex rcu_state.exp_mutex &rnp->exp_wq[0] irq_context: 0 mapping.invalidate_lock &ei->i_data_sem pool_lock#2 irq_context: 0 
mapping.invalidate_lock &ei->i_data_sem &ei->i_es_lock irq_context: 0 mapping.invalidate_lock &ei->i_data_sem &ei->i_es_lock pool_lock#2 irq_context: 0 mapping.invalidate_lock &ei->i_data_sem &ei->i_es_lock &sbi->s_es_lock irq_context: 0 mapping.invalidate_lock &ei->i_data_sem &obj_hash[i].lock irq_context: 0 mapping.invalidate_lock &ei->i_data_sem &ei->i_es_lock &obj_hash[i].lock irq_context: 0 &mm->mmap_lock mapping.invalidate_lock irq_context: 0 ipvs->sync_mutex irq_context: 0 ipvs->sync_mutex &rq->__lock irq_context: 0 ipvs->sync_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cgroup_threadgroup_rwsem rcu_read_lock rcu_node_0 irq_context: 0 cgroup_threadgroup_rwsem rcu_read_lock &rq->__lock irq_context: 0 cgroup_threadgroup_rwsem rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rcu_read_lock rhashtable_bucket irq_context: 0 sk_lock-AF_INET &____s->seqcount#2 irq_context: 0 sk_lock-AF_INET6 rcu_read_lock &rcu_state.expedited_wq irq_context: 0 sk_lock-AF_INET6 rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 &____s->seqcount#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem rcu_read_lock &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 tomoyo_ss &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rcu_read_lock key#22 irq_context: 0 sk_lock-AF_TIPC irq_context: 0 sk_lock-AF_TIPC slock-AF_TIPC irq_context: 0 sk_lock-AF_TIPC fs_reclaim irq_context: 0 sk_lock-AF_TIPC fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sk_lock-AF_TIPC pool_lock#2 irq_context: 0 sk_lock-AF_TIPC &rq->__lock irq_context: 0 sk_lock-AF_TIPC &mm->mmap_lock irq_context: 0 sk_lock-AF_TIPC &list->lock#21 irq_context: 0 sk_lock-AF_TIPC slock-AF_TIPC &list->lock#21 irq_context: 0 slock-AF_TIPC irq_context: 0 sk_lock-AF_TIPC &ei->socket.wq.wait irq_context: 0 sk_lock-AF_INET6 fs_reclaim &rq->__lock irq_context: 0 sk_lock-AF_INET6 fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->orig_work)->work) &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 &____s->seqcount#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 &____s->seqcount irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &____s->seqcount#2 irq_context: 0 &mm->mmap_lock sb_pagefaults jbd2_handle &rq->__lock irq_context: 0 &mm->mmap_lock sb_pagefaults jbd2_handle &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock sb_pagefaults &rq->__lock irq_context: 0 &mm->mmap_lock sb_pagefaults &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&pool->work) rtnl_mutex rtnl_mutex.wait_lock irq_context: 0 (wq_completion)events (work_completion)(&pool->work) rtnl_mutex &pool->lock irq_context: 0 (wq_completion)events (work_completion)(&pool->work) rtnl_mutex &pool->lock &p->pi_lock irq_context: 0 
(wq_completion)events (work_completion)(&pool->work) rtnl_mutex &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&pool->work) rtnl_mutex &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&pool->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&pool->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &p->lock &mm->mmap_lock &rq->__lock irq_context: 0 &p->lock &mm->mmap_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 &mm->mmap_lock stock_lock rcu_read_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 sk_lock-AF_TIPC rcu_read_lock rcu_node_0 irq_context: 0 sk_lock-AF_TIPC rcu_read_lock &rq->__lock irq_context: 0 sk_lock-AF_TIPC slock-AF_TIPC rcu_read_lock &ei->socket.wq.wait irq_context: 0 sk_lock-AF_TIPC slock-AF_TIPC rcu_read_lock &ei->socket.wq.wait &p->pi_lock irq_context: 0 sk_lock-AF_TIPC slock-AF_TIPC rcu_read_lock &ei->socket.wq.wait &p->pi_lock &rq->__lock irq_context: 0 sk_lock-AF_TIPC slock-AF_TIPC rcu_read_lock &ei->socket.wq.wait &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_TIPC &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_TIPC irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_TIPC slock-AF_TIPC irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_TIPC pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_TIPC &list->lock#21 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_TIPC slock-AF_TIPC &list->lock#21 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_TIPC &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_TIPC &base->lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_TIPC &base->lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_TIPC &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_TIPC rcu_read_lock rhashtable_bucket irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_TIPC clock-AF_TIPC irq_context: 0 &sb->s_type->i_mutex_key#10 slock-AF_TIPC irq_context: 0 &mm->mmap_lock ptlock_ptr(page)#2 lock#4 &lruvec->lru_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 &mm->mmap_lock sb_pagefaults mapping.invalidate_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 (wq_completion)events (work_completion)(&pool->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem rtnl_mutex uevent_sock_mutex &rq->__lock irq_context: 0 pernet_ops_rwsem rtnl_mutex uevent_sock_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 mapping.invalidate_lock jbd2_handle &ei->i_data_sem &ei->i_data_sem/1 &mapping->private_lock irq_context: 0 mapping.invalidate_lock rcu_read_lock rcu_node_0 irq_context: 0 mapping.invalidate_lock rcu_read_lock &rq->__lock irq_context: 0 mapping.invalidate_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_INET6 &cfs_rq->removed.lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &____s->seqcount#2 irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET batched_entropy_u32.lock crngs.lock irq_context: 0 sb_writers#4 sb_internal rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#4 sb_internal rcu_read_lock &rq->__lock irq_context: 0 
sb_writers#4 sb_internal rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem stock_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &xa->xa_lock#6 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem lock#4 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 &pcp->lock &zone->lock &____s->seqcount irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_read_lock &rcu_state.gp_wq irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq rcu_read_lock rcu_read_lock quarantine_lock irq_context: 0 sb_writers#8 &type->i_mutex_dir_key#4 &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &ei->i_es_lock &c->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &ei->i_es_lock &____s->seqcount#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &ei->i_es_lock &____s->seqcount irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &bgl->locks[i].lock &ei->i_prealloc_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &pa->pa_lock#2 irq_context: 0 pernet_ops_rwsem rtnl_mutex &idev->mc_lock &rq->__lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &idev->mc_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock sb_pagefaults mapping.invalidate_lock &____s->seqcount#2 irq_context: 0 &mm->mmap_lock sb_pagefaults mapping.invalidate_lock &____s->seqcount irq_context: 0 &mm->mmap_lock sb_pagefaults mapping.invalidate_lock rcu_read_lock &xa->xa_lock#6 key#10 irq_context: 0 &mm->mmap_lock sb_pagefaults mapping.invalidate_lock rcu_read_lock rcu_node_0 irq_context: 0 &mm->mmap_lock sb_pagefaults mapping.invalidate_lock rcu_read_lock &rq->__lock irq_context: 0 &mm->mmap_lock sb_pagefaults mapping.invalidate_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sem->wait_lock irq_context: 0 sb_writers#4 &p->pi_lock irq_context: 0 sb_writers#4 &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 integrity_iint_lock irq_context: 0 sb_writers#4 &iint->mutex irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem &ei->i_prealloc_lock &pa->pa_lock#2 irq_context: 0 &mm->mmap_lock &____s->seqcount#2 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->dat.work)->work) &rq->__lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->dat.work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->bla.work)->work) rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bat_events 
(work_completion)(&(&bat_priv->bla.work)->work) rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->bla.work)->work) rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex &bat_priv->tt.commit_lock &c->lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex &bat_priv->tt.commit_lock &bat_priv->tvlv.container_list_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex &bat_priv->tt.commit_lock &bat_priv->tvlv.container_list_lock pool_lock#2 irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh key#21 irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh &entry->crc_lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh &____s->seqcount irq_context: 0 &sb->s_type->i_mutex_key#10 clock-AF_NETROM irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_NETROM irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_NETROM slock-AF_NETROM irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_NETROM &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_NETROM wlock-AF_NETROM irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_NETROM &list->lock#22 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_NETROM nr_list_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_NETROM rlock-AF_NETROM irq_context: 0 &sb->s_type->i_mutex_key#10 slock-AF_NETROM irq_context: 0 sk_lock-AF_INET remove_cache_srcu rcu_read_lock rcu_node_0 irq_context: 0 sk_lock-AF_INET remove_cache_srcu rcu_read_lock &rq->__lock irq_context: 0 sk_lock-AF_INET remove_cache_srcu rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 pernet_ops_rwsem rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock sb_pagefaults &c->lock irq_context: 0 &mm->mmap_lock sb_pagefaults &____s->seqcount#2 irq_context: 0 &mm->mmap_lock sb_pagefaults &____s->seqcount irq_context: 0 &xs->mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem &cfs_rq->removed.lock irq_context: 0 pernet_ops_rwsem rtnl_mutex uevent_sock_mutex &____s->seqcount#2 irq_context: 0 sk_lock-AF_INET rcu_read_lock &cfs_rq->removed.lock irq_context: 0 &xs->mutex &n->list_lock irq_context: 0 &xs->mutex &n->list_lock &c->lock irq_context: softirq (&hsr->announce_timer) rcu_read_lock &____s->seqcount#2 irq_context: softirq (&hsr->announce_timer) rcu_read_lock &____s->seqcount irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh &obj_hash[i].lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh pool_lock#2 irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh rcu_read_lock &list->lock#5 irq_context: 
softirq rcu_read_lock rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)wg-kex-wg2#14 irq_context: 0 (wq_completion)wg-kex-wg2#14 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) irq_context: 0 (wq_completion)wg-kex-wg2#14 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &r->consumer_lock#2 irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET rcu_read_lock batched_entropy_u8.lock irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET rcu_read_lock kfence_freelist_lock irq_context: 0 text_mutex text_mutex.wait_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&(&kfence_timer)->work) cpu_hotplug_lock jump_label_mutex text_mutex.wait_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&(&kfence_timer)->work) cpu_hotplug_lock jump_label_mutex &p->pi_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&(&kfence_timer)->work) cpu_hotplug_lock jump_label_mutex &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&(&kfence_timer)->work) cpu_hotplug_lock jump_label_mutex &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_unbound (work_completion)(&(&kfence_timer)->work) cpu_hotplug_lock jump_label_mutex &rq->__lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&(&kfence_timer)->work) cpu_hotplug_lock jump_label_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 tracepoints_mutex cpu_hotplug_lock static_call_mutex text_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 tracepoints_mutex cpu_hotplug_lock jump_label_mutex jump_label_mutex.wait_lock irq_context: 0 tracepoints_mutex cpu_hotplug_lock jump_label_mutex &rq->__lock irq_context: 0 tracepoints_mutex cpu_hotplug_lock jump_label_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq rcu_read_lock rcu_read_lock fill_pool_map-wait-type-override pool_lock#2 irq_context: softirq rcu_read_lock rcu_read_lock fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&(&kfence_timer)->work) cpu_hotplug_lock jump_label_mutex.wait_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&(&kfence_timer)->work) cpu_hotplug_lock &p->pi_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&(&kfence_timer)->work) cpu_hotplug_lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&(&kfence_timer)->work) cpu_hotplug_lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 fill_pool_map-wait-type-override rcu_read_lock rcu_node_0 irq_context: 0 fill_pool_map-wait-type-override rcu_read_lock &rq->__lock irq_context: 0 fill_pool_map-wait-type-override rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem rtnl_mutex nl_table_wait.lock &p->pi_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex 
nl_table_wait.lock &p->pi_lock &rq->__lock irq_context: 0 pernet_ops_rwsem rtnl_mutex nl_table_wait.lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_INET6 rcu_read_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_INET6 &tcp_hashinfo.bhash[i].lock stock_lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_INET6 &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock stock_lock irq_context: 0 pernet_ops_rwsem cpu_hotplug_lock wq_pool_mutex &____s->seqcount#2 irq_context: 0 pernet_ops_rwsem cpu_hotplug_lock wq_pool_mutex &____s->seqcount irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex defrag4_mutex nf_hook_mutex stock_lock irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex nf_hook_mutex stock_lock irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex nf_hook_mutex &n->list_lock irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex nf_hook_mutex &n->list_lock &c->lock irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex defrag6_mutex nf_hook_mutex stock_lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex stock_lock irq_context: 0 (wq_completion)wg-kex-wg2#14 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock irq_context: 0 (wq_completion)wg-kex-wg2#14 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock batched_entropy_u8.lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock kfence_freelist_lock irq_context: 0 sk_lock-AF_INET6 fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 sk_lock-AF_INET6 fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock sb_pagefaults mapping.invalidate_lock &ei->i_es_lock key#2 irq_context: 0 (wq_completion)events (work_completion)(&pool->work) rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)events (work_completion)(&pool->work) rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&pool->work) rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns irq_context: 0 (wq_completion)netns net_cleanup_work irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem net_rwsem irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &net->nsid_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_read_lock &tn->node_list_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem ebt_mutex irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &xt[i].mutex irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &nft_net->commit_mutex irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem nf_ct_ecache_mutex 
irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem netns_bpf_mutex irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_state.exp_mutex rcu_node_0 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_state.exp_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_state.exp_mutex rcu_read_lock &pool->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_state.exp_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_state.exp_mutex &rnp->exp_wq[2] irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_state.exp_mutex &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_state.exp_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-kex-wg2#14 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock irq_context: 0 (wq_completion)rcu_gp &rq->__lock irq_context: 0 (wq_completion)rcu_gp &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem &ei->i_es_lock &____s->seqcount#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem &ei->i_es_lock &____s->seqcount irq_context: softirq rcu_read_lock &local->rx_path_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem (&net->fs_probe_timer) irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &base->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &net->cells_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem (&net->cells_timer) irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_read_lock &pool->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem bit_wait_table + i irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock ptlock_ptr(page)#2 &mapping->private_lock irq_context: 0 &mm->mmap_lock ptlock_ptr(page)#2 &mapping->private_lock rcu_read_lock &memcg->move_lock irq_context: 0 rcu_read_lock rcu_read_lock pgd_lock irq_context: 0 rcu_read_lock rcu_read_lock stock_lock irq_context: 0 rcu_read_lock rcu_read_lock key irq_context: 0 rcu_read_lock rcu_read_lock pcpu_lock irq_context: 0 rcu_read_lock rcu_read_lock percpu_counters_lock irq_context: 0 rcu_read_lock rcu_read_lock pcpu_lock stock_lock irq_context: 0 rcu_read_lock rcu_read_lock &cfs_rq->removed.lock irq_context: 0 sk_lock-AF_INET6 batched_entropy_u8.lock irq_context: 0 sk_lock-AF_INET6 kfence_freelist_lock irq_context: 0 (wq_completion)afs 
irq_context: 0 (wq_completion)afs (work_completion)(&net->cells_manager) irq_context: 0 (wq_completion)afs (work_completion)(&net->cells_manager) &net->cells_lock irq_context: 0 (wq_completion)afs (work_completion)(&net->cells_manager) bit_wait_table + i irq_context: 0 (wq_completion)afs (work_completion)(&net->cells_manager) bit_wait_table + i &p->pi_lock irq_context: 0 (wq_completion)afs (work_completion)(&net->cells_manager) bit_wait_table + i &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)afs (work_completion)(&net->cells_manager) bit_wait_table + i &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)afs (work_completion)(&net->cells_manager) &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem (&net->fs_timer) irq_context: 0 (wq_completion)afs (work_completion)(&net->fs_manager) irq_context: 0 (wq_completion)afs (work_completion)(&net->fs_manager) &(&net->fs_lock)->lock irq_context: 0 (wq_completion)afs (work_completion)(&net->fs_manager) &rq->__lock irq_context: 0 (wq_completion)afs (work_completion)(&net->fs_manager) bit_wait_table + i irq_context: 0 (wq_completion)afs (work_completion)(&net->fs_manager) bit_wait_table + i &p->pi_lock irq_context: 0 (wq_completion)afs (work_completion)(&net->fs_manager) bit_wait_table + i &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)afs (work_completion)(&net->fs_manager) bit_wait_table + i &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-sk_lock-AF_RXRPC irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-sk_lock-AF_RXRPC k-slock-AF_RXRPC irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-sk_lock-AF_RXRPC &rx->incoming_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-sk_lock-AF_RXRPC &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-sk_lock-AF_RXRPC pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->conn_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-sk_lock-AF_RXRPC &call->waitq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-sk_lock-AF_RXRPC rcu_read_lock &call->notify_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-sk_lock-AF_RXRPC (rxrpc_call_limiter).lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-sk_lock-AF_RXRPC &rx->recvmsg_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-sk_lock-AF_RXRPC &rx->call_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->call_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-sk_lock-AF_RXRPC (&call->timer) irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-sk_lock-AF_RXRPC &base->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-sk_lock-AF_RXRPC &list->lock#23 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-slock-AF_RXRPC irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem (wq_completion)kafsd irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &wq->mutex irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &wq->mutex &pool->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &wq->mutex &x->wait#10 irq_context: 0 
(wq_completion)netns net_cleanup_work pernet_ops_rwsem pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-clock-AF_RXRPC irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &local->services_lock irq_context: 0 rcu_read_lock stock_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem (wq_completion)krxrpcd irq_context: 0 rcu_read_lock pcpu_lock stock_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &wq->mutex &pool->lock/1 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rlock-AF_RXRPC irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &p->pi_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &x->wait irq_context: 0 &rxnet->local_mutex irq_context: 0 (&local->client_conn_reap_timer) irq_context: 0 &rxnet->conn_lock irq_context: 0 &table->hash[i].lock irq_context: 0 &table->hash[i].lock &table->hash2[i].lock irq_context: 0 k-clock-AF_INET6 irq_context: 0 &list->lock#24 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &sb->s_type->i_lock_key#8 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &xa->xa_lock#6 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &fsnotify_mark_srcu irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem proc_subdir_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &ent->pde_unload_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem proc_inum_ida.xa_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &____s->seqcount irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem ovs_mutex irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem ovs_mutex (work_completion)(&data->gc_work) irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex nf_hook_mutex irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex nf_hook_mutex cpu_hotplug_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex nf_hook_mutex fs_reclaim irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex nf_hook_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex nf_hook_mutex pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex defrag4_mutex irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex defrag4_mutex nf_hook_mutex irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex defrag4_mutex nf_hook_mutex cpu_hotplug_lock irq_context: 0 
(wq_completion)netns net_cleanup_work pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex defrag4_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex defrag4_mutex pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex nf_hook_mutex &c->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex defrag6_mutex irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex defrag6_mutex nf_hook_mutex irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex defrag6_mutex nf_hook_mutex cpu_hotplug_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex defrag6_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex defrag6_mutex pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem ovs_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem ovs_mutex pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem ovs_mutex nf_connlabels_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem ovs_mutex net_rwsem irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem (work_completion)(&(&ovs_net->masks_rebalance)->work) irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem (work_completion)(&ovs_net->dp_notify_work) irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &srv->idr_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &tn->nametbl_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem wq_pool_mutex irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem wq_pool_mutex &wq->mutex irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem wq_pool_mutex &wq->mutex &pool->lock/1 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &pool->lock/1 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &pool->lock/1 rcu_read_lock &pool->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &pool->lock/1 rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &pool->lock/1 rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 sk_lock-AF_TIPC slock-AF_TIPC rcu_read_lock &ei->socket.wq.wait &p->pi_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-sk_lock-AF_TIPC irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-sk_lock-AF_TIPC k-slock-AF_TIPC irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-sk_lock-AF_TIPC &tn->nametbl_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-sk_lock-AF_TIPC &tn->nametbl_lock &service->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-sk_lock-AF_TIPC &tn->nametbl_lock &service->lock &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-sk_lock-AF_TIPC &tn->nametbl_lock &service->lock pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-sk_lock-AF_TIPC &tn->nametbl_lock &service->lock krc.lock 
irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-sk_lock-AF_TIPC &tn->nametbl_lock &nt->cluster_scope_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-sk_lock-AF_TIPC &tn->nametbl_lock &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-sk_lock-AF_TIPC &tn->nametbl_lock pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-sk_lock-AF_TIPC &tn->nametbl_lock krc.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-sk_lock-AF_TIPC &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-sk_lock-AF_TIPC rcu_read_lock rhashtable_bucket irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-sk_lock-AF_TIPC k-clock-AF_TIPC irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-slock-AF_TIPC irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem ptype_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_state.exp_mutex &rnp->exp_wq[3] irq_context: 0 sb_writers#4 sb_internal jbd2_handle &(ei->i_block_reservation_lock) key#15 irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ei->i_data_sem irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ei->i_data_sem &ei->i_prealloc_lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ei->i_data_sem &ei->i_raw_lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ei->i_data_sem &ei->i_es_lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ei->i_data_sem &ei->i_es_lock &sbi->s_es_lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ei->i_data_sem &ei->i_es_lock &obj_hash[i].lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ei->i_data_sem &ei->i_es_lock pool_lock#2 irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ei->i_data_sem mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ei->i_data_sem pool_lock#2 irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ei->i_data_sem &bgl->locks[i].lock &sbi->s_md_lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ei->i_data_sem &sb->s_type->i_lock_key#22 irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ei->i_data_sem &obj_hash[i].lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle tk_core.seq.seqcount irq_context: 0 (wq_completion)events (work_completion)(&pwq->unbound_release_work) &rnp->exp_lock irq_context: 0 (wq_completion)events (work_completion)(&pwq->unbound_release_work) rcu_state.exp_mutex irq_context: 0 (wq_completion)events (work_completion)(&pwq->unbound_release_work) rcu_state.exp_mutex rcu_state.exp_mutex.wait_lock irq_context: 0 (wq_completion)events (work_completion)(&pwq->unbound_release_work) &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&pwq->unbound_release_work) &rnp->exp_wq[0] irq_context: 0 (wq_completion)events (work_completion)(&pwq->unbound_release_work) &pool->lock irq_context: 0 (wq_completion)events (work_completion)(&pwq->unbound_release_work) &pool->lock &p->pi_lock irq_context: 0 (wq_completion)events (work_completion)(&pwq->unbound_release_work) &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&pwq->unbound_release_work) &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&pwq->unbound_release_work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem 
rcu_state.exp_mutex.wait_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &dir->lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem (work_completion)(&tn->work) irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &rnp->exp_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &rnp->exp_wq[0] irq_context: softirq rcu_callback rlock-AF_RXRPC irq_context: 0 sb_writers#4 sb_internal jbd2_handle &sbi->s_orphan_lock &ei->i_raw_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal &c->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal &____s->seqcount#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal &____s->seqcount irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_data_sem key#3 irq_context: 0 sk_lock-AF_TIPC &c->lock irq_context: 0 sb_writers#8 kn->active#5 &rq->__lock irq_context: 0 sb_writers#8 kn->active#5 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_state.exp_mutex &rnp->exp_wq[1] irq_context: softirq rcu_read_lock rcu_read_lock fill_pool_map-wait-type-override &c->lock irq_context: softirq rcu_read_lock rcu_read_lock fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: softirq rcu_read_lock rcu_read_lock fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem (work_completion)(&ht->run_work) irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &ht->mutex irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &ht->mutex &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &ht->mutex pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem (work_completion)(&(&c->work)->work) irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem (&rxnet->peer_keepalive_timer) irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &pool->lock/1 rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &pool->lock/1 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &pool->lock/1 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem pcpu_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-sk_lock-AF_INET6 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-sk_lock-AF_INET6 k-slock-AF_INET6 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-sk_lock-AF_INET6 k-clock-AF_INET6 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-slock-AF_INET6 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem (wq_completion)krdsd irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem 
(work_completion)(&rtn->rds_tcp_accept_w) irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-sk_lock-AF_INET6 &h->lhash2[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-sk_lock-AF_INET6 &queue->rskq_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-sk_lock-AF_INET6 k-slock-AF_INET6 &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-sk_lock-AF_INET6 k-slock-AF_INET6 &tcp_hashinfo.bhash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-sk_lock-AF_INET6 k-slock-AF_INET6 &tcp_hashinfo.bhash[i].lock stock_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-sk_lock-AF_INET6 k-slock-AF_INET6 &tcp_hashinfo.bhash[i].lock &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-sk_lock-AF_INET6 k-slock-AF_INET6 &tcp_hashinfo.bhash[i].lock pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-sk_lock-AF_INET6 k-slock-AF_INET6 &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-sk_lock-AF_INET6 k-slock-AF_INET6 &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock stock_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-sk_lock-AF_INET6 k-slock-AF_INET6 &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-sk_lock-AF_INET6 k-slock-AF_INET6 &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-sk_lock-AF_INET6 k-slock-AF_INET6 elock-AF_INET6 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rds_tcp_conn_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem sysctl_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem sysctl_lock &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem sysctl_lock pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem sysctl_lock krc.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem loop_conns_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem (wq_completion)l2tp irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_state.barrier_mutex irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_state.barrier_mutex rcu_state.barrier_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_state.barrier_mutex rcu_state.barrier_lock &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_state.barrier_mutex &x->wait#24 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_state.barrier_mutex &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_state.barrier_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&pwq->unbound_release_work) rcu_state.exp_mutex &rnp->exp_wq[3] irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 lock#4 rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#4 lock#4 &obj_hash[i].lock irq_context: 0 sb_writers#4 
&sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &sbi->s_orphan_lock &ei->i_raw_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &sbi->s_orphan_lock &rq->__lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex &n->list_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex &n->list_lock &c->lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &rq->__lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock &rcu_state.gp_wq irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock &base->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock &rcu_state.expedited_wq irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 sb_internal &journal->j_state_lock irq_context: 0 sb_writers#4 sb_internal &journal->j_state_lock tk_core.seq.seqcount irq_context: 0 sb_writers#4 sb_internal &journal->j_state_lock &obj_hash[i].lock irq_context: 0 sb_writers#4 sb_internal &journal->j_state_lock &base->lock irq_context: 0 sb_writers#4 sb_internal &journal->j_state_lock &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ei->i_data_sem &ret->b_state_lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle &mapping->private_lock irq_context: 0 sb_writers#4 sb_internal &rq->__lock irq_context: 0 sb_writers#4 sb_internal &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq rcu_callback &x->wait#24 &p->pi_lock &rq->__lock irq_context: softirq rcu_callback &x->wait#24 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem (work_completion)(&rxnet->peer_keepalive_work) irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem 
(&rxnet->service_conn_reap_timer) irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &x->wait#10 irq_context: 0 (wq_completion)krxrpcd (work_completion)(&rxnet->service_conn_reaper) irq_context: 0 (wq_completion)krxrpcd (work_completion)(&rxnet->service_conn_reaper) &rxnet->conn_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &rxnet->conn_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rtnl_mutex.wait_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq (&peer->hb_timer) slock-AF_INET6 rcu_read_lock rcu_read_lock &nf_conntrack_locks[i] irq_context: softirq (&peer->timer_persistent_keepalive) rcu_read_lock_bh rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx quarantine_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_state.exp_mutex rcu_node_0 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_state.exp_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_state.exp_mutex &rnp->exp_wq[0] irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_state.exp_mutex &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_state.exp_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex dev_base_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex cpu_hotplug_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex cpu_hotplug_lock &list->lock#5 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_state.exp_mutex &rnp->exp_wq[1] irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex 
bpf_devs_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex net_rwsem irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &in_dev->mc_tomb_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex sysctl_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex sysctl_lock &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex sysctl_lock pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex sysctl_lock krc.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex stock_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &tbl->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &dir->lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex class irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex (&tbl->proxy_timer) irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &base->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &ul->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &net->xdp.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex krc.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex mirred_list_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &nft_net->commit_mutex irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &tn->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_read_lock &tb->tb6_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_read_lock &tb->tb6_lock &net->ipv6.fib6_walker_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &ul->lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex proc_subdir_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &ent->pde_unload_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex proc_inum_ida.xa_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &net->ipv6.addrconf_hash_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &ei->i_data_sem &c->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &ndev->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &ndev->lock &obj_hash[i].lock irq_context: 0 
(wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_state.exp_mutex &rnp->exp_wq[2] irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock &rcu_state.expedited_wq irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &idev->mc_query_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &idev->mc_query_lock &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex (work_completion)(&(&idev->mc_report_work)->work) irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &idev->mc_report_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &idev->mc_lock &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &idev->mc_lock pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &idev->mc_lock krc.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_read_lock &rcu_state.gp_wq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &pnn->pndevs.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &pnn->routes.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &pnettable->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex smc_ib_devices.mutex irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &____s->seqcount irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex target_list_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &c->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &____s->seqcount#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex _xmit_NONE irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex net_rwsem &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex net_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 
(wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem kernfs_idr_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex sysfs_symlink_target_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex kernfs_idr_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &k->list_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex dev_hotplug_mutex irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex dev_hotplug_mutex &dev->power.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex dev_hotplug_mutex &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex dev_hotplug_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex dev_pm_qos_sysfs_mtx irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex dev_pm_qos_sysfs_mtx &root->kernfs_rwsem irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex dev_pm_qos_sysfs_mtx &root->kernfs_rwsem irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex dev_pm_qos_sysfs_mtx dev_pm_qos_mtx irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex subsys mutex#17 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex subsys mutex#17 &k->k_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex subsys mutex#17 &k->k_lock klist_remove_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq rcu_callback pcpu_lock stock_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &sem->wait_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &p->pi_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem &sem->wait_lock irq_context: 0 sb_writers#8 &type->i_mutex_dir_key#4 &root->kernfs_rwsem &sem->wait_lock irq_context: 0 sb_writers#8 &type->i_mutex_dir_key#4 &sem->wait_lock irq_context: 0 sb_writers#8 &type->i_mutex_dir_key#4 &p->pi_lock irq_context: 0 sb_writers#8 kn->active#5 &n->list_lock irq_context: 0 sb_writers#8 kn->active#5 &n->list_lock &c->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem 
kernfs_idr_lock &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem kernfs_idr_lock pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &x->wait#9 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex dpm_list_mtx irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &dev->power.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex deferred_probe_mutex irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex device_links_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex device_links_lock &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex device_links_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 remove_cache_srcu irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 remove_cache_srcu quarantine_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 remove_cache_srcu &c->lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 remove_cache_srcu &n->list_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 remove_cache_srcu &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 remove_cache_srcu &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex gdp_mutex irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex cpu_hotplug_lock xps_map_mutex irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_state.exp_mutex &rnp->exp_wq[3] irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 rcu_node_0 irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 tomoyo_ss remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem dev_base_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem lweventlist_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem napi_hash_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem fs_reclaim irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem stock_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem krc.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &dir->lock#2 &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &dir->lock#2 pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem netdev_unregistering_wq.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex _xmit_TUNNEL6 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem &____s->seqcount irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex _xmit_SIT irq_context: 0 (wq_completion)netns net_cleanup_work 
pernet_ops_rwsem rtnl_mutex rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->mcast.work)->work) rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->mcast.work)->work) rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->mcast.work)->work) rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex remove_cache_srcu irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex remove_cache_srcu quarantine_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex remove_cache_srcu &c->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex remove_cache_srcu &n->list_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex remove_cache_srcu &obj_hash[i].lock irq_context: 0 &mm->mmap_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 &mm->mmap_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 &mm->mmap_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock ptlock_ptr(page)#2 rcu_read_lock &p->pi_lock irq_context: 0 &mm->mmap_lock ptlock_ptr(page)#2 rcu_read_lock &p->pi_lock &rq->__lock irq_context: 0 &mm->mmap_lock ptlock_ptr(page)#2 rcu_read_lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 mapping.invalidate_lock jbd2_handle &ei->i_data_sem &ei->i_data_sem/1 batched_entropy_u32.lock crngs.lock irq_context: 0 mapping.invalidate_lock &c->lock irq_context: 0 mapping.invalidate_lock rcu_read_lock &stopper->lock irq_context: 0 mapping.invalidate_lock rcu_read_lock &stop_pi_lock irq_context: 0 mapping.invalidate_lock rcu_read_lock &stop_pi_lock &rq->__lock irq_context: 0 mapping.invalidate_lock rcu_read_lock &stop_pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &ht->mutex rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex _xmit_TUNNEL irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &bgl->locks[i].lock irq_context: 0 &group->mark_mutex &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 &ei->i_es_lock key#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 remove_cache_srcu irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 remove_cache_srcu quarantine_lock irq_context: 0 &iint->mutex &____s->seqcount#2 irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 rcu_read_lock &rq->__lock irq_context: 0 nf_sockopt_mutex &rq->__lock irq_context: 0 nf_sockopt_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &____s->seqcount#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_read_lock &pool->lock/1 
pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &idev->mc_lock _xmit_ETHER irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &idev->mc_lock _xmit_ETHER &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &idev->mc_lock _xmit_ETHER pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &idev->mc_lock _xmit_ETHER krc.lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &c->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex _xmit_ETHER irq_context: 0 cb_lock nlk_cb_mutex-GENERIC genl_mutex irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &c->lock irq_context: 0 cb_lock nlk_cb_mutex-GENERIC genl_mutex rcu_read_lock rcu_node_0 irq_context: 0 cb_lock nlk_cb_mutex-GENERIC genl_mutex rcu_read_lock &rq->__lock irq_context: 0 cb_lock nlk_cb_mutex-GENERIC genl_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_INET6 batched_entropy_u32.lock crngs.lock irq_context: 0 cb_lock nlk_cb_mutex-GENERIC genl_mutex &rq->__lock irq_context: 0 cb_lock nlk_cb_mutex-GENERIC genl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem batched_entropy_u8.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem kfence_freelist_lock irq_context: 0 cb_lock nlk_cb_mutex-GENERIC genl_mutex rcu_node_0 irq_context: 0 &xs->mutex &mm->mmap_lock fs_reclaim &rq->__lock irq_context: 0 &xs->mutex &mm->mmap_lock fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock nlk_cb_mutex-GENERIC genl_mutex rcu_read_lock &rcu_state.gp_wq irq_context: 0 cb_lock nlk_cb_mutex-GENERIC genl_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 cb_lock nlk_cb_mutex-GENERIC genl_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 cb_lock nlk_cb_mutex-GENERIC genl_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &xs->mutex &mm->mmap_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 &xs->mutex &mm->mmap_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 &xs->mutex &mm->mmap_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bat_events &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_NETLINK &rq->__lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_NETLINK &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock nlk_cb_mutex-GENERIC rcu_read_lock &____s->seqcount irq_context: 0 cb_lock nlk_cb_mutex-GENERIC rcu_read_lock pool_lock#2 irq_context: 0 cb_lock nlk_cb_mutex-GENERIC rcu_read_lock rcu_read_lock pool_lock#2 irq_context: 0 cb_lock nlk_cb_mutex-GENERIC rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 cb_lock nlk_cb_mutex-GENERIC rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 cb_lock nlk_cb_mutex-GENERIC rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock nlk_cb_mutex-GENERIC rcu_read_lock &obj_hash[i].lock irq_context: 0 cb_lock nlk_cb_mutex-GENERIC rcu_read_lock rcu_read_lock_bh &obj_hash[i].lock 
irq_context: 0 cb_lock nlk_cb_mutex-GENERIC rcu_read_lock rcu_read_lock_bh pool_lock#2 irq_context: softirq (&peer->timer_retransmit_handshake) irq_context: softirq (&peer->timer_retransmit_handshake) &peer->endpoint_lock irq_context: softirq (&peer->timer_retransmit_handshake) &peer->endpoint_lock &obj_hash[i].lock irq_context: softirq (&peer->timer_retransmit_handshake) &peer->endpoint_lock pool_lock#2 irq_context: softirq (&peer->timer_retransmit_handshake) rcu_read_lock_bh tk_core.seq.seqcount irq_context: softirq (&peer->timer_retransmit_handshake) rcu_read_lock_bh rcu_read_lock &pool->lock/1 irq_context: softirq (&peer->timer_retransmit_handshake) rcu_read_lock_bh rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: softirq (&peer->timer_retransmit_handshake) rcu_read_lock_bh rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: softirq (&peer->timer_retransmit_handshake) rcu_read_lock_bh rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: softirq (&peer->timer_retransmit_handshake) rcu_read_lock_bh rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: softirq (&peer->timer_retransmit_handshake) rcu_read_lock_bh rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 rcu_read_lock &rcu_state.gp_wq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond3 (work_completion)(&(&bond->ad_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_CAN &mm->mmap_lock ptlock_ptr(page)#2 irq_context: 0 &mm->mmap_lock fs_reclaim mmu_notifier_invalidate_range_start rcu_node_0 irq_context: 0 &mm->mmap_lock fs_reclaim mmu_notifier_invalidate_range_start &rcu_state.expedited_wq irq_context: 0 &mm->mmap_lock fs_reclaim mmu_notifier_invalidate_range_start &rcu_state.expedited_wq &p->pi_lock irq_context: 0 &mm->mmap_lock fs_reclaim mmu_notifier_invalidate_range_start &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 &mm->mmap_lock fs_reclaim mmu_notifier_invalidate_range_start &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &type->i_mutex_dir_key#3 &rcu_state.expedited_wq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 jbd2_handle &rcu_state.expedited_wq &p->pi_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 jbd2_handle &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 jbd2_handle &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 kn->active#5 &kernfs_locks->open_file_mutex[count] batched_entropy_u8.lock irq_context: 0 kn->active#5 &kernfs_locks->open_file_mutex[count] kfence_freelist_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &rdev->bss_lock quarantine_lock irq_context: 0 pernet_ops_rwsem remove_cache_srcu &rq->__lock irq_context: 0 pernet_ops_rwsem remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem __ip_vs_app_mutex fs_reclaim &rq->__lock irq_context: 0 pernet_ops_rwsem 
__ip_vs_app_mutex fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &xs->mutex rcu_read_lock rcu_node_0 irq_context: 0 &xs->mutex rcu_read_lock &rq->__lock irq_context: 0 &xs->mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 jbd2_handle batched_entropy_u32.lock crngs.lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &xa->xa_lock#6 &n->list_lock irq_context: 0 &xs->mutex rcu_read_lock &rcu_state.expedited_wq irq_context: 0 &xs->mutex rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 &xs->mutex rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 &xs->mutex rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &xs->mutex &lock->wait_lock irq_context: 0 lock prog_idr_lock &c->lock irq_context: 0 lock link_idr_lock &c->lock irq_context: 0 tracepoints_mutex &c->lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq (&hsr->announce_timer) rcu_read_lock &hsr->seqnr_lock rcu_read_lock &____s->seqcount#2 irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock#2 rcu_read_lock_bh &obj_hash[i].lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock#2 rcu_read_lock_bh pool_lock#2 irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock#2 rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock#2 rcu_read_lock_bh rcu_read_lock &list->lock#5 irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock batched_entropy_u32.lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &____s->seqcount#7 irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock pool_lock#2 irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock &____s->seqcount#7 irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &nf_nat_locks[i] irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &nf_conntrack_locks[i] irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 batched_entropy_u8.lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock 
rcu_read_lock &____s->seqcount#10 irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: 0 &xt[i].mutex remove_cache_srcu &c->lock irq_context: 0 &xt[i].mutex remove_cache_srcu &n->list_lock irq_context: 0 &xt[i].mutex remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 &xt[i].mutex remove_cache_srcu rcu_read_lock rcu_node_0 irq_context: 0 &xt[i].mutex remove_cache_srcu rcu_read_lock &rq->__lock irq_context: 0 &xt[i].mutex remove_cache_srcu rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &xt[i].mutex remove_cache_srcu &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &c->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex cpu_hotplug_lock rcu_read_lock &pool->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex cpu_hotplug_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex cpu_hotplug_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex cpu_hotplug_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex cpu_hotplug_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex cpu_hotplug_lock (work_completion)(flush) irq_context: 0 (wq_completion)events_highpri (work_completion)(flush) irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex cpu_hotplug_lock &x->wait#10 irq_context: 0 (wq_completion)events_highpri (work_completion)(flush) &list->lock#5 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex cpu_hotplug_lock &rq->__lock irq_context: 0 (wq_completion)events_highpri (work_completion)(&barr->work) irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex cpu_hotplug_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_highpri (work_completion)(&barr->work) &x->wait#10 irq_context: 0 (wq_completion)events_highpri (work_completion)(&barr->work) &x->wait#10 &p->pi_lock irq_context: 0 (wq_completion)events_highpri (work_completion)(&barr->work) &x->wait#10 &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events_highpri (work_completion)(&barr->work) &x->wait#10 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex cpu_hotplug_lock &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex _xmit_IPGRE irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex.wait_lock irq_context: 0 cb_lock nlk_cb_mutex-GENERIC &____s->seqcount#2 irq_context: 0 rtnl_mutex &xs->mutex &zone->lock &____s->seqcount irq_context: 0 mapping.invalidate_lock &ei->i_es_lock key#6 irq_context: 0 &sb->s_type->i_mutex_key#10 &xs->mutex 
rcu_state.exp_mutex &rnp->exp_wq[2] irq_context: 0 pernet_ops_rwsem rtnl_mutex uevent_sock_mutex &n->list_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex uevent_sock_mutex &n->list_lock &c->lock irq_context: 0 tracepoints_mutex cpu_hotplug_lock jump_label_mutex text_mutex &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &fn->fou_lock irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6 rcu_read_lock &pcp->lock &zone->lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx rcu_read_lock &rq->__lock irq_context: 0 &mm->mmap_lock sb_pagefaults mapping.invalidate_lock &n->list_lock irq_context: 0 &mm->mmap_lock sb_pagefaults mapping.invalidate_lock &n->list_lock &c->lock irq_context: softirq slock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock &n->list_lock irq_context: softirq slock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock &n->list_lock &c->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem &n->list_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem &n->list_lock &c->lock irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem &c->lock irq_context: 0 sb_writers#7 &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 mapping.invalidate_lock &sbi->s_writepages_rwsem lock#4 &lruvec->lru_lock irq_context: 0 &mm->mmap_lock sb_pagefaults batched_entropy_u8.lock irq_context: 0 &mm->mmap_lock sb_pagefaults kfence_freelist_lock irq_context: 0 &mm->mmap_lock sb_pagefaults &meta->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &n->list_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &n->list_lock &c->lock irq_context: 0 sk_lock-AF_INET6 &pcp->lock &zone->lock irq_context: 0 pernet_ops_rwsem rtnl_mutex remove_cache_srcu &rq->__lock irq_context: 0 pernet_ops_rwsem rtnl_mutex remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &rq->__lock &cfs_rq->removed.lock irq_context: 0 &ei->i_data_sem &sem->wait_lock irq_context: 0 &ei->i_data_sem &rq->__lock irq_context: 0 &ei->i_data_sem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem ipvs->sync_mutex irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem hwsim_radio_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem hwsim_netgroup_ida.xa_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rdma_nets_rwsem irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rdma_nets_rwsem rdma_nets.xa_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem devices_rwsem irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_read_lock rhashtable_bucket irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_read_lock rcu_read_lock &pool->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_read_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work 
pernet_ops_rwsem nl_table_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem nl_table_wait.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-clock-AF_NETLINK irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &nlk->wait irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem wlock-AF_NETLINK irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &dir->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rdma_nets.xa_lock irq_context: 0 &sighand->siglock &p->pi_lock &cfs_rq->removed.lock irq_context: 0 sk_lock-AF_INET fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&ht->run_work) &ht->mutex &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&ht->run_work) &ht->mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq (&peer->hb_timer) slock-AF_INET6 rcu_read_lock rcu_read_lock &nf_conntrack_locks[i] batched_entropy_u8.lock irq_context: softirq (&peer->hb_timer) slock-AF_INET6 rcu_read_lock rcu_read_lock &c->lock irq_context: softirq (&peer->hb_timer) slock-AF_INET6 rcu_read_lock rcu_read_lock &n->list_lock irq_context: softirq (&peer->hb_timer) slock-AF_INET6 rcu_read_lock rcu_read_lock &n->list_lock &c->lock irq_context: 0 sb_writers#3 oom_adj_mutex rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#3 oom_adj_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex &bat_priv->tt.commit_lock &____s->seqcount#2 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex &bat_priv->tt.commit_lock &n->list_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex &bat_priv->tt.commit_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex rcu_read_lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex rcu_read_lock fill_pool_map-wait-type-override rcu_node_0 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex rcu_read_lock fill_pool_map-wait-type-override &rq->__lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex rcu_read_lock fill_pool_map-wait-type-override &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex rcu_read_lock fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex rcu_read_lock fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex rcu_read_lock fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)bat_events 
(work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex rcu_read_lock fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex rcu_read_lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem rcu_read_lock &rcu_state.gp_wq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&tbl->gc_work)->work) &tbl->lock &n->lock irq_context: 0 (wq_completion)wg-crypt-wg2#4 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &ul->lock irq_context: 0 sb_writers#4 sb_internal &____s->seqcount#2 irq_context: 0 sb_writers#4 sb_internal &____s->seqcount irq_context: 0 sb_writers#4 &mapping->private_lock rcu_read_lock rcu_read_lock rcu_read_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 rcu_read_lock batched_entropy_u8.lock irq_context: 0 sb_writers#4 lock#4 &lruvec->lru_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem rcu_read_lock &rcu_state.expedited_wq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &sem->wait_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &p->pi_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle rcu_node_0 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &sem->wait_lock irq_context: 0 cb_lock nlk_cb_mutex-GENERIC genl_mutex rcu_read_lock &rcu_state.expedited_wq irq_context: 0 cb_lock nlk_cb_mutex-GENERIC genl_mutex rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 cb_lock nlk_cb_mutex-GENERIC genl_mutex rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 cb_lock nlk_cb_mutex-GENERIC genl_mutex rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &rq->__lock &cfs_rq->removed.lock irq_context: 0 stock_lock rcu_read_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#10 &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) 
&rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_TIPC remove_cache_srcu pool_lock#2 irq_context: 0 &smc->clcsock_release_lock k-sk_lock-AF_INET fs_reclaim irq_context: 0 &smc->clcsock_release_lock k-sk_lock-AF_INET fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &smc->clcsock_release_lock k-sk_lock-AF_INET &rq->__lock irq_context: 0 &smc->clcsock_release_lock k-sk_lock-AF_INET &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-crypt-wg1#4 (work_completion)(&peer->transmit_packet_work) &base->lock irq_context: 0 (wq_completion)wg-crypt-wg1#4 (work_completion)(&peer->transmit_packet_work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg1#4 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 sb_writers#4 &p->pi_lock &cfs_rq->removed.lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock tk_core.seq.seqcount irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &fq->lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->active_txq_lock[i] irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock &local->active_txq_lock[i] irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock &local->queue_stop_reason_lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock &fq->lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock &fq->lock tk_core.seq.seqcount irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock tk_core.seq.seqcount irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock hwsim_radio_lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock hwsim_radio_lock pool_lock#2 irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock hwsim_radio_lock &list->lock#19 irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock &list->lock#19 irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 tomoyo_ss &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex _xmit_LOOPBACK irq_context: softirq (&peer->timer_persistent_keepalive) &____s->seqcount#2 irq_context: softirq (&peer->timer_persistent_keepalive) &____s->seqcount irq_context: 0 &ei->i_data_sem mmu_notifier_invalidate_range_start irq_context: 0 &ei->i_data_sem pool_lock#2 irq_context: 0 &ei->i_data_sem &ei->i_es_lock irq_context: 0 
(wq_completion)bat_events (work_completion)(&(&bat_priv->mcast.work)->work) rcu_read_lock &n->list_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->mcast.work)->work) rcu_read_lock &n->list_lock &c->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 remove_cache_srcu irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 remove_cache_srcu quarantine_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 remove_cache_srcu &c->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 remove_cache_srcu &n->list_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 remove_cache_srcu &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &mapping->private_lock irq_context: 0 &mm->mmap_lock sb_pagefaults &journal->j_state_lock &journal->j_wait_transaction_locked irq_context: 0 &mm->mmap_lock sb_pagefaults jbd2_handle irq_context: 0 (wq_completion)wg-crypt-wg1#4 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock batched_entropy_u8.lock irq_context: 0 (wq_completion)wg-crypt-wg1#4 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock kfence_freelist_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 &cfs_rq->removed.lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &data->lock &obj_hash[i].lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &data->lock &base->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &data->lock &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &xa->xa_lock#6 &n->list_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &xa->xa_lock#6 &n->list_lock &c->lock irq_context: 0 cb_lock nlk_cb_mutex-GENERIC genl_mutex __ip_vs_mutex irq_context: 0 cb_lock nlk_cb_mutex-GENERIC genl_mutex __ip_vs_mutex &rq->__lock irq_context: 0 cb_lock nlk_cb_mutex-GENERIC genl_mutex __ip_vs_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq (&peer->hb_timer) slock-AF_INET6 rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: softirq (&peer->hb_timer) slock-AF_INET6 rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: softirq (&peer->hb_timer) slock-AF_INET6 rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 &fsnotify_mark_srcu &group->notification_waitq &ep->lock &ep->wq &p->pi_lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 &journal->j_state_lock &journal->j_wait_transaction_locked irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 jbd2_handle irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 fill_pool_map-wait-type-override pool_lock irq_context: 0 &xt[i].mutex &mm->mmap_lock &cfs_rq->removed.lock irq_context: 0 &xt[i].mutex &mm->mmap_lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 
jbd2_handle &journal->j_wait_updates &p->pi_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &journal->j_wait_updates &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &journal->j_wait_updates &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &journal->j_state_lock &journal->j_wait_transaction_locked &p->pi_lock irq_context: 0 &journal->j_state_lock &journal->j_wait_transaction_locked &p->pi_lock &rq->__lock irq_context: 0 &journal->j_state_lock &journal->j_wait_transaction_locked &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 jbd2_handle &____s->seqcount irq_context: 0 rcu_read_lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 rcu_read_lock fill_pool_map-wait-type-override pool_lock irq_context: 0 &mm->mmap_lock sb_pagefaults jbd2_handle &ret->b_state_lock irq_context: 0 &mm->mmap_lock sb_pagefaults jbd2_handle bit_wait_table + i irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &(&net->nexthop.notifier_chain)->rwsem irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &hn->hn_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &caifn->caifdevs.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &caifn->caifdevs.lock rcu_state.exp_mutex rcu_node_0 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &caifn->caifdevs.lock rcu_state.exp_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &caifn->caifdevs.lock rcu_state.exp_mutex rcu_read_lock &pool->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &caifn->caifdevs.lock rcu_state.exp_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &caifn->caifdevs.lock rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &caifn->caifdevs.lock rcu_state.exp_mutex &rnp->exp_wq[2] irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &caifn->caifdevs.lock rcu_state.exp_mutex &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &caifn->caifdevs.lock rcu_state.exp_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)inet_frag_wq irq_context: 0 (wq_completion)inet_frag_wq (work_completion)(&fqdir->destroy_work) irq_context: 0 (wq_completion)inet_frag_wq (work_completion)(&fqdir->destroy_work) (work_completion)(&ht->run_work) irq_context: 0 (wq_completion)inet_frag_wq (work_completion)(&fqdir->destroy_work) &ht->mutex irq_context: 0 (wq_completion)inet_frag_wq (work_completion)(&fqdir->destroy_work) &ht->mutex &obj_hash[i].lock irq_context: 0 (wq_completion)inet_frag_wq (work_completion)(&fqdir->destroy_work) &ht->mutex pool_lock#2 irq_context: 0 (wq_completion)inet_frag_wq (work_completion)(&fqdir->destroy_work) rcu_read_lock 
&pool->lock irq_context: 0 (wq_completion)inet_frag_wq (work_completion)(&fqdir->destroy_work) rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events fqdir_free_work irq_context: 0 (wq_completion)events fqdir_free_work rcu_state.barrier_mutex irq_context: 0 (wq_completion)events fqdir_free_work rcu_state.barrier_mutex rcu_state.barrier_lock irq_context: 0 (wq_completion)events fqdir_free_work rcu_state.barrier_mutex rcu_state.barrier_lock &obj_hash[i].lock irq_context: 0 (wq_completion)events fqdir_free_work rcu_state.barrier_mutex &x->wait#24 irq_context: 0 (wq_completion)events fqdir_free_work rcu_state.barrier_mutex &pool->lock irq_context: 0 (wq_completion)events fqdir_free_work rcu_state.barrier_mutex &rq->__lock irq_context: 0 (wq_completion)events fqdir_free_work rcu_state.barrier_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &c->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &bgl->locks[i].lock &pa->pa_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &bgl->locks[i].lock &lg->lg_prealloc_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &bgl->locks[i].lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &bgl->locks[i].lock pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &caifn->caifdevs.lock &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &caifn->caifdevs.lock pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &caifn->caifdevs.lock &this->info_list_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &pnettable->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &pnetids_ndev->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-sk_lock-AF_INET6/1 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-sk_lock-AF_INET6/1 k-slock-AF_INET6 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-sk_lock-AF_INET6/1 rlock-AF_INET6 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-sk_lock-AF_INET6/1 &list->lock#25 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &net->sctp.addr_wq_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &net->sctp.addr_wq_lock k-slock-AF_INET6/1 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &net->sctp.addr_wq_lock k-slock-AF_INET6/1 &sctp_ep_hashtable[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &net->sctp.addr_wq_lock k-slock-AF_INET6/1 &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &net->sctp.addr_wq_lock k-slock-AF_INET6/1 pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &net->sctp.addr_wq_lock k-slock-AF_INET6/1 k-clock-AF_INET6 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &net->sctp.addr_wq_lock &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-sk_lock-AF_INET6 &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-slock-AF_INET6 &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem 
k-slock-AF_INET6 pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-slock-AF_INET6 elock-AF_INET6 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &cfs_rq->removed.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &journal->j_state_lock &journal->j_wait_commit irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &journal->j_wait_commit irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &journal->j_wait_done_commit irq_context: 0 &ret->b_state_lock rcu_read_lock pool_lock#2 irq_context: 0 &ret->b_state_lock &obj_hash[i].lock irq_context: 0 &type->i_mutex_dir_key#3 sb_writers#4 &journal->j_state_lock irq_context: 0 &type->i_mutex_dir_key#3 sb_writers#4 &journal->j_state_lock tk_core.seq.seqcount irq_context: 0 &type->i_mutex_dir_key#3 sb_writers#4 &journal->j_state_lock &obj_hash[i].lock irq_context: 0 &type->i_mutex_dir_key#3 sb_writers#4 &journal->j_state_lock &base->lock irq_context: 0 &type->i_mutex_dir_key#3 sb_writers#4 &journal->j_state_lock &base->lock &obj_hash[i].lock irq_context: 0 &type->i_mutex_dir_key#3 sb_writers#4 jbd2_handle bit_wait_table + i irq_context: 0 &type->i_mutex_dir_key#3 sb_writers#4 jbd2_handle &rq->__lock irq_context: 0 &type->i_mutex_dir_key#3 sb_writers#4 jbd2_handle &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_read_lock &rcu_state.gp_wq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock &zone->lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ei->i_data_sem &rq->__lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ei->i_data_sem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &mm->mmap_lock rcu_read_lock rcu_read_lock ptlock_ptr(page)#2 rcu_read_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 &c->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 &____s->seqcount#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 &____s->seqcount irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET slock-AF_INET fill_pool_map-wait-type-override &c->lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET slock-AF_INET fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET slock-AF_INET fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &ei->i_prealloc_lock 
&pa->pa_lock#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &ei->i_prealloc_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &journal->j_state_lock &journal->j_wait_commit &p->pi_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &journal->j_state_lock &journal->j_wait_commit &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &journal->j_state_lock &journal->j_wait_commit &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-crypt-wg1#8 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock irq_context: 0 &type->i_mutex_dir_key#3 sb_writers#4 &journal->j_state_lock &journal->j_wait_transaction_locked irq_context: 0 &type->i_mutex_dir_key#3 sb_writers#4 jbd2_handle irq_context: 0 &journal->j_state_lock &journal->j_wait_transaction_locked &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem bit_wait_table + i irq_context: 0 sb_writers#4 bit_wait_table + i irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &sb->s_type->i_lock_key#22 bit_wait_table + i irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ei->i_data_sem &c->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem key#3 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem key#15 irq_context: 0 remove_cache_srcu rcu_read_lock &cfs_rq->removed.lock irq_context: 0 remove_cache_srcu rcu_read_lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &ret->b_state_lock bit_wait_table + i irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem key#3 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &lg->lg_mutex mmu_notifier_invalidate_range_start irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &lg->lg_mutex pool_lock#2 irq_context: 0 pernet_ops_rwsem rtnl_mutex fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 &smc->clcsock_release_lock k-sk_lock-AF_INET pool_lock#2 irq_context: 0 &smc->clcsock_release_lock k-sk_lock-AF_INET &mm->mmap_lock irq_context: 0 &smc->clcsock_release_lock k-sk_lock-AF_INET &obj_hash[i].lock irq_context: 0 sk_lock-AF_TIPC &mm->mmap_lock rcu_read_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 &pnsocks.lock irq_context: 0 resource_mutex irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 rcu_read_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal &journal->j_state_lock &journal->j_wait_transaction_locked irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &journal->j_state_lock &journal->j_wait_transaction_locked irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle irq_context: 0 fill_pool_map-wait-type-override 
&rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &journal->j_state_lock &journal->j_wait_transaction_locked irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal &journal->j_state_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal &journal->j_state_lock tk_core.seq.seqcount irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal &journal->j_state_lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal &journal->j_state_lock &base->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal &journal->j_state_lock &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ret->b_state_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &____s->seqcount irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle pool_lock#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle bit_wait_table + i irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle bit_wait_table + i irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &sbi->s_orphan_lock &ret->b_state_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle rcu_read_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &fsnotify_mark_srcu &c->lock irq_context: 0 (wq_completion)events fqdir_free_work &obj_hash[i].lock irq_context: 0 (wq_completion)events fqdir_free_work pool_lock#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 &____s->seqcount#2 irq_context: 0 sk_lock-AF_INET remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 key#3 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 key#15 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem rcu_read_lock &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#8 tomoyo_ss pool_lock#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem &journal->j_state_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem &bgl->locks[i].lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 
mapping.invalidate_lock jbd2_handle &ei->i_data_sem &bgl->locks[i].lock pool_lock#2 irq_context: 0 &f->f_pos_lock &rq->__lock irq_context: 0 sb_writers#4 tomoyo_ss &n->list_lock &c->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 mapping.invalidate_lock &sbi->s_writepages_rwsem rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq rcu_read_lock &xa->xa_lock#6 &pl->lock irq_context: softirq rcu_read_lock &xa->xa_lock#6 &pl->lock key#12 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 mapping.invalidate_lock &journal->j_state_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 mapping.invalidate_lock &journal->j_state_lock tk_core.seq.seqcount irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 mapping.invalidate_lock &journal->j_state_lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 mapping.invalidate_lock &journal->j_state_lock &base->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 mapping.invalidate_lock &journal->j_state_lock &base->lock &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 remove_cache_srcu rcu_read_lock rcu_node_0 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 remove_cache_srcu rcu_read_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &xa->xa_lock#6 rcu_read_lock pool_lock#2 irq_context: 0 pernet_ops_rwsem rtnl_mutex fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_node_0 irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_NETLINK rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem rcu_node_0 irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET &pcp->lock &zone->lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &xa->xa_lock#6 &n->list_lock &c->lock irq_context: 0 &mm->mmap_lock sb_writers#4 jbd2_handle &ret->b_state_lock irq_context: 0 &mm->mmap_lock sb_writers#4 jbd2_handle &ret->b_state_lock &journal->j_list_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &rdev->bss_lock krc.lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &rdev->bss_lock krc.lock &base->lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &rdev->bss_lock krc.lock &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &wb->list_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &wb->list_lock &sb->s_type->i_lock_key#22 irq_context: 0 (wq_completion)events (work_completion)(&(&krcp->krw_arr[i].rcu_work)->work) rcu_callback stock_lock irq_context: softirq (&peer->timer_persistent_keepalive) &pcp->lock &zone->lock irq_context: 0 &mm->mmap_lock ptlock_ptr(page)#2 rcu_read_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 
pernet_ops_rwsem fs_reclaim &rq->__lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_TIPC &tn->nametbl_lock &service->lock &c->lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_TIPC &tn->nametbl_lock &service->lock &n->list_lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_TIPC &tn->nametbl_lock &service->lock &n->list_lock &c->lock irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex nf_hook_mutex &rq->__lock irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex nf_hook_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 mapping.invalidate_lock &ei->i_data_sem &c->lock irq_context: 0 &mm->mmap_lock rcu_read_lock rcu_read_lock ptlock_ptr(page)#2 rcu_read_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 &mm->mmap_lock sb_pagefaults jbd2_handle &mapping->private_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &bgl->locks[i].lock &pa->pa_lock#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &bgl->locks[i].lock &obj_hash[i].lock pool_lock irq_context: 0 &mm->mmap_lock sb_pagefaults &journal->j_state_lock irq_context: 0 &mm->mmap_lock sb_pagefaults &journal->j_state_lock tk_core.seq.seqcount irq_context: 0 &mm->mmap_lock sb_pagefaults &journal->j_state_lock &obj_hash[i].lock irq_context: 0 &mm->mmap_lock sb_pagefaults &journal->j_state_lock &base->lock irq_context: 0 &mm->mmap_lock sb_pagefaults &journal->j_state_lock &base->lock &obj_hash[i].lock irq_context: 0 &tsk->futex_exit_mutex &rq->__lock &cfs_rq->removed.lock irq_context: 0 &tsk->futex_exit_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_lock_key#22 &xa->xa_lock#6 &obj_hash[i].lock pool_lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ei->i_data_sem &ret->b_state_lock &journal->j_list_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_read_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_read_lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &xa->xa_lock#6 stock_lock irq_context: 0 &group->notification_waitq &ep->lock &ep->wq &p->pi_lock &cfs_rq->removed.lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock &tbl->lock &n->lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock &tbl->lock pool_lock#2 irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock &tbl->lock &n->lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock &tbl->lock &n->lock &____s->seqcount#9 irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock &tbl->lock nl_table_lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock &tbl->lock &obj_hash[i].lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock &tbl->lock nl_table_wait.lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock &tbl->lock rcu_read_lock lock#8 irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock &tbl->lock rcu_read_lock id_table_lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock &tbl->lock &dir->lock#2 irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock &tbl->lock krc.lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock &tbl->lock &obj_hash[i].lock pool_lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock &tbl->lock &c->lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock &tbl->lock &____s->seqcount#2 
irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock &tbl->lock &____s->seqcount irq_context: 0 cb_lock nlk_cb_mutex-GENERIC genl_mutex __ip_vs_mutex &s->lock irq_context: 0 (wq_completion)events (work_completion)(&nlk->work) &data->lock irq_context: 0 (wq_completion)bat_events &cfs_rq->removed.lock irq_context: 0 (wq_completion)bat_events &obj_hash[i].lock irq_context: 0 &type->i_mutex_dir_key#3 remove_cache_srcu irq_context: 0 &type->i_mutex_dir_key#3 remove_cache_srcu quarantine_lock irq_context: 0 &type->i_mutex_dir_key#3 remove_cache_srcu &c->lock irq_context: 0 &type->i_mutex_dir_key#3 remove_cache_srcu &n->list_lock irq_context: 0 &type->i_mutex_dir_key#3 remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 &type->i_mutex_dir_key#3 remove_cache_srcu &obj_hash[i].lock irq_context: 0 &type->i_mutex_dir_key#3 remove_cache_srcu &rq->__lock irq_context: 0 &type->i_mutex_dir_key#3 remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq rcu_read_lock rcu_read_lock &pcp->lock &zone->lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ei->i_data_sem &bgl->locks[i].lock &obj_hash[i].lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ei->i_data_sem &bgl->locks[i].lock pool_lock#2 irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ei->i_data_sem &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss rcu_read_lock pool_lock#2 irq_context: 0 &mm->mmap_lock sb_pagefaults mapping.invalidate_lock &type->s_umount_key#31 rcu_read_lock pool_lock#2 irq_context: 0 &mm->mmap_lock sb_pagefaults mapping.invalidate_lock &type->s_umount_key#31 rcu_read_lock &wb->work_lock irq_context: 0 &mm->mmap_lock sb_pagefaults mapping.invalidate_lock &type->s_umount_key#31 rcu_read_lock &wb->work_lock &obj_hash[i].lock irq_context: 0 &mm->mmap_lock sb_pagefaults mapping.invalidate_lock &type->s_umount_key#31 rcu_read_lock &wb->work_lock &base->lock irq_context: 0 &mm->mmap_lock sb_pagefaults mapping.invalidate_lock &type->s_umount_key#31 rcu_read_lock &wb->work_lock &base->lock &obj_hash[i].lock irq_context: 0 &mm->mmap_lock sb_pagefaults mapping.invalidate_lock &type->s_umount_key#31 rcu_read_lock &wb->work_lock rcu_read_lock &pool->lock/1 irq_context: 0 &mm->mmap_lock sb_pagefaults mapping.invalidate_lock &type->s_umount_key#31 rcu_read_lock &wb->work_lock rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 &mm->mmap_lock sb_pagefaults mapping.invalidate_lock &type->s_umount_key#31 rcu_read_lock &wb->work_lock rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 &mm->mmap_lock sb_pagefaults mapping.invalidate_lock &type->s_umount_key#31 rcu_read_lock &wb->work_lock rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 &mm->mmap_lock sb_pagefaults mapping.invalidate_lock &type->s_umount_key#31 rcu_read_lock &wb->work_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 &mm->mmap_lock sb_pagefaults mapping.invalidate_lock &type->s_umount_key#31 rcu_read_lock &wb->work_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock sb_pagefaults mapping.invalidate_lock &type->s_umount_key#31 rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sb->s_type->i_lock_key#22 irq_context: 0 &mm->mmap_lock sb_pagefaults mapping.invalidate_lock &type->s_umount_key#31 rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem 
irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem &xa->xa_lock#6 irq_context: 0 &mm->mmap_lock sb_pagefaults mapping.invalidate_lock &type->s_umount_key#31 &bdi->wb_waitq irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem mmu_notifier_invalidate_range_start irq_context: 0 &mm->mmap_lock sb_pagefaults mapping.invalidate_lock &type->s_umount_key#31 &rq->__lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem pool_lock#2 irq_context: 0 &mm->mmap_lock sb_pagefaults mapping.invalidate_lock &type->s_umount_key#31 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem &mapping->i_mmap_rwsem mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem &mapping->i_mmap_rwsem ptlock_ptr(page)#2 irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem &mapping->private_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem &mapping->private_lock rcu_read_lock &memcg->move_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem rcu_read_lock &memcg->move_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem rcu_read_lock &xa->xa_lock#6 irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem rcu_read_lock &xa->xa_lock#6 &s->s_inode_wblist_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem tk_core.seq.seqcount irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem &dd->lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem lock#4 irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem lock#5 irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem &obj_hash[i].lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem &journal->j_state_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem jbd2_handle irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem jbd2_handle lock#4 irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem jbd2_handle lock#5 irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem jbd2_handle mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem jbd2_handle pool_lock#2 irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem jbd2_handle &ei->i_es_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem mmu_notifier_invalidate_range_start irq_context: 0 
(wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem pool_lock#2 irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ei->i_prealloc_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &obj_hash[i].lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem key#3 irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ei->i_raw_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &(ei->i_block_reservation_lock) irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &(ei->i_block_reservation_lock) key#15 irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &sb->s_type->i_lock_key#22 irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ei->i_es_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ei->i_es_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ei->i_es_lock pool_lock#2 irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &memcg->move_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &xa->xa_lock#6 irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &xa->xa_lock#6 &s->s_inode_wblist_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock key#10 irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &xa->xa_lock#6 key#10 irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem jbd2_handle &ei->i_raw_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem rcu_read_lock &dd->lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem rcu_read_lock &virtscsi_vq->vq_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem jbd2_handle &journal->j_wait_reserved irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) pool_lock#2 
irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &bdi->wb_waitq irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &bdi->wb_waitq &p->pi_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &bdi->wb_waitq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &bdi->wb_waitq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &rq->__lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock sb_pagefaults mapping.invalidate_lock &sb->s_type->i_lock_key#22 irq_context: 0 &mm->mmap_lock sb_pagefaults mapping.invalidate_lock &wb->list_lock irq_context: 0 &mm->mmap_lock sb_pagefaults mapping.invalidate_lock &wb->list_lock &sb->s_type->i_lock_key#22 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &meta->lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &ei->i_es_lock &____s->seqcount#2 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &ei->i_es_lock &____s->seqcount irq_context: 0 sb_writers#8 kn->active#5 rcu_read_lock &rcu_state.gp_wq irq_context: 0 sb_writers#8 kn->active#5 rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 sb_writers#8 kn->active#5 rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#8 kn->active#5 rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond1 (work_completion)(&(&bond->mii_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh rcu_read_lock &list->lock#5 irq_context: softirq rcu_callback stock_lock rcu_read_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 pernet_ops_rwsem rtnl_mutex uevent_sock_mutex.wait_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 pernet_ops_rwsem rtnl_mutex stock_lock rcu_read_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 &mm->mmap_lock sb_pagefaults &sb->s_type->i_lock_key#22 irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 lock#4 &lruvec->lru_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: softirq rcu_read_lock rcu_read_lock &im->lock irq_context: 0 tracepoints_mutex rcu_state.exp_mutex rcu_node_0 irq_context: 0 tracepoints_mutex rcu_state.exp_mutex &obj_hash[i].lock irq_context: 0 tracepoints_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock irq_context: 0 tracepoints_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 tracepoints_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 tracepoints_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 tracepoints_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 tracepoints_mutex rcu_state.exp_mutex &rq->__lock irq_context: 0 tracepoints_mutex rcu_state.exp_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq 
irq_context: 0 &mm->mmap_lock sb_pagefaults rcu_read_lock rcu_node_0 irq_context: 0 &mm->mmap_lock sb_pagefaults rcu_read_lock &rq->__lock irq_context: softirq rcu_read_lock rcu_read_lock fill_pool_map-wait-type-override &pcp->lock &zone->lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock hwsim_radio_lock &c->lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock hwsim_radio_lock &____s->seqcount#2 irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock hwsim_radio_lock &____s->seqcount irq_context: 0 (wq_completion)events_long (work_completion)(&(&ipvs->defense_work)->work) &rq->__lock irq_context: 0 (wq_completion)events_long (work_completion)(&(&ipvs->defense_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 tracepoints_mutex rcu_state.exp_mutex &rnp->exp_wq[3] irq_context: 0 (wq_completion)events_long (work_completion)(&(&ipvs->defense_work)->work) &cfs_rq->removed.lock irq_context: 0 (wq_completion)events_long (work_completion)(&(&ipvs->defense_work)->work) pool_lock#2 irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_read_lock &rcu_state.expedited_wq irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem krc.lock irq_context: 0 sk_lock-AF_INET &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 tasklist_lock &sighand->siglock &(&sig->stats_lock)->lock &____s->seqcount#5 fill_pool_map-wait-type-override &c->lock irq_context: 0 tasklist_lock &sighand->siglock &(&sig->stats_lock)->lock &____s->seqcount#5 fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 tasklist_lock &sighand->siglock &(&sig->stats_lock)->lock &____s->seqcount#5 fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ei->i_data_sem &mapping->private_lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ei->i_data_sem &wb->list_lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ei->i_data_sem &wb->list_lock &sb->s_type->i_lock_key#22 irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ei->i_data_sem key#3 irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ei->i_data_sem &____s->seqcount#2 irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ei->i_data_sem &____s->seqcount irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ei->i_data_sem &journal->j_revoke_lock irq_context: 0 fill_pool_map-wait-type-override &cfs_rq->removed.lock irq_context: 0 fill_pool_map-wait-type-override &obj_hash[i].lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ei->i_data_sem &(ei->i_block_reservation_lock) irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#4 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events 
(work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &rdev->bss_lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 &xt[i].mutex &lock->wait_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &rdev->bss_lock fill_pool_map-wait-type-override pool_lock irq_context: 0 sk_lock-AF_INET fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 sk_lock-AF_INET fill_pool_map-wait-type-override &c->lock irq_context: 0 sk_lock-AF_INET fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 sk_lock-AF_INET fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 sk_lock-AF_INET fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) quarantine_lock irq_context: 0 pernet_ops_rwsem rcu_read_lock &tn->node_list_lock irq_context: 0 pernet_ops_rwsem ebt_mutex irq_context: 0 pernet_ops_rwsem &xt[i].mutex irq_context: 0 rtnl_mutex &dev->tx_global_lock irq_context: 0 pernet_ops_rwsem &xt[i].mutex &lock->wait_lock irq_context: 0 rtnl_mutex &dev->tx_global_lock _xmit_ETHER#2 irq_context: 0 pernet_ops_rwsem &xt[i].mutex &rq->__lock irq_context: 0 rtnl_mutex &dev->tx_global_lock &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem &xt[i].mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 tomoyo_ss mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_INET fs_reclaim &rq->__lock irq_context: 0 sk_lock-AF_INET slock-AF_INET &sk->sk_lock.wq irq_context: 0 pernet_ops_rwsem &lock->wait_lock irq_context: 0 pernet_ops_rwsem &nft_net->commit_mutex irq_context: 0 pernet_ops_rwsem netns_bpf_mutex irq_context: 0 pernet_ops_rwsem &rnp->exp_lock irq_context: 0 pernet_ops_rwsem rcu_state.exp_mutex irq_context: 0 pernet_ops_rwsem rcu_state.exp_mutex rcu_state.exp_mutex.wait_lock irq_context: 0 pernet_ops_rwsem rcu_state.exp_mutex &rq->__lock irq_context: 0 slock-AF_INET &sk->sk_lock.wq irq_context: 0 pernet_ops_rwsem rcu_state.exp_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 slock-AF_INET &sk->sk_lock.wq &p->pi_lock irq_context: 0 slock-AF_INET &sk->sk_lock.wq &p->pi_lock &rq->__lock irq_context: 0 slock-AF_INET &sk->sk_lock.wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_read_lock &rcu_state.expedited_wq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex rcu_state.exp_mutex.wait_lock irq_context: 0 rtnl_mutex &sch->q.lock irq_context: 0 pernet_ops_rwsem rcu_state.exp_mutex rcu_node_0 irq_context: 0 rtnl_mutex &br->lock &c->lock irq_context: 0 pernet_ops_rwsem rcu_state.exp_mutex &obj_hash[i].lock irq_context: 0 rtnl_mutex &br->lock pool_lock#2 irq_context: 0 pernet_ops_rwsem rcu_state.exp_mutex rcu_read_lock &pool->lock irq_context: 0 rtnl_mutex &br->lock &dir->lock#2 irq_context: 0 pernet_ops_rwsem rcu_state.exp_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex &br->lock deferred_lock irq_context: 0 pernet_ops_rwsem rcu_state.exp_mutex 
rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 rtnl_mutex &br->lock rcu_read_lock &pool->lock irq_context: 0 pernet_ops_rwsem rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 pernet_ops_rwsem rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &br->lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem rcu_state.exp_mutex &rnp->exp_wq[1] irq_context: 0 rtnl_mutex &br->lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 rtnl_mutex &br->lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 rtnl_mutex &br->lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &br->lock (console_sem).lock irq_context: 0 rtnl_mutex &br->lock console_lock console_srcu console_owner_lock irq_context: 0 rtnl_mutex &br->lock console_lock console_srcu console_owner irq_context: 0 rtnl_mutex &br->lock console_lock console_srcu console_owner &port_lock_key irq_context: 0 rtnl_mutex &br->lock console_lock console_srcu console_owner console_owner_lock irq_context: 0 rtnl_mutex &br->lock nl_table_lock irq_context: 0 rtnl_mutex &br->lock nl_table_wait.lock irq_context: 0 rtnl_mutex &br->lock &br->multicast_lock irq_context: 0 rtnl_mutex &br->lock &br->multicast_lock pool_lock#2 irq_context: 0 rtnl_mutex &br->lock &br->multicast_lock &c->lock irq_context: 0 rtnl_mutex &br->lock &br->multicast_lock &n->list_lock irq_context: 0 rtnl_mutex &br->lock &br->multicast_lock &n->list_lock &c->lock irq_context: 0 rtnl_mutex &br->lock &br->multicast_lock &dir->lock#2 irq_context: 0 rtnl_mutex &br->lock &br->multicast_lock deferred_lock irq_context: 0 rtnl_mutex &br->lock &br->multicast_lock nl_table_lock irq_context: 0 rtnl_mutex &br->lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 rtnl_mutex &br->lock &br->multicast_lock nl_table_wait.lock irq_context: 0 rtnl_mutex &br->lock &br->multicast_lock rcu_read_lock &pool->lock irq_context: 0 rtnl_mutex &br->lock &br->multicast_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex &br->lock &br->multicast_lock rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 rtnl_mutex &br->lock &br->multicast_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 rtnl_mutex &br->lock lweventlist_lock &c->lock irq_context: 0 (wq_completion)events_long (work_completion)(&br->mcast_gc_work) irq_context: 0 (wq_completion)events_long (work_completion)(&br->mcast_gc_work) &br->multicast_lock irq_context: 0 (wq_completion)events_long (work_completion)(&br->mcast_gc_work) (&p->rexmit_timer) irq_context: 0 (wq_completion)events_long (work_completion)(&br->mcast_gc_work) &obj_hash[i].lock irq_context: 0 (wq_completion)events_long (work_completion)(&br->mcast_gc_work) &base->lock irq_context: 0 (wq_completion)events_long (work_completion)(&br->mcast_gc_work) (&p->timer) irq_context: 0 (wq_completion)events_long (work_completion)(&br->mcast_gc_work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events_long (work_completion)(&br->mcast_gc_work) pool_lock#2 irq_context: 0 (wq_completion)events_long (work_completion)(&br->mcast_gc_work) krc.lock irq_context: 0 &ret->b_state_lock &journal->j_list_lock rcu_read_lock &xa->xa_lock#6 rcu_read_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 sb_writers#3 oom_adj_mutex rcu_read_lock &rcu_state.expedited_wq irq_context: 0 &vma->vm_lock->lock fs_reclaim mmu_notifier_invalidate_range_start 
&rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_INET &tcp_hashinfo.bhash[i].lock stock_lock irq_context: 0 sk_lock-AF_INET &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock stock_lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock &n->list_lock &c->lock irq_context: 0 sk_lock-AF_INET stock_lock irq_context: 0 sk_lock-AF_INET &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET &hashinfo->ehash_locks[i] irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock &____s->seqcount#7 irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock &nf_nat_locks[i] irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock &nf_conntrack_locks[i] batched_entropy_u8.lock irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: 0 sk_lock-AF_INET &ei->socket.wq.wait irq_context: 0 sk_lock-AF_INET rcu_read_lock tcp_metrics_lock irq_context: 0 sk_lock-AF_INET rcu_read_lock tcp_metrics_lock &c->lock irq_context: 0 sk_lock-AF_INET rcu_read_lock tcp_metrics_lock pool_lock#2 irq_context: 0 sk_lock-AF_INET &mm->mmap_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex (&br->hello_timer) irq_context: 0 rtnl_mutex (&br->topology_change_timer) irq_context: 0 rtnl_mutex (&br->tcn_timer) irq_context: 0 rtnl_mutex (work_completion)(&(&br->gc_work)->work) irq_context: 0 rtnl_mutex (&brmctx->ip4_mc_router_timer) irq_context: 0 rtnl_mutex (&brmctx->ip4_other_query.timer) irq_context: 0 rtnl_mutex (&brmctx->ip4_own_query.timer) irq_context: 0 rtnl_mutex (&brmctx->ip6_mc_router_timer) irq_context: 0 rtnl_mutex (&brmctx->ip6_other_query.timer) irq_context: 0 rtnl_mutex (&brmctx->ip6_own_query.timer) irq_context: 0 rtnl_mutex &bridge_netdev_addr_lock_key/1 &obj_hash[i].lock irq_context: 0 rtnl_mutex &bridge_netdev_addr_lock_key/1 krc.lock irq_context: 0 rtnl_mutex &im->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex &idev->mc_lock &bridge_netdev_addr_lock_key/1 &obj_hash[i].lock irq_context: 0 rtnl_mutex &idev->mc_lock &bridge_netdev_addr_lock_key/1 krc.lock irq_context: 0 rtnl_mutex __ip_vs_mutex irq_context: 0 rtnl_mutex __ip_vs_mutex &rq->__lock irq_context: 0 rtnl_mutex __ip_vs_mutex &ipvs->dest_trash_lock irq_context: 0 rtnl_mutex fib_info_lock &obj_hash[i].lock irq_context: 0 rtnl_mutex fib_info_lock pool_lock#2 irq_context: 0 rtnl_mutex &tbl->lock pool_lock#2 irq_context: 0 &mm->mmap_lock &vma->vm_lock->lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &tbl->lock &n->lock irq_context: 0 rtnl_mutex &tbl->lock &n->lock &____s->seqcount#9 irq_context: 0 rtnl_mutex &tbl->lock nl_table_lock irq_context: 0 rtnl_mutex &tbl->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex &tbl->lock nl_table_wait.lock irq_context: 0 rtnl_mutex &tbl->lock rcu_read_lock lock#8 irq_context: 0 rtnl_mutex &tbl->lock rcu_read_lock id_table_lock irq_context: 0 rtnl_mutex &tbl->lock &dir->lock#2 irq_context: 0 rtnl_mutex &tbl->lock krc.lock irq_context: 0 rtnl_mutex flowtable_lock irq_context: 0 (wq_completion)events (work_completion)(&w->work)#2 nf_conntrack_mutex irq_context: 0 (wq_completion)events (work_completion)(&w->work)#2 nf_conntrack_mutex &nf_conntrack_locks[i] irq_context: 0 (wq_completion)events (work_completion)(&w->work)#2 nf_conntrack_mutex &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&w->work)#2 nf_conntrack_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex 
rcu_read_lock &tb->tb6_lock &net->ipv6.fib6_walker_lock irq_context: 0 rtnl_mutex rcu_read_lock &tb->tb6_lock pool_lock#2 irq_context: 0 rtnl_mutex rcu_read_lock &tb->tb6_lock nl_table_lock irq_context: 0 rtnl_mutex rcu_read_lock &tb->tb6_lock &obj_hash[i].lock irq_context: 0 rtnl_mutex rcu_read_lock &tb->tb6_lock nl_table_wait.lock irq_context: 0 rtnl_mutex rcu_read_lock &tb->tb6_lock rcu_read_lock pool_lock#2 irq_context: 0 rtnl_mutex rcu_read_lock &tb->tb6_lock rcu_read_lock &data->fib_event_queue_lock irq_context: 0 rtnl_mutex rcu_read_lock &tb->tb6_lock rcu_read_lock rcu_read_lock &pool->lock irq_context: 0 rtnl_mutex rcu_read_lock &tb->tb6_lock rcu_read_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex rcu_read_lock &tb->tb6_lock rcu_read_lock rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle rcu_read_lock &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex rcu_read_lock &tb->tb6_lock &c->lock irq_context: 0 sk_lock-AF_INET &sem->wait_lock irq_context: 0 sk_lock-AF_INET &p->pi_lock irq_context: 0 sk_lock-AF_INET &p->pi_lock &rq->__lock irq_context: 0 sk_lock-AF_INET &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_INET &mm->mmap_lock rcu_read_lock &rq->__lock irq_context: 0 &group->mark_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &ul->lock#2 irq_context: 0 rtnl_mutex &net->ipv6.addrconf_hash_lock &obj_hash[i].lock irq_context: 0 rtnl_mutex &ndev->lock &base->lock irq_context: 0 rtnl_mutex &ndev->lock &base->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex &idev->mc_lock rcu_read_lock &pool->lock irq_context: 0 rtnl_mutex &idev->mc_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET &mm->mmap_lock rcu_read_lock rcu_node_0 irq_context: 0 sk_lock-AF_INET &mm->mmap_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &nft_net->commit_mutex &rnp->exp_lock irq_context: 0 &nft_net->commit_mutex rcu_state.exp_mutex irq_context: 0 &nft_net->commit_mutex rcu_state.exp_mutex rcu_state.exp_mutex.wait_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &obj_hash[i].lock pool_lock irq_context: 0 sk_lock-AF_INET &mm->mmap_lock rcu_read_lock &rcu_state.gp_wq irq_context: 0 sk_lock-AF_INET &mm->mmap_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 sk_lock-AF_INET &mm->mmap_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 sk_lock-AF_INET &mm->mmap_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_INET &mm->mmap_lock stock_lock irq_context: 0 sk_lock-AF_INET &mm->mmap_lock ptlock_ptr(page) irq_context: softirq &(&forw_packet_aggr->delayed_work)->timer rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 slock-AF_INET pool_lock#2 irq_context: 0 slock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: 0 sk_lock-AF_INET &mm->mmap_lock ptlock_ptr(page)#2 lock#4 &lruvec->lru_lock irq_context: 0 sk_lock-AF_INET rcu_read_lock &ei->socket.wq.wait &p->pi_lock irq_context: 0 sk_lock-AF_INET rcu_read_lock &ei->socket.wq.wait &p->pi_lock &rq->__lock irq_context: 0 
sk_lock-AF_INET rcu_read_lock &ei->socket.wq.wait &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &idev->mc_query_lock irq_context: 0 rtnl_mutex &idev->mc_query_lock &obj_hash[i].lock irq_context: 0 rtnl_mutex (work_completion)(&(&idev->mc_report_work)->work) irq_context: 0 &mm->mmap_lock &xa->xa_lock#6 &c->lock irq_context: 0 &mm->mmap_lock &xa->xa_lock#6 &n->list_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &rnp->exp_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_state.exp_mutex irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_state.exp_mutex rcu_state.exp_mutex.wait_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_state.exp_mutex &rq->__lock irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_state.exp_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &nft_net->commit_mutex rcu_state.exp_mutex.wait_lock irq_context: 0 &nft_net->commit_mutex &p->pi_lock irq_context: 0 &nft_net->commit_mutex &p->pi_lock &rq->__lock irq_context: 0 &nft_net->commit_mutex &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_state.exp_mutex rcu_node_0 irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_state.exp_mutex &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_state.exp_mutex &rnp->exp_wq[0] irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->mcast.work)->work) rcu_read_lock batched_entropy_u8.lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->mcast.work)->work) rcu_read_lock kfence_freelist_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->mcast.work)->work) &meta->lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->mcast.work)->work) kfence_freelist_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex cpu_hotplug_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex cpu_hotplug_lock &list->lock#5 irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_state.exp_mutex &rnp->exp_wq[1] irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ret->b_state_lock &journal->j_list_lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 rcu_read_lock rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: 0 pernet_ops_rwsem rtnl_mutex bpf_devs_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &in_dev->mc_tomb_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex sysctl_lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET &mm->mmap_lock ptlock_ptr(page)#2 key irq_context: 0 pernet_ops_rwsem rtnl_mutex sysctl_lock pool_lock#2 irq_context: 0 pernet_ops_rwsem rtnl_mutex sysctl_lock krc.lock irq_context: 0 pernet_ops_rwsem rtnl_mutex class irq_context: 0 pernet_ops_rwsem rtnl_mutex (&tbl->proxy_timer) irq_context: 0 pernet_ops_rwsem rtnl_mutex &base->lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &ul->lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &net->xdp.lock irq_context: 0 pernet_ops_rwsem rtnl_mutex krc.lock irq_context: 0 
pernet_ops_rwsem rtnl_mutex mirred_list_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &nft_net->commit_mutex irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_read_lock &tb->tb6_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_read_lock &tb->tb6_lock &net->ipv6.fib6_walker_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &ent->pde_unload_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &net->ipv6.addrconf_hash_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &ndev->lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &ndev->lock &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_state.exp_mutex &rnp->exp_wq[2] irq_context: 0 pernet_ops_rwsem rtnl_mutex &idev->mc_query_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &idev->mc_query_lock &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem rtnl_mutex (work_completion)(&(&idev->mc_report_work)->work) irq_context: 0 pernet_ops_rwsem rtnl_mutex &idev->mc_report_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &idev->mc_lock krc.lock irq_context: 0 pernet_ops_rwsem rtnl_mutex sysctl_lock &obj_hash[i].lock pool_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &pnn->pndevs.lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &pnn->routes.lock irq_context: 0 pernet_ops_rwsem rtnl_mutex target_list_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex _xmit_SIT irq_context: 0 pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem kernfs_idr_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem pool_lock#2 irq_context: 0 pernet_ops_rwsem rtnl_mutex uevent_sock_mutex uevent_sock_mutex.wait_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex kernfs_idr_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex dev_pm_qos_sysfs_mtx irq_context: 0 pernet_ops_rwsem rtnl_mutex dev_pm_qos_sysfs_mtx &root->kernfs_rwsem irq_context: 0 pernet_ops_rwsem rtnl_mutex dev_pm_qos_sysfs_mtx &root->kernfs_rwsem irq_context: 0 pernet_ops_rwsem rtnl_mutex dev_pm_qos_sysfs_mtx dev_pm_qos_mtx irq_context: 0 pernet_ops_rwsem rtnl_mutex subsys mutex#17 &k->k_lock klist_remove_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem kernfs_idr_lock &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem kernfs_idr_lock pool_lock#2 irq_context: 0 pernet_ops_rwsem rtnl_mutex deferred_probe_mutex irq_context: 0 pernet_ops_rwsem rtnl_mutex device_links_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex mmu_notifier_invalidate_range_start irq_context: 0 pernet_ops_rwsem rtnl_mutex uevent_sock_mutex mmu_notifier_invalidate_range_start irq_context: 0 pernet_ops_rwsem rtnl_mutex cpu_hotplug_lock xps_map_mutex irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_state.exp_mutex &rnp->exp_wq[3] irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &xa->xa_lock#6 pool_lock#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &xa->xa_lock#6 &xa->xa_lock#12 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &xa->xa_lock#6 &obj_hash[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &xa->xa_lock#6 stock_lock irq_context: 0 tracepoints_mutex &rnp->exp_lock irq_context: 0 tracepoints_mutex rcu_state.exp_mutex irq_context: 0 tracepoints_mutex rcu_state.exp_mutex rcu_state.exp_mutex.wait_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_state.exp_mutex.wait_lock irq_context: 0 pernet_ops_rwsem rcu_state.barrier_mutex irq_context: 0 pernet_ops_rwsem 
rcu_state.barrier_mutex rcu_state.barrier_lock irq_context: 0 pernet_ops_rwsem rcu_state.barrier_mutex rcu_state.barrier_lock &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem rcu_state.barrier_mutex &x->wait#24 irq_context: 0 pernet_ops_rwsem rcu_state.barrier_mutex &rq->__lock irq_context: 0 pernet_ops_rwsem rcu_state.barrier_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq (&icsk->icsk_delack_timer) slock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: 0 tracepoints_mutex rcu_state.exp_mutex &rnp->exp_wq[0] irq_context: 0 pernet_ops_rwsem rtnl_mutex &sem->wait_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem &sem->wait_lock irq_context: 0 tracepoints_mutex &ACCESS_PRIVATE(sdp, lock) irq_context: 0 tracepoints_mutex tracepoint_srcu irq_context: 0 tracepoints_mutex &x->wait#3 irq_context: 0 tracepoints_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)rcu_gp (work_completion)(&(&ssp->srcu_sup->work)->work) tracepoint_srcu_srcu_usage.lock &ACCESS_PRIVATE(sdp, lock) irq_context: 0 &ei->i_data_sem &ei->i_prealloc_lock &pa->pa_lock#2 irq_context: 0 &ei->i_data_sem &obj_hash[i].lock irq_context: 0 &ei->i_data_sem pool_lock#2 irq_context: 0 pernet_ops_rwsem dev_base_lock irq_context: 0 pernet_ops_rwsem lweventlist_lock irq_context: 0 pernet_ops_rwsem &dir->lock#2 &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem &dir->lock#2 pool_lock#2 irq_context: 0 pernet_ops_rwsem netdev_unregistering_wq.lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex &bat_priv->tt.commit_lock &bat_priv->softif_vlan_list_lock &n->list_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex &bat_priv->tt.commit_lock &bat_priv->softif_vlan_list_lock &n->list_lock &c->lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 rcu_read_lock &dentry->d_lock &p->pi_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 rcu_read_lock &dentry->d_lock &p->pi_lock &rq->__lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 rcu_read_lock &dentry->d_lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem rcu_state.exp_mutex.wait_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex _xmit_TUNNEL6 irq_context: 0 pernet_ops_rwsem &dir->lock#2 &____s->seqcount irq_context: 0 pernet_ops_rwsem &dir->lock#2 rcu_read_lock pool_lock#2 irq_context: 0 pernet_ops_rwsem (work_completion)(&ht->run_work) irq_context: 0 pernet_ops_rwsem &ht->mutex irq_context: 0 pernet_ops_rwsem &ht->mutex rcu_read_lock pool_lock#2 irq_context: 0 pernet_ops_rwsem &ht->mutex &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem rcu_read_lock rcu_node_0 irq_context: 0 pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem rcu_read_lock &rq->__lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 pernet_ops_rwsem rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 
jbd2_handle &ei->i_data_sem rcu_node_0 irq_context: 0 pernet_ops_rwsem rtnl_mutex cpu_hotplug_lock rcu_read_lock &pool->lock irq_context: 0 pernet_ops_rwsem rtnl_mutex cpu_hotplug_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem rtnl_mutex cpu_hotplug_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex cpu_hotplug_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 pernet_ops_rwsem rtnl_mutex cpu_hotplug_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem rtnl_mutex cpu_hotplug_lock (work_completion)(flush) irq_context: 0 pernet_ops_rwsem rtnl_mutex cpu_hotplug_lock &x->wait#10 irq_context: 0 pernet_ops_rwsem rtnl_mutex cpu_hotplug_lock &rq->__lock irq_context: 0 pernet_ops_rwsem rtnl_mutex cpu_hotplug_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem rtnl_mutex cpu_hotplug_lock &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &ul->lock#2 irq_context: 0 pernet_ops_rwsem rtnl_mutex _xmit_TUNNEL irq_context: 0 pernet_ops_rwsem rcu_state.barrier_mutex rcu_state.barrier_mutex.wait_lock irq_context: 0 &ei->i_data_sem &c->lock irq_context: 0 pernet_ops_rwsem rcu_state.barrier_mutex.wait_lock irq_context: 0 pernet_ops_rwsem napi_hash_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &ei->i_es_lock key#8 irq_context: softirq (&icsk->icsk_retransmit_timer) slock-AF_INET tk_core.seq.seqcount irq_context: softirq (&icsk->icsk_retransmit_timer) slock-AF_INET pool_lock#2 irq_context: softirq (&icsk->icsk_retransmit_timer) slock-AF_INET rcu_read_lock rcu_read_lock &____s->seqcount#7 irq_context: softirq (&icsk->icsk_retransmit_timer) slock-AF_INET rcu_read_lock rcu_read_lock &ct->lock irq_context: softirq (&icsk->icsk_retransmit_timer) slock-AF_INET rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: softirq (&icsk->icsk_retransmit_timer) slock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: softirq (&icsk->icsk_retransmit_timer) slock-AF_INET &obj_hash[i].lock irq_context: softirq (&icsk->icsk_retransmit_timer) slock-AF_INET &base->lock irq_context: softirq (&icsk->icsk_retransmit_timer) slock-AF_INET &base->lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_lock_key#27 irq_context: 0 &sb->s_type->i_mutex_key#18 irq_context: 0 &sb->s_type->i_mutex_key#18 &dentry->d_lock irq_context: 0 &sb->s_type->i_mutex_key#18 tk_core.seq.seqcount irq_context: 0 &sb->s_type->i_mutex_key#18 sb_writers#13 mount_lock irq_context: 0 &sb->s_type->i_mutex_key#18 sb_writers#13 tk_core.seq.seqcount irq_context: 0 &sb->s_type->i_mutex_key#18 sb_writers#13 &sb->s_type->i_lock_key#27 irq_context: 0 &sb->s_type->i_mutex_key#18 sb_writers#13 &wb->list_lock irq_context: 0 &sb->s_type->i_mutex_key#18 sb_writers#13 &wb->list_lock &sb->s_type->i_lock_key#27 irq_context: 0 pernet_ops_rwsem rtnl_mutex &idev->mc_lock _xmit_ETHER &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &idev->mc_lock _xmit_ETHER krc.lock irq_context: 0 pernet_ops_rwsem rtnl_mutex _xmit_ETHER irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET rcu_read_lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET &cfs_rq->removed.lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET &rq->__lock &cfs_rq->removed.lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET rcu_read_lock rcu_node_0 irq_context: 0 &sb->s_type->i_mutex_key#10 
sk_lock-AF_INET rcu_read_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET slock-AF_INET &base->lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET slock-AF_INET &base->lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET slock-AF_INET pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET slock-AF_INET rcu_read_lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET slock-AF_INET &tcp_hashinfo.bhash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET slock-AF_INET &tcp_hashinfo.bhash[i].lock stock_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET slock-AF_INET &tcp_hashinfo.bhash[i].lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET slock-AF_INET &tcp_hashinfo.bhash[i].lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET slock-AF_INET &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET slock-AF_INET &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock stock_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET slock-AF_INET &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET slock-AF_INET &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock pool_lock#2 irq_context: 0 pernet_ops_rwsem rtnl_mutex remove_cache_srcu pool_lock#2 irq_context: 0 pernet_ops_rwsem rtnl_mutex kernfs_idr_lock &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem rtnl_mutex kernfs_idr_lock pool_lock#2 irq_context: 0 pernet_ops_rwsem quarantine_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-sk_lock-AF_INET irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-sk_lock-AF_INET k-slock-AF_INET irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-sk_lock-AF_INET &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-sk_lock-AF_INET k-clock-AF_INET irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-sk_lock-AF_INET &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-sk_lock-AF_INET &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-slock-AF_INET irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-slock-AF_INET &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-slock-AF_INET pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-slock-AF_INET elock-AF_INET irq_context: 0 pernet_ops_rwsem rtnl_mutex _xmit_IPGRE irq_context: 0 sk_lock-AF_INET remove_cache_srcu pool_lock#2 irq_context: 0 pernet_ops_rwsem cpu_hotplug_lock wq_pool_mutex &n->list_lock irq_context: 0 pernet_ops_rwsem cpu_hotplug_lock wq_pool_mutex &n->list_lock &c->lock irq_context: 0 pernet_ops_rwsem cpu_hotplug_lock wq_pool_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx rcu_node_0 irq_context: 0 pernet_ops_rwsem &fn->fou_lock irq_context: 0 pernet_ops_rwsem ipvs->sync_mutex irq_context: 
0 pernet_ops_rwsem hwsim_radio_lock irq_context: 0 pernet_ops_rwsem &ent->pde_unload_lock irq_context: 0 pernet_ops_rwsem rdma_nets_rwsem irq_context: 0 pernet_ops_rwsem rdma_nets_rwsem rdma_nets.xa_lock irq_context: 0 pernet_ops_rwsem k-clock-AF_NETLINK irq_context: 0 pernet_ops_rwsem &nlk->wait irq_context: 0 pernet_ops_rwsem wlock-AF_NETLINK irq_context: 0 pernet_ops_rwsem &xa->xa_lock#6 irq_context: 0 pernet_ops_rwsem &fsnotify_mark_srcu irq_context: 0 pernet_ops_rwsem rtnl_mutex _xmit_LOOPBACK irq_context: 0 pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem &____s->seqcount irq_context: 0 pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#3 oom_adj_mutex rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 sb_writers#3 oom_adj_mutex rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#3 oom_adj_mutex rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem mmu_notifier_invalidate_range_start &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem mmu_notifier_invalidate_range_start &obj_hash[i].lock irq_context: 0 rtnl_mutex &dev->mutex &rq->__lock irq_context: 0 rtnl_mutex &dev->mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) remove_cache_srcu pool_lock#2 irq_context: 0 pernet_ops_rwsem &hn->hn_lock irq_context: 0 pernet_ops_rwsem sysctl_lock &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem sysctl_lock pool_lock#2 irq_context: 0 pernet_ops_rwsem sysctl_lock krc.lock irq_context: 0 pernet_ops_rwsem rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 pernet_ops_rwsem rtnl_mutex &caifn->caifdevs.lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &caifn->caifdevs.lock rcu_state.exp_mutex rcu_node_0 irq_context: 0 pernet_ops_rwsem rtnl_mutex &caifn->caifdevs.lock rcu_state.exp_mutex &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &caifn->caifdevs.lock rcu_state.exp_mutex rcu_read_lock &pool->lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &caifn->caifdevs.lock rcu_state.exp_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &caifn->caifdevs.lock rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &caifn->caifdevs.lock rcu_state.exp_mutex &rnp->exp_wq[0] irq_context: 0 pernet_ops_rwsem rtnl_mutex &caifn->caifdevs.lock rcu_state.exp_mutex &rq->__lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &caifn->caifdevs.lock rcu_state.exp_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events fqdir_free_work rcu_state.barrier_mutex rcu_state.barrier_mutex.wait_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &caifn->caifdevs.lock &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &caifn->caifdevs.lock pool_lock#2 irq_context: 0 pernet_ops_rwsem rtnl_mutex &caifn->caifdevs.lock &this->info_list_lock irq_context: 0 pernet_ops_rwsem &pnettable->lock irq_context: 0 pernet_ops_rwsem &pnetids_ndev->lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_INET6/1 irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_INET6/1 k-slock-AF_INET6 irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_INET6/1 rlock-AF_INET6 irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_INET6/1 &list->lock#25 irq_context: 0 pernet_ops_rwsem &net->sctp.addr_wq_lock 
irq_context: 0 pernet_ops_rwsem &net->sctp.addr_wq_lock k-slock-AF_INET6/1 irq_context: 0 pernet_ops_rwsem &net->sctp.addr_wq_lock k-slock-AF_INET6/1 &sctp_ep_hashtable[i].lock irq_context: 0 pernet_ops_rwsem &net->sctp.addr_wq_lock k-slock-AF_INET6/1 &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem &net->sctp.addr_wq_lock k-slock-AF_INET6/1 pool_lock#2 irq_context: 0 pernet_ops_rwsem &net->sctp.addr_wq_lock k-slock-AF_INET6/1 k-clock-AF_INET6 irq_context: 0 pernet_ops_rwsem &net->sctp.addr_wq_lock &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_INET6 &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_INET6 k-clock-AF_INET6 irq_context: 0 pernet_ops_rwsem k-slock-AF_INET6 &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem k-slock-AF_INET6 pool_lock#2 irq_context: 0 pernet_ops_rwsem k-slock-AF_INET6 elock-AF_INET6 irq_context: 0 (wq_completion)events fqdir_free_work rcu_state.barrier_mutex &pool->lock &p->pi_lock irq_context: 0 pernet_ops_rwsem rcu_read_lock &rcu_state.expedited_wq irq_context: 0 pernet_ops_rwsem rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 pernet_ops_rwsem rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 pernet_ops_rwsem rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem kernfs_idr_lock &obj_hash[i].lock pool_lock irq_context: 0 pernet_ops_rwsem rcu_read_lock &rcu_state.gp_wq irq_context: 0 pernet_ops_rwsem rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 pernet_ops_rwsem rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 pernet_ops_rwsem rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock &____s->seqcount#2 irq_context: softirq (&ndev->rs_timer) &pcp->lock &zone->lock irq_context: softirq (&peer->timer_send_keepalive) irq_context: softirq (&peer->timer_send_keepalive) pool_lock#2 irq_context: softirq (&peer->timer_send_keepalive) &list->lock#17 irq_context: softirq (&peer->timer_send_keepalive) tk_core.seq.seqcount irq_context: softirq (&peer->timer_send_keepalive) rcu_read_lock_bh &r->producer_lock#2 irq_context: softirq (&peer->timer_send_keepalive) rcu_read_lock_bh rcu_read_lock &pool->lock irq_context: softirq (&peer->timer_send_keepalive) rcu_read_lock_bh rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: softirq (&peer->timer_send_keepalive) rcu_read_lock_bh rcu_read_lock &pool->lock &p->pi_lock irq_context: softirq (&peer->timer_send_keepalive) rcu_read_lock_bh rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: softirq (&peer->timer_send_keepalive) rcu_read_lock_bh rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq (&peer->timer_send_keepalive) rcu_read_lock_bh &obj_hash[i].lock irq_context: softirq (&peer->timer_send_keepalive) rcu_read_lock_bh &base->lock irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: softirq (&peer->timer_send_keepalive) rcu_read_lock_bh &base->lock &obj_hash[i].lock irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET irq_context: 0 pernet_ops_rwsem rtnl_mutex &caifn->caifdevs.lock rcu_state.exp_mutex &rnp->exp_wq[1] irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET rcu_read_lock 
&____s->seqcount#7 irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET rcu_read_lock &nf_conntrack_locks[i] irq_context: softirq (&peer->timer_persistent_keepalive) batched_entropy_u8.lock irq_context: softirq (&peer->timer_persistent_keepalive) kfence_freelist_lock irq_context: 0 (wq_completion)events (work_completion)(&(&krcp->krw_arr[i].rcu_work)->work) rcu_callback per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#4 &nsim_trap_data->trap_lock &base->lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#4 &nsim_trap_data->trap_lock &base->lock &obj_hash[i].lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock &meta->lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_INET irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_INET k-slock-AF_INET irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_INET &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_INET k-clock-AF_INET irq_context: 0 pernet_ops_rwsem k-slock-AF_INET irq_context: 0 pernet_ops_rwsem k-slock-AF_INET &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem k-slock-AF_INET pool_lock#2 irq_context: 0 pernet_ops_rwsem k-slock-AF_INET elock-AF_INET irq_context: 0 (wq_completion)wg-kex-wg2#14 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock irq_context: 0 (wq_completion)wg-kex-wg2#14 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock irq_context: 0 (wq_completion)wg-kex-wg2#14 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock fs_reclaim irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &sn->gssp_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &cd->hash_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem cache_list_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem cache_list_lock &cd->hash_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem (&net->can.stattimer) irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem xfrm_state_gc_work irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &net->xfrm.xfrm_state_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &hashinfo->lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-clock-AF_INET6 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem remove_cache_srcu irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem 
remove_cache_srcu quarantine_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem remove_cache_srcu &c->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem remove_cache_srcu &n->list_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem remove_cache_srcu &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem remove_cache_srcu pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem nl_table_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem (work_completion)(&(&net->ipv6.addr_chk_work)->work) irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &net->ipv6.ip6addrlbl_table.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &net->ipv6.ip6addrlbl_table.lock &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &net->ipv6.ip6addrlbl_table.lock pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &net->ipv6.ip6addrlbl_table.lock krc.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &net->ipv6.ip6addrlbl_table.lock &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem ip6_fl_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &net->rules_mod_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem percpu_counters_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem (&net->ipv6.ip6_fib_timer) irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex (&mrt->ipmr_expire_timer) irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex (work_completion)(&ht->run_work) irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &ht->mutex irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &ht->mutex &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &ht->mutex pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem __ip_vs_app_mutex irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem __ip_vs_app_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem __ip_vs_app_mutex pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem __ip_vs_mutex irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem (&ipvs->dest_trash_timer) irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem (work_completion)(&(&ipvs->expire_nodest_conn_work)->work) irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem (work_completion)(&(&ipvs->defense_work)->work) irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem ipvs->est_mutex irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem ipvs->est_mutex pcpu_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem ipvs->est_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem ipvs->est_mutex pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem 
(work_completion)(&(&ipvs->est_reload_work)->work) irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem nfnl_subsys_ipset irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem recent_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem hashlimit_mutex irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem nf_log_mutex irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem (work_completion)(&(&cnet->ecache.dwork)->work) irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex nf_connlabels_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &k->list_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &root->kernfs_rwsem irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &root->kernfs_rwsem kernfs_idr_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &root->kernfs_rwsem &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &root->kernfs_rwsem pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem nf_hook_mutex irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem nf_hook_mutex cpu_hotplug_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &sn->pipefs_sb_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem uevent_sock_mutex irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem uevent_sock_mutex fs_reclaim irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem uevent_sock_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem uevent_sock_mutex &c->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem uevent_sock_mutex pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem uevent_sock_mutex nl_table_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem uevent_sock_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem uevent_sock_mutex nl_table_wait.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem sysfs_symlink_target_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem kernfs_idr_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem tcp_metrics_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-clock-AF_INET irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem (work_completion)(&net->xfrm.policy_hash_work) irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &net->xfrm.xfrm_policy_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem (work_completion)(&net->xfrm.state_hash_work) irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &list->lock#2 irq_context: 0 (wq_completion)netns 
net_cleanup_work pernet_ops_rwsem genl_sk_destructing_waitq.lock irq_context: 0 (wq_completion)netns net_cleanup_work rcu_state.barrier_mutex irq_context: 0 (wq_completion)netns net_cleanup_work rcu_state.barrier_mutex rcu_state.barrier_lock irq_context: 0 (wq_completion)netns net_cleanup_work rcu_state.barrier_mutex rcu_state.barrier_lock &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work rcu_state.barrier_mutex &x->wait#24 irq_context: 0 (wq_completion)netns net_cleanup_work rcu_state.barrier_mutex &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work rcu_state.barrier_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work krc.lock irq_context: 0 (wq_completion)netns net_cleanup_work &dir->lock irq_context: 0 (wq_completion)netns net_cleanup_work &dir->lock &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work &dir->lock pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work stock_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-sk_lock-AF_RXRPC &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET rcu_read_lock &nf_conntrack_locks[i]/1 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex nf_hook_mutex remove_cache_srcu irq_context: 0 (wq_completion)events (work_completion)(&(&krcp->krw_arr[i].rcu_work)->work) rcu_callback &cfs_rq->removed.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex nf_hook_mutex remove_cache_srcu quarantine_lock irq_context: 0 pcpu_alloc_mutex pgd_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex nf_hook_mutex remove_cache_srcu &c->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex nf_hook_mutex remove_cache_srcu &n->list_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex nf_hook_mutex remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex nf_hook_mutex remove_cache_srcu &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex nf_hook_mutex remove_cache_srcu pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&pwq->unbound_release_work) &rnp->exp_wq[1] irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex remove_cache_srcu pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex batched_entropy_u8.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex kfence_freelist_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_state.exp_mutex &rnp->exp_wq[0] irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &rnp->exp_wq[1] 
irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &meta->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_state.exp_mutex irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_state.exp_mutex rcu_state.exp_mutex.wait_lock irq_context: 0 (wq_completion)events (work_completion)(&pwq->unbound_release_work) rcu_state.exp_mutex.wait_lock irq_context: 0 (wq_completion)events (work_completion)(&pwq->unbound_release_work) &p->pi_lock irq_context: 0 (wq_completion)events (work_completion)(&pwq->unbound_release_work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&pwq->unbound_release_work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex kernfs_idr_lock &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex kernfs_idr_lock pool_lock#2 irq_context: softirq (&hsr->announce_timer) rcu_read_lock batched_entropy_u8.lock irq_context: softirq (&hsr->announce_timer) rcu_read_lock kfence_freelist_lock irq_context: softirq (&hsr->announce_timer) rcu_read_lock &hsr->seqnr_lock &meta->lock irq_context: softirq (&hsr->announce_timer) rcu_read_lock &hsr->seqnr_lock kfence_freelist_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex remove_cache_srcu &____s->seqcount irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex remove_cache_srcu &pcp->lock &zone->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex remove_cache_srcu &pcp->lock &zone->lock &____s->seqcount irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &ht->mutex &pcp->lock &zone->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &ht->mutex &pcp->lock &zone->lock &____s->seqcount irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem &sn->gssp_lock irq_context: 0 pernet_ops_rwsem &cd->hash_lock irq_context: 0 pernet_ops_rwsem cache_list_lock &cd->hash_lock irq_context: 0 pernet_ops_rwsem (&net->can.stattimer) irq_context: 0 pernet_ops_rwsem xfrm_state_gc_work irq_context: 0 pernet_ops_rwsem &net->xfrm.xfrm_state_lock irq_context: 0 pernet_ops_rwsem &ht->mutex pool_lock#2 irq_context: 0 pernet_ops_rwsem sysctl_lock &obj_hash[i].lock pool_lock irq_context: 0 pernet_ops_rwsem (work_completion)(&(&net->ipv6.addr_chk_work)->work) irq_context: 0 pernet_ops_rwsem &net->ipv6.ip6addrlbl_table.lock &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem &net->ipv6.ip6addrlbl_table.lock pool_lock#2 irq_context: 0 pernet_ops_rwsem &net->ipv6.ip6addrlbl_table.lock krc.lock irq_context: 0 pernet_ops_rwsem ip6_fl_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &net->rules_mod_lock irq_context: 0 pernet_ops_rwsem (&net->ipv6.ip6_fib_timer) irq_context: 0 pernet_ops_rwsem rtnl_mutex (&mrt->ipmr_expire_timer) irq_context: 0 pernet_ops_rwsem rtnl_mutex (work_completion)(&ht->run_work) irq_context: 0 pernet_ops_rwsem rtnl_mutex &ht->mutex irq_context: 0 pernet_ops_rwsem rtnl_mutex &ht->mutex &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &ht->mutex pool_lock#2 irq_context: 0 pernet_ops_rwsem __ip_vs_app_mutex 
&obj_hash[i].lock irq_context: 0 pernet_ops_rwsem __ip_vs_mutex irq_context: 0 pernet_ops_rwsem (&ipvs->dest_trash_timer) irq_context: 0 pernet_ops_rwsem (work_completion)(&(&ipvs->expire_nodest_conn_work)->work) irq_context: 0 pernet_ops_rwsem (work_completion)(&(&ipvs->defense_work)->work) irq_context: 0 pernet_ops_rwsem ipvs->est_mutex pcpu_lock irq_context: 0 pernet_ops_rwsem ipvs->est_mutex &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem (work_completion)(&(&ipvs->est_reload_work)->work) irq_context: 0 pernet_ops_rwsem nfnl_subsys_ipset irq_context: 0 pernet_ops_rwsem recent_lock irq_context: 0 pernet_ops_rwsem hashlimit_mutex irq_context: 0 pernet_ops_rwsem (work_completion)(&(&cnet->ecache.dwork)->work) irq_context: 0 pernet_ops_rwsem rtnl_mutex nf_connlabels_lock irq_context: 0 pernet_ops_rwsem remove_cache_srcu pool_lock#2 irq_context: 0 pernet_ops_rwsem &root->kernfs_rwsem kernfs_idr_lock irq_context: 0 pernet_ops_rwsem &root->kernfs_rwsem &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem &root->kernfs_rwsem pool_lock#2 irq_context: 0 pernet_ops_rwsem nf_hook_mutex cpu_hotplug_lock irq_context: 0 pernet_ops_rwsem sysfs_symlink_target_lock irq_context: 0 pernet_ops_rwsem kernfs_idr_lock irq_context: 0 pernet_ops_rwsem tcp_metrics_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_state.barrier_mutex.wait_lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#4 rcu_node_0 irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#4 &rcu_state.expedited_wq irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#4 &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#4 &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#4 &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem k-clock-AF_INET irq_context: 0 pernet_ops_rwsem (work_completion)(&net->xfrm.policy_hash_work) irq_context: 0 pernet_ops_rwsem &net->xfrm.xfrm_policy_lock irq_context: 0 pernet_ops_rwsem (work_completion)(&net->xfrm.state_hash_work) irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_state.barrier_mutex rcu_state.barrier_mutex.wait_lock irq_context: 0 pernet_ops_rwsem &list->lock#2 irq_context: 0 pernet_ops_rwsem genl_sk_destructing_waitq.lock irq_context: 0 (wq_completion)events fqdir_free_work rcu_state.barrier_mutex.wait_lock irq_context: 0 (wq_completion)events fqdir_free_work &p->pi_lock irq_context: 0 (wq_completion)events fqdir_free_work &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events fqdir_free_work &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)inet_frag_wq (work_completion)(&fqdir->destroy_work) rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 pernet_ops_rwsem rcu_state.exp_mutex &rnp->exp_wq[3] irq_context: 0 pernet_ops_rwsem kernfs_idr_lock &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem kernfs_idr_lock pool_lock#2 irq_context: 0 krc.lock irq_context: 0 &dir->lock &obj_hash[i].lock irq_context: 0 &dir->lock pool_lock#2 irq_context: 0 pernet_ops_rwsem &rcu_state.expedited_wq irq_context: 0 sb_writers#4 
&type->i_mutex_dir_key#3/1 jbd2_handle &pcp->lock &zone->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &pcp->lock &zone->lock &____s->seqcount irq_context: 0 sb_writers#8 tomoyo_ss quarantine_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &sb->s_type->i_mutex_key#12 &sb->s_type->i_mutex_key#12/4 &rq->__lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &sb->s_type->i_mutex_key#12 &sb->s_type->i_mutex_key#12/4 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pcpu_alloc_mutex key irq_context: 0 pcpu_alloc_mutex percpu_counters_lock irq_context: 0 &vma->vm_lock->lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 &vma->vm_lock->lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 &vma->vm_lock->lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem uevent_sock_mutex &c->lock irq_context: 0 sk_lock-AF_INET &mm->mmap_lock &sem->wait_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem krc.lock &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem krc.lock &base->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem krc.lock &base->lock &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem ipvs->est_mutex pcpu_alloc_mutex rcu_node_0 irq_context: 0 pernet_ops_rwsem ipvs->est_mutex pcpu_alloc_mutex &rq->__lock irq_context: 0 pernet_ops_rwsem ipvs->est_mutex pcpu_alloc_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex sysctl_lock &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem quarantine_lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ei->i_data_sem &ei->i_es_lock key#8 irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ei->i_data_sem &ei->i_es_lock key#7 irq_context: 0 &type->i_mutex_dir_key#4 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem sysctl_lock &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)events fqdir_free_work rcu_state.barrier_mutex &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events fqdir_free_work rcu_state.barrier_mutex &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem rtnl_mutex &meta->lock irq_context: 0 pernet_ops_rwsem rtnl_mutex net_rwsem &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &caifn->caifdevs.lock rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &caifn->caifdevs.lock rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &caifn->caifdevs.lock rcu_state.exp_mutex &rnp->exp_wq[3] irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 ptlock_ptr(page)#2 ptlock_ptr(page)#2/1 rcu_read_lock &p->pi_lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 ptlock_ptr(page)#2 ptlock_ptr(page)#2/1 rcu_read_lock &p->pi_lock &rq->__lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 ptlock_ptr(page)#2 ptlock_ptr(page)#2/1 rcu_read_lock &p->pi_lock &rq->__lock 
&per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#9 rcu_read_lock rcu_node_0 irq_context: 0 &sb->s_type->i_mutex_key#9 rcu_read_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#9 rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#9 rcu_read_lock &p->pi_lock irq_context: 0 &sb->s_type->i_mutex_key#9 rcu_read_lock &p->pi_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#9 rcu_read_lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#9 rcu_node_0 irq_context: 0 rcu_read_lock console_owner_lock irq_context: 0 rcu_read_lock console_owner irq_context: 0 (wq_completion)wg-kex-wg2#14 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)wg-kex-wg2#14 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock tk_core.seq.seqcount irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->mcast.work)->work) rcu_read_lock &rcu_state.gp_wq irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->mcast.work)->work) rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->mcast.work)->work) rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->mcast.work)->work) rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->mcast.work)->work) rcu_read_lock &rcu_state.expedited_wq irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->mcast.work)->work) rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->mcast.work)->work) rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->mcast.work)->work) rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &caifn->caifdevs.lock rcu_state.exp_mutex &rnp->exp_wq[0] irq_context: softirq (&peer->hb_timer) slock-AF_INET6 rcu_read_lock rcu_read_lock &n->lock irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock &____s->seqcount#2 irq_context: 0 &kernfs_locks->open_file_mutex[count] fill_pool_map-wait-type-override &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_INET &mm->mmap_lock fs_reclaim &rq->__lock irq_context: 0 sk_lock-AF_INET &mm->mmap_lock fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &iint->mutex rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 &iint->mutex rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 &iint->mutex rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq 
irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &c->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 tomoyo_ss &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 tomoyo_ss &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_INET &mm->mmap_lock rcu_read_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 sk_lock-AF_INET &mm->mmap_lock &pcp->lock &zone->lock irq_context: 0 sk_lock-AF_INET &mm->mmap_lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &mm->mmap_lock remove_cache_srcu pool_lock#2 irq_context: 0 sk_lock-AF_INET &mm->mmap_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 sk_lock-AF_INET rcu_read_lock &ei->socket.wq.wait &p->pi_lock &cfs_rq->removed.lock irq_context: 0 pernet_ops_rwsem per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 remove_cache_srcu pool_lock#2 irq_context: 0 pernet_ops_rwsem rtnl_mutex &idev->mc_lock _xmit_ETHER &____s->seqcount#2 irq_context: 0 pernet_ops_rwsem rtnl_mutex &idev->mc_lock _xmit_ETHER &____s->seqcount irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh &obj_hash[i].lock pool_lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: softirq (&peer->timer_send_keepalive) &c->lock irq_context: softirq (&peer->timer_send_keepalive) &n->list_lock irq_context: softirq (&peer->timer_send_keepalive) &n->list_lock &c->lock irq_context: softirq (&p->forward_delay_timer) irq_context: softirq (&p->forward_delay_timer) &br->lock irq_context: softirq (&p->forward_delay_timer) &br->lock rcu_read_lock &c->lock irq_context: softirq (&p->forward_delay_timer) &br->lock rcu_read_lock pool_lock#2 irq_context: softirq (&p->forward_delay_timer) &br->lock rcu_read_lock &n->list_lock irq_context: softirq (&p->forward_delay_timer) &br->lock rcu_read_lock &n->list_lock &c->lock irq_context: softirq (&p->forward_delay_timer) &br->lock rcu_read_lock nl_table_lock irq_context: softirq (&p->forward_delay_timer) &br->lock rcu_read_lock &obj_hash[i].lock irq_context: softirq (&p->forward_delay_timer) &br->lock rcu_read_lock nl_table_wait.lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem &rq->__lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &bgl->locks[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &n->list_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &n->list_lock &c->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &ei->i_es_lock &c->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &lg->lg_mutex irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &lg->lg_mutex &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &lg->lg_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle bit_wait_table + i irq_context: 0 sb_writers#5 &sb->s_type->i_mutex_key#12 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &u->iolock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq 
irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &lg->lg_mutex &ei->i_prealloc_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &lg->lg_mutex mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &lg->lg_mutex &pa->pa_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &lg->lg_mutex &lg->lg_prealloc_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem rcu_read_lock &rcu_state.gp_wq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &xa->xa_lock#6 &c->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &xa->xa_lock#6 &____s->seqcount#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &xa->xa_lock#6 &pcp->lock &zone->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &xa->xa_lock#6 &____s->seqcount irq_context: 0 pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem &rq->__lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem rtnl_mutex net_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET rcu_read_lock &pcp->lock &zone->lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET &c->lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET &____s->seqcount#2 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET &____s->seqcount irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET &n->list_lock irq_context: softirq rcu_read_lock hwsim_radio_lock &pcp->lock &zone->lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle rcu_node_0 irq_context: 0 sb_writers#8 remove_cache_srcu pool_lock#2 irq_context: 0 sb_writers#8 remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem &____s->seqcount#2 irq_context: 0 (wq_completion)wg-kex-wg2#14 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock rcu_read_lock_bh &peer->keypairs.keypair_update_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET &pcp->lock &zone->lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET slock-AF_INET &pcp->lock &zone->lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET slock-AF_INET &pcp->lock &zone->lock &____s->seqcount irq_context: 0 
sb_writers#8 &of->mutex kn->active#5 uevent_sock_mutex &____s->seqcount#2 irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh fill_pool_map-wait-type-override pool_lock#2 irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh fill_pool_map-wait-type-override pool_lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock &tbl->lock krc.lock &obj_hash[i].lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock &tbl->lock krc.lock &base->lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock &tbl->lock krc.lock &base->lock &obj_hash[i].lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock &tbl->lock fill_pool_map-wait-type-override pool_lock#2 irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock &tbl->lock fill_pool_map-wait-type-override pool_lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock &tbl->lock fill_pool_map-wait-type-override &c->lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock &tbl->lock fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock &tbl->lock fill_pool_map-wait-type-override &____s->seqcount irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock fill_pool_map-wait-type-override pool_lock#2 irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh fill_pool_map-wait-type-override &c->lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh fill_pool_map-wait-type-override &____s->seqcount irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)wg-kex-wg2#14 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock rcu_read_lock_bh &table->lock#2 irq_context: softirq (&p->forward_delay_timer) &br->lock rcu_read_lock quarantine_lock irq_context: 0 (wq_completion)wg-kex-wg2#14 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh &obj_hash[i].lock irq_context: softirq (&p->forward_delay_timer) &br->lock rcu_read_lock &____s->seqcount irq_context: 0 (wq_completion)wg-kex-wg2#14 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh &base->lock irq_context: 0 (wq_completion)wg-kex-wg2#14 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) 
*)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg2#14 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &obj_hash[i].lock irq_context: 0 kn->active#5 remove_cache_srcu pool_lock#2 irq_context: 0 pernet_ops_rwsem rtnl_mutex fill_pool_map-wait-type-override &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 tomoyo_ss remove_cache_srcu pool_lock#2 irq_context: 0 &mm->mmap_lock remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock genl_mutex (console_sem).lock irq_context: 0 cb_lock genl_mutex console_lock console_srcu console_owner_lock irq_context: 0 cb_lock genl_mutex console_lock console_srcu console_owner irq_context: 0 cb_lock genl_mutex console_lock console_srcu console_owner &port_lock_key irq_context: 0 cb_lock genl_mutex console_lock console_srcu console_owner console_owner_lock irq_context: 0 sb_writers#8 kn->active#5 remove_cache_srcu irq_context: 0 sb_writers#8 kn->active#5 remove_cache_srcu quarantine_lock irq_context: 0 sb_writers#8 kn->active#5 remove_cache_srcu &c->lock irq_context: 0 sb_writers#8 kn->active#5 remove_cache_srcu &n->list_lock irq_context: 0 sb_writers#8 kn->active#5 remove_cache_srcu &obj_hash[i].lock irq_context: 0 sb_writers#8 kn->active#5 remove_cache_srcu &____s->seqcount irq_context: 0 sb_writers#8 kn->active#5 remove_cache_srcu pool_lock#2 irq_context: 0 sb_writers#8 kn->active#5 remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &x->wait#22 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &c->lock irq_context: softirq (&peer->hb_timer) slock-AF_INET6 rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex nf_hook_mutex fs_reclaim &rq->__lock irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex nf_hook_mutex fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock genl_mutex rtnl_mutex team->team_lock_key#3 irq_context: 0 cb_lock genl_mutex rtnl_mutex team->team_lock_key#3 &rq->__lock irq_context: 0 tracepoints_mutex &obj_hash[i].lock pool_lock irq_context: 0 tracepoints_mutex rcu_state.exp_mutex &rnp->exp_wq[1] irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_node_0 irq_context: 0 sk_lock-AF_INET &mm->mmap_lock rcu_read_lock &rcu_state.expedited_wq irq_context: 0 sk_lock-AF_INET &mm->mmap_lock rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 sk_lock-AF_INET &mm->mmap_lock rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 sk_lock-AF_INET &mm->mmap_lock rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->mcast.work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_INET fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 
&type->i_mutex_dir_key#3 sb_writers#4 jbd2_handle &mapping->private_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss batched_entropy_u8.lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss kfence_freelist_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss &meta->lock irq_context: softirq (&icsk->icsk_delack_timer) slock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 &obj_hash[i].lock irq_context: softirq (&icsk->icsk_delack_timer) slock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 pool_lock#2 irq_context: 0 (wq_completion)events_unbound connector_reaper_work &cfs_rq->removed.lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET rcu_read_lock &rcu_state.gp_wq irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq &(&bat_priv->nc.work)->timer rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override &c->lock irq_context: softirq &(&bat_priv->nc.work)->timer rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: softirq &(&bat_priv->nc.work)->timer rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 pernet_ops_rwsem rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 pernet_ops_rwsem rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 rcu_read_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 dev_map_lock irq_context: 0 map_idr_lock &obj_hash[i].lock irq_context: 0 map_idr_lock pool_lock#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&map->work) dev_map_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&map->work) rcu_state.exp_mutex rcu_node_0 irq_context: 0 (wq_completion)events_unbound (work_completion)(&map->work) rcu_state.exp_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&map->work) rcu_state.exp_mutex rcu_read_lock &pool->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&map->work) rcu_state.exp_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&map->work) rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&map->work) rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&map->work) rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_unbound (work_completion)(&map->work) rcu_state.exp_mutex &rnp->exp_wq[2] irq_context: 0 (wq_completion)events_unbound (work_completion)(&map->work) rcu_state.exp_mutex &rq->__lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&map->work) 
rcu_state.exp_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rcu_read_lock &dentry->d_lock &p->pi_lock irq_context: 0 rcu_read_lock &dentry->d_lock &p->pi_lock &rq->__lock irq_context: 0 rcu_read_lock &dentry->d_lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_unbound (work_completion)(&map->work) rcu_state.barrier_mutex irq_context: 0 (wq_completion)events_unbound (work_completion)(&map->work) rcu_state.barrier_mutex rcu_state.barrier_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&map->work) rcu_state.barrier_mutex rcu_state.barrier_lock &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&map->work) rcu_state.barrier_mutex &x->wait#24 irq_context: 0 (wq_completion)events_unbound (work_completion)(&map->work) rcu_state.barrier_mutex &rq->__lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&map->work) rcu_state.barrier_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 lock prog_idr_lock &____s->seqcount#2 irq_context: 0 lock prog_idr_lock &pcp->lock &zone->lock irq_context: 0 lock prog_idr_lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 lock prog_idr_lock &____s->seqcount irq_context: softirq &(&net->ipv6.addr_chk_work)->timer irq_context: softirq &(&net->ipv6.addr_chk_work)->timer rcu_read_lock &pool->lock irq_context: softirq &(&net->ipv6.addr_chk_work)->timer rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: softirq &(&net->ipv6.addr_chk_work)->timer rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&net->ipv6.addr_chk_work)->work) rtnl_mutex &pool->lock &p->pi_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&map->work) stock_lock irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock stock_lock irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: softirq (&peer->hb_timer) slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 stock_lock rcu_read_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 rcu_read_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 sb_writers#4 sb_internal remove_cache_srcu irq_context: 0 sb_writers#4 sb_internal remove_cache_srcu quarantine_lock irq_context: 0 sb_writers#4 sb_internal remove_cache_srcu &c->lock irq_context: 0 sb_writers#4 sb_internal remove_cache_srcu &n->list_lock irq_context: 0 sb_writers#4 sb_internal remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#4 sb_internal remove_cache_srcu &obj_hash[i].lock irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 fs_reclaim &rq->__lock irq_context: 0 (wq_completion)events_unbound connector_reaper_work &base->lock irq_context: 0 (wq_completion)events_unbound connector_reaper_work &base->lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET rcu_read_lock &n->list_lock irq_context: 0 sk_lock-AF_INET rcu_read_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &rq->__lock &cfs_rq->removed.lock irq_context: softirq (&peer->hb_timer) slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: softirq (&peer->hb_timer) slock-AF_INET6 &c->lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 
batched_entropy_u8.lock crngs.lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &vma->vm_lock->lock &rq->__lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &vma->vm_lock->lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_INET &mm->mmap_lock mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 sk_lock-AF_INET &mm->mmap_lock mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock fs_reclaim irq_context: 0 &f->f_pos_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &f->f_pos_lock &c->lock irq_context: 0 &f->f_pos_lock &____s->seqcount#2 irq_context: 0 &f->f_pos_lock &____s->seqcount irq_context: 0 &f->f_pos_lock pool_lock#2 irq_context: 0 &f->f_pos_lock &mm->mmap_lock ptlock_ptr(page)#2 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &ei->i_data_sem pool_lock#2 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &ei->i_data_sem &ei->i_es_lock pool_lock#2 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &ei->i_data_sem &ei->i_es_lock &____s->seqcount irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &ei->i_data_sem &ei->i_es_lock rcu_read_lock pool_lock#2 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &ei->i_data_sem &ei->i_es_lock &obj_hash[i].lock irq_context: 0 &f->f_pos_lock &obj_hash[i].lock irq_context: 0 &pipe->mutex/1 &mm->mmap_lock rcu_read_lock rcu_node_0 irq_context: 0 &pipe->mutex/1 &mm->mmap_lock rcu_read_lock &rq->__lock irq_context: 0 &pipe->mutex/1 &mm->mmap_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 remove_cache_srcu irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 remove_cache_srcu quarantine_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 remove_cache_srcu &c->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 remove_cache_srcu &n->list_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 remove_cache_srcu &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &pnettable->lock &rq->__lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &pnettable->lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock &anon_vma->rwsem rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 fill_pool_map-wait-type-override &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_INET &mm->mmap_lock rcu_node_0 irq_context: 0 sk_lock-AF_INET &mm->mmap_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 sk_lock-AF_INET &mm->mmap_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 bcm_notifier_lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &ul->lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock pool_lock irq_context: 0 sk_lock-AF_CAN 
irq_context: 0 sk_lock-AF_CAN slock-AF_CAN irq_context: 0 sk_lock-AF_CAN clock-AF_CAN irq_context: 0 sk_lock-AF_CAN proc_subdir_lock irq_context: 0 sk_lock-AF_CAN fs_reclaim irq_context: 0 sk_lock-AF_CAN fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sk_lock-AF_CAN fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 sk_lock-AF_CAN fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &journal->j_state_lock &journal->j_wait_commit &p->pi_lock &cfs_rq->removed.lock irq_context: 0 sk_lock-AF_CAN &rq->__lock irq_context: 0 sk_lock-AF_CAN &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_CAN pool_lock#2 irq_context: 0 sk_lock-AF_CAN proc_inum_ida.xa_lock irq_context: 0 sk_lock-AF_CAN proc_subdir_lock irq_context: 0 slock-AF_CAN irq_context: 0 smc_v4_hashinfo.lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &n->list_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &n->list_lock &c->lock irq_context: 0 sk_lock-AF_SMC irq_context: 0 sk_lock-AF_SMC slock-AF_SMC irq_context: 0 sk_lock-AF_SMC k-sk_lock-AF_INET irq_context: 0 sk_lock-AF_SMC k-sk_lock-AF_INET k-slock-AF_INET irq_context: 0 sk_lock-AF_SMC k-sk_lock-AF_INET &tcp_hashinfo.bhash[i].lock irq_context: 0 sk_lock-AF_SMC k-sk_lock-AF_INET &tcp_hashinfo.bhash[i].lock stock_lock irq_context: 0 sk_lock-AF_SMC k-sk_lock-AF_INET &tcp_hashinfo.bhash[i].lock pool_lock#2 irq_context: 0 sk_lock-AF_SMC k-sk_lock-AF_INET &tcp_hashinfo.bhash[i].lock k-clock-AF_INET irq_context: 0 sk_lock-AF_SMC k-sk_lock-AF_INET &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &pcp->lock &zone->lock irq_context: 0 sk_lock-AF_SMC k-sk_lock-AF_INET &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock stock_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &pcp->lock &zone->lock &____s->seqcount irq_context: 0 sk_lock-AF_SMC k-sk_lock-AF_INET &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock pool_lock#2 irq_context: 0 sk_lock-AF_SMC k-sk_lock-AF_INET &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock k-clock-AF_INET irq_context: 0 sk_lock-AF_SMC k-sk_lock-AF_INET &rq->__lock irq_context: 0 sk_lock-AF_SMC k-sk_lock-AF_INET &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &mapping->private_lock irq_context: 0 sk_lock-AF_SMC k-slock-AF_INET irq_context: 0 slock-AF_SMC irq_context: 0 sk_lock-AF_SMC k-sk_lock-AF_INET rcu_read_lock pool_lock#2 irq_context: 0 sk_lock-AF_SMC k-sk_lock-AF_INET rcu_read_lock &dir->lock#2 irq_context: 0 sk_lock-AF_SMC k-sk_lock-AF_INET rcu_read_lock &ul->lock irq_context: 0 sk_lock-AF_SMC k-sk_lock-AF_INET &obj_hash[i].lock irq_context: 0 sk_lock-AF_SMC k-sk_lock-AF_INET pool_lock#2 irq_context: 0 sk_lock-AF_SMC k-sk_lock-AF_INET rcu_read_lock rcu_node_0 irq_context: 0 sk_lock-AF_SMC k-sk_lock-AF_INET rcu_read_lock &rq->__lock irq_context: 0 sk_lock-AF_SMC k-sk_lock-AF_INET rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_SMC k-sk_lock-AF_INET stock_lock irq_context: 0 sk_lock-AF_SMC k-sk_lock-AF_INET &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_SMC k-sk_lock-AF_INET &hashinfo->ehash_locks[i] irq_context: 0 sk_lock-AF_SMC k-sk_lock-AF_INET 
batched_entropy_u32.lock irq_context: 0 sk_lock-AF_SMC k-sk_lock-AF_INET tk_core.seq.seqcount irq_context: 0 sk_lock-AF_SMC k-sk_lock-AF_INET batched_entropy_u16.lock irq_context: 0 sk_lock-AF_SMC k-sk_lock-AF_INET fs_reclaim irq_context: 0 sk_lock-AF_SMC k-sk_lock-AF_INET fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sk_lock-AF_SMC k-sk_lock-AF_INET &c->lock irq_context: 0 sk_lock-AF_SMC k-sk_lock-AF_INET &n->list_lock irq_context: 0 sk_lock-AF_SMC k-sk_lock-AF_INET &n->list_lock &c->lock irq_context: 0 sk_lock-AF_SMC k-sk_lock-AF_INET rcu_read_lock rcu_read_lock &____s->seqcount#7 irq_context: 0 sk_lock-AF_SMC k-sk_lock-AF_INET rcu_read_lock rcu_read_lock pool_lock#2 irq_context: 0 sk_lock-AF_SMC k-sk_lock-AF_INET rcu_read_lock rcu_read_lock &ct->lock irq_context: 0 sk_lock-AF_SMC k-sk_lock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock &____s->seqcount#7 irq_context: 0 sk_lock-AF_SMC k-sk_lock-AF_INET rcu_read_lock rcu_read_lock &nf_nat_locks[i] irq_context: 0 sk_lock-AF_SMC k-sk_lock-AF_INET rcu_read_lock rcu_read_lock &nf_conntrack_locks[i] irq_context: 0 sk_lock-AF_SMC k-sk_lock-AF_INET rcu_read_lock rcu_read_lock &nf_conntrack_locks[i] batched_entropy_u8.lock irq_context: 0 sk_lock-AF_SMC k-sk_lock-AF_INET rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 sk_lock-AF_SMC k-sk_lock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET/1 irq_context: 0 sk_lock-AF_SMC k-sk_lock-AF_INET &base->lock irq_context: 0 sk_lock-AF_SMC k-sk_lock-AF_INET &base->lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_SMC k-sk_lock-AF_INET &ei->socket.wq.wait irq_context: 0 sk_lock-AF_SMC k-sk_lock-AF_INET rcu_read_lock tcp_metrics_lock irq_context: 0 sk_lock-AF_SMC k-sk_lock-AF_INET rcu_read_lock tcp_metrics_lock pool_lock#2 irq_context: 0 sk_lock-AF_SMC k-sk_lock-AF_INET rcu_read_lock &ei->socket.wq.wait irq_context: 0 sk_lock-AF_SMC fs_reclaim irq_context: 0 sk_lock-AF_SMC fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sk_lock-AF_SMC fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 sk_lock-AF_SMC fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_SMC &c->lock irq_context: 0 sk_lock-AF_SMC pool_lock#2 irq_context: 0 sk_lock-AF_SMC rtnl_mutex irq_context: 0 sk_lock-AF_SMC rtnl_mutex rtnl_mutex.wait_lock irq_context: 0 sk_lock-AF_SMC rtnl_mutex &rq->__lock irq_context: 0 sk_lock-AF_SMC rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem rcu_read_lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem rcu_read_lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &journal->j_wait_updates &p->pi_lock &cfs_rq->removed.lock irq_context: 0 &journal->j_wait_done_commit &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 tomoyo_ss batched_entropy_u8.lock irq_context: 0 sb_writers#4 tomoyo_ss kfence_freelist_lock irq_context: 0 sb_writers#4 tomoyo_ss &meta->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &mapping->private_lock irq_context: 0 sk_lock-AF_SMC rtnl_mutex.wait_lock irq_context: 0 sk_lock-AF_SMC &p->pi_lock irq_context: 0 sk_lock-AF_SMC &p->pi_lock &rq->__lock irq_context: 0 sk_lock-AF_SMC &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, 
cpu)->seq irq_context: 0 sk_lock-AF_SMC &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET rcu_read_lock rcu_read_lock &cfs_rq->removed.lock irq_context: 0 sk_lock-AF_SMC &pnettable->lock irq_context: 0 sk_lock-AF_SMC smc_ib_devices.mutex irq_context: 0 sk_lock-AF_SMC &smc_clc_eid_table.lock irq_context: 0 sk_lock-AF_SMC &obj_hash[i].lock irq_context: 0 sk_lock-AF_SMC k-sk_lock-AF_INET &____s->seqcount irq_context: 0 sk_lock-AF_SMC k-sk_lock-AF_INET rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 sk_lock-AF_SMC k-sk_lock-AF_INET rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 sk_lock-AF_SMC &smc->clcsock_release_lock irq_context: 0 sk_lock-AF_SMC &smc->clcsock_release_lock &net->smc.mutex_fback_rsn irq_context: 0 sk_lock-AF_SMC &smc->clcsock_release_lock k-clock-AF_INET irq_context: 0 sk_lock-AF_SMC &smc->clcsock_release_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 bcm_notifier_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_CAN irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_CAN slock-AF_CAN irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_CAN rcu_state.exp_mutex rcu_node_0 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_CAN rcu_state.exp_mutex &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_CAN rcu_state.exp_mutex rcu_read_lock &pool->lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_CAN rcu_state.exp_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_CAN rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_CAN rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_CAN rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_CAN rcu_state.exp_mutex &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_CAN rcu_state.exp_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_CAN rcu_state.exp_mutex &rnp->exp_wq[0] irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_CAN &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_CAN proc_subdir_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_CAN &ent->pde_unload_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_CAN proc_inum_ida.xa_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_CAN pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_CAN clock-AF_CAN irq_context: 0 &sb->s_type->i_mutex_key#10 slock-AF_CAN irq_context: 0 &sb->s_type->i_mutex_key#10 rlock-AF_CAN irq_context: 0 &sb->s_type->i_mutex_key#10 elock-AF_CAN irq_context: 0 &sb->s_type->i_mutex_key#10 (work_completion)(&smc->connect_work) irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_SMC irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_SMC slock-AF_SMC irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_SMC k-clock-AF_INET irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_SMC smc_v4_hashinfo.lock irq_context: 0 
&sb->s_type->i_mutex_key#10 slock-AF_SMC irq_context: 0 &sb->s_type->i_mutex_key#10 &smc->clcsock_release_lock irq_context: 0 &sb->s_type->i_mutex_key#10 &smc->clcsock_release_lock k-sk_lock-AF_INET irq_context: 0 &sb->s_type->i_mutex_key#10 &smc->clcsock_release_lock k-sk_lock-AF_INET k-slock-AF_INET irq_context: 0 &sb->s_type->i_mutex_key#10 &smc->clcsock_release_lock k-sk_lock-AF_INET &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 &smc->clcsock_release_lock k-sk_lock-AF_INET &____s->seqcount irq_context: 0 &sb->s_type->i_mutex_key#10 &smc->clcsock_release_lock k-sk_lock-AF_INET pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 &smc->clcsock_release_lock k-sk_lock-AF_INET rcu_read_lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 &smc->clcsock_release_lock k-sk_lock-AF_INET &hashinfo->ehash_locks[i] irq_context: 0 &sb->s_type->i_mutex_key#10 &smc->clcsock_release_lock k-sk_lock-AF_INET fs_reclaim irq_context: 0 &sb->s_type->i_mutex_key#10 &smc->clcsock_release_lock k-sk_lock-AF_INET fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &sb->s_type->i_mutex_key#10 &smc->clcsock_release_lock k-sk_lock-AF_INET tk_core.seq.seqcount irq_context: 0 &sb->s_type->i_mutex_key#10 &smc->clcsock_release_lock k-sk_lock-AF_INET rcu_read_lock rcu_read_lock &____s->seqcount#7 irq_context: 0 &sb->s_type->i_mutex_key#10 &smc->clcsock_release_lock k-sk_lock-AF_INET rcu_read_lock rcu_read_lock &ct->lock irq_context: 0 &sb->s_type->i_mutex_key#10 &smc->clcsock_release_lock k-sk_lock-AF_INET rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 &sb->s_type->i_mutex_key#10 &smc->clcsock_release_lock k-sk_lock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: 0 &sb->s_type->i_mutex_key#10 &smc->clcsock_release_lock k-sk_lock-AF_INET k-clock-AF_INET irq_context: 0 &sb->s_type->i_mutex_key#10 &smc->clcsock_release_lock k-sk_lock-AF_INET k-slock-AF_INET &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 &smc->clcsock_release_lock k-sk_lock-AF_INET k-slock-AF_INET &base->lock irq_context: 0 &sb->s_type->i_mutex_key#10 &smc->clcsock_release_lock k-sk_lock-AF_INET k-slock-AF_INET &base->lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 &smc->clcsock_release_lock k-sk_lock-AF_INET k-slock-AF_INET &tcp_hashinfo.bhash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 &smc->clcsock_release_lock k-sk_lock-AF_INET k-slock-AF_INET &tcp_hashinfo.bhash[i].lock stock_lock irq_context: 0 &sb->s_type->i_mutex_key#10 &smc->clcsock_release_lock k-sk_lock-AF_INET k-slock-AF_INET &tcp_hashinfo.bhash[i].lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 &smc->clcsock_release_lock k-sk_lock-AF_INET k-slock-AF_INET &tcp_hashinfo.bhash[i].lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 &smc->clcsock_release_lock k-sk_lock-AF_INET k-slock-AF_INET &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 &smc->clcsock_release_lock k-sk_lock-AF_INET k-slock-AF_INET &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock stock_lock irq_context: 0 &sb->s_type->i_mutex_key#10 &smc->clcsock_release_lock k-sk_lock-AF_INET k-slock-AF_INET &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 &smc->clcsock_release_lock k-sk_lock-AF_INET k-slock-AF_INET &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 &smc->clcsock_release_lock k-sk_lock-AF_INET 
k-slock-AF_INET elock-AF_INET irq_context: 0 &sb->s_type->i_mutex_key#10 &smc->clcsock_release_lock k-slock-AF_INET irq_context: 0 &sb->s_type->i_mutex_key#10 &smc->clcsock_release_lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 &smc->clcsock_release_lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 &smc->clcsock_release_lock &dir->lock irq_context: 0 &sb->s_type->i_mutex_key#10 &smc->clcsock_release_lock stock_lock irq_context: 0 &sb->s_type->i_mutex_key#10 &smc->clcsock_release_lock &sb->s_type->i_lock_key#8 irq_context: 0 &sb->s_type->i_mutex_key#10 &smc->clcsock_release_lock &xa->xa_lock#6 irq_context: 0 &sb->s_type->i_mutex_key#10 &smc->clcsock_release_lock &fsnotify_mark_srcu irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_SMC clock-AF_SMC irq_context: 0 tomoyo_ss remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 tomoyo_ss remove_cache_srcu pool_lock#2 irq_context: 0 sb_writers#7 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &type->i_mutex_dir_key#3 rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 rcu_node_0 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 batched_entropy_u8.lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 kfence_freelist_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 &meta->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem rcu_read_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &xa->xa_lock#6 &c->lock irq_context: 0 (wq_completion)events_freezable (work_completion)(&vb->update_balloon_stats_work) &rq->__lock irq_context: 0 &p->lock remove_cache_srcu pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET quarantine_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET remove_cache_srcu irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET remove_cache_srcu quarantine_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET remove_cache_srcu &c->lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET remove_cache_srcu &n->list_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET remove_cache_srcu &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET remove_cache_srcu pool_lock#2 irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex &rq->__lock irq_context: 0 &fsnotify_mark_srcu &group->notification_waitq &ep->lock &ep->wq &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &p->pi_lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &ei->i_prealloc_lock &pa->pa_lock#2 irq_context: 0 tracepoints_mutex tracepoints_mutex.wait_lock irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex defrag6_mutex nf_hook_mutex &c->lock irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex defrag6_mutex nf_hook_mutex &rq->__lock irq_context: 0 tracepoints_mutex.wait_lock irq_context: 0 sk_lock-AF_INET remove_cache_srcu rcu_read_lock &rcu_state.gp_wq irq_context: 
0 sk_lock-AF_INET remove_cache_srcu rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 sk_lock-AF_INET remove_cache_srcu rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 sk_lock-AF_INET remove_cache_srcu rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &fsnotify_mark_srcu &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &fsnotify_mark_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &vma->vm_lock->lock mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 jbd2_handle &sbi->s_orphan_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 jbd2_handle &sbi->s_orphan_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &xa->xa_lock#6 &____s->seqcount#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &xa->xa_lock#6 &____s->seqcount irq_context: 0 sk_lock-AF_SMC k-sk_lock-AF_INET rcu_read_lock &c->lock irq_context: 0 sk_lock-AF_SMC &n->list_lock irq_context: 0 sk_lock-AF_SMC &n->list_lock &c->lock irq_context: 0 sk_lock-AF_INET fill_pool_map-wait-type-override &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_unbound (reaper_work).work &rq->__lock &cfs_rq->removed.lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_CAN rcu_state.exp_mutex &rnp->exp_wq[2] irq_context: 0 sk_lock-AF_CAN &c->lock irq_context: 0 sk_lock-AF_CAN &____s->seqcount#2 irq_context: 0 sk_lock-AF_CAN &____s->seqcount irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 rcu_node_0 irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 &smc->clcsock_release_lock k-sk_lock-AF_INET rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 &sb->s_type->i_mutex_key#10 &smc->clcsock_release_lock k-sk_lock-AF_INET rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 &smc->clcsock_release_lock k-sk_lock-AF_INET rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 &smc->clcsock_release_lock k-sk_lock-AF_INET rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &rq->__lock &cfs_rq->removed.lock irq_context: 0 sk_lock-AF_SMC k-sk_lock-AF_INET rcu_read_lock tcp_metrics_lock &c->lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_CAN rcu_state.exp_mutex &rnp->exp_wq[3] irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_CAN &rnp->exp_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_CAN rcu_state.exp_mutex irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_CAN rcu_state.exp_mutex rcu_state.exp_mutex.wait_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_CAN rcu_state.exp_mutex.wait_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_CAN &p->pi_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_CAN &p->pi_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 
sk_lock-AF_CAN &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_CAN &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle rcu_node_0 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle rcu_read_lock &rcu_state.gp_wq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 remove_cache_srcu &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 remove_cache_srcu rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 remove_cache_srcu rcu_read_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 remove_cache_srcu rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_data_sem &n->list_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_data_sem &n->list_lock &c->lock irq_context: 0 &xt[i].mutex batched_entropy_u8.lock irq_context: 0 &xt[i].mutex kfence_freelist_lock irq_context: 0 cb_lock nlk_cb_mutex-GENERIC genl_mutex fs_reclaim irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &wdev->pmsr_lock irq_context: 0 cb_lock nlk_cb_mutex-GENERIC genl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &rdev->bss_lock irq_context: 0 cb_lock nlk_cb_mutex-GENERIC genl_mutex pool_lock#2 irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx irq_context: 0 cb_lock nlk_cb_mutex-GENERIC genl_mutex nfc_devlist_mutex irq_context: 0 cb_lock nlk_cb_mutex-GENERIC genl_mutex nfc_devlist_mutex &k->list_lock irq_context: 0 cb_lock nlk_cb_mutex-GENERIC genl_mutex nfc_devlist_mutex &k->k_lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sta->ampdu_mlme.mtx irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sta->ampdu_mlme.mtx &sta->lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx (work_completion)(&sta->ampdu_mlme.work) irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx rcu_read_lock rhashtable_bucket irq_context: 0 nlk_cb_mutex-GENERIC irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx rcu_state.exp_mutex rcu_node_0 irq_context: 0 nlk_cb_mutex-GENERIC pool_lock#2 irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx rcu_state.exp_mutex &obj_hash[i].lock irq_context: 0 nlk_cb_mutex-GENERIC &c->lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx rcu_state.exp_mutex rcu_read_lock &pool->lock irq_context: 0 nlk_cb_mutex-GENERIC &____s->seqcount irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx rcu_state.exp_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 nlk_cb_mutex-GENERIC genl_mutex irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx 
&local->sta_mtx rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 nlk_cb_mutex-GENERIC genl_mutex nfc_devlist_mutex irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 nlk_cb_mutex-GENERIC genl_mutex nfc_devlist_mutex &rq->__lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx rcu_state.exp_mutex &rnp->exp_wq[1] irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx rcu_state.exp_mutex &rq->__lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx rcu_state.exp_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 nlk_cb_mutex-GENERIC rlock-AF_NETLINK irq_context: 0 nlk_cb_mutex-GENERIC genl_mutex &obj_hash[i].lock irq_context: 0 nlk_cb_mutex-GENERIC genl_mutex pool_lock#2 irq_context: 0 nlk_cb_mutex-GENERIC &obj_hash[i].lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &obj_hash[i].lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sta->lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx pool_lock#2 irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx krc.lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &local->key_mtx irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx fs_reclaim irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &fq->lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &rq->__lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx nl_table_lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx nl_table_wait.lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx pin_fs_lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &dentry->d_lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 &dentry->d_lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 &dentry->d_lock &dentry->d_lock/1 irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 tk_core.seq.seqcount irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 rename_lock.seqcount irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 pin_fs_lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 rcu_read_lock mount_lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 rcu_read_lock mount_lock mount_lock.seqcount irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 mount_lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 mount_lock mount_lock.seqcount irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 rcu_read_lock &dentry->d_lock irq_context: 0 
rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 &fsnotify_mark_srcu irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 &sb->s_type->i_lock_key#7 irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 &s->s_inode_list_lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 &xa->xa_lock#6 irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 &obj_hash[i].lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 pool_lock#2 irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 &rq->__lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock remove_cache_srcu &____s->seqcount irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock pool_lock#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock &c->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock &____s->seqcount#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock &____s->seqcount irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock rcu_read_lock rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh pool_lock#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock rcu_read_lock rlock-AF_NETLINK irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock &list->lock#26 
irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 &fsnotify_mark_srcu &rq->__lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 &fsnotify_mark_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh pool_lock#2 irq_context: softirq rcu_read_lock rcu_read_lock rlock-AF_NETLINK irq_context: softirq rcu_read_lock &list->lock#26 irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 rcu_read_lock rcu_node_0 irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 rcu_read_lock &rq->__lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 &obj_hash[i].lock pool_lock irq_context: 0 &mm->mmap_lock fill_pool_map-wait-type-override &rq->__lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx rcu_read_lock &dentry->d_lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &fsnotify_mark_srcu irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_lock_key#7 irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &s->s_inode_list_lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &xa->xa_lock#6 irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx rcu_read_lock mount_lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx rcu_read_lock mount_lock mount_lock.seqcount irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx mount_lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx mount_lock mount_lock.seqcount irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &local->active_txq_lock[i] irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx (work_completion)(&sta->drv_deliver_wk) irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx rcu_read_lock pool_lock#2 irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &local->chanctx_mtx irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &ifibss->incomplete_lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx lweventlist_lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx lweventlist_lock pool_lock#2 irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx lweventlist_lock &dir->lock#2 irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx rcu_read_lock &pool->lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &rq->__lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &rq->__lock 
&per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &obj_hash[i].lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx pool_lock#2 irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx krc.lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx hrtimer_bases.lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx hrtimer_bases.lock &obj_hash[i].lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->mtx irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->mtx &local->chanctx_mtx irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->mtx &local->chanctx_mtx &data->mutex irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->mtx &local->chanctx_mtx &local->queue_stop_reason_lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->mtx &local->chanctx_mtx rcu_read_lock &fq->lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->mtx &local->chanctx_mtx rcu_read_lock rcu_node_0 irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->mtx &local->chanctx_mtx rcu_read_lock &rq->__lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->mtx &local->chanctx_mtx rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 kn->active#5 remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock genl_mutex rcu_read_lock rcu_node_0 irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->mtx &local->chanctx_mtx rcu_read_lock &local->queue_stop_reason_lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->mtx &local->chanctx_mtx rcu_node_0 irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->mtx &local->chanctx_mtx &rq->__lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->mtx &local->chanctx_mtx &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->mtx &local->chanctx_mtx &obj_hash[i].lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->mtx &local->chanctx_mtx pool_lock#2 irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->mtx &local->chanctx_mtx krc.lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx rcu_state.exp_mutex rcu_node_0 irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx rcu_state.exp_mutex &obj_hash[i].lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx rcu_state.exp_mutex rcu_read_lock &pool->lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx rcu_state.exp_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx rcu_state.exp_mutex &rq->__lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx rcu_state.exp_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#12 &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#12 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx rcu_state.exp_mutex &rnp->exp_wq[2] irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &list->lock#18 irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx (&ifibss->timer) irq_context: 0 
rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &base->lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &base->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &rdev->bss_lock &obj_hash[i].lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &rdev->bss_lock pool_lock#2 irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &rdev->bss_lock krc.lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx rcu_read_lock &pool->lock/1 irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)cfg80211 (work_completion)(&(&rdev->dfs_update_channels_wk)->work) irq_context: 0 (wq_completion)cfg80211 (work_completion)(&(&rdev->dfs_update_channels_wk)->work) rtnl_mutex irq_context: 0 (wq_completion)cfg80211 (work_completion)(&(&rdev->dfs_update_channels_wk)->work) rtnl_mutex rtnl_mutex.wait_lock irq_context: 0 (wq_completion)cfg80211 (work_completion)(&(&rdev->dfs_update_channels_wk)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)cfg80211 (work_completion)(&(&rdev->dfs_update_channels_wk)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex (work_completion)(&wdev->disconnect_wk) irq_context: 0 rtnl_mutex (work_completion)(&wdev->pmsr_free_wk) irq_context: 0 rtnl_mutex (work_completion)(&sdata->activate_links_work) irq_context: 0 rtnl_mutex &rdev->wiphy.mtx rcu_state.exp_mutex rcu_node_0 irq_context: 0 rtnl_mutex &rdev->wiphy.mtx rcu_state.exp_mutex &obj_hash[i].lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx rcu_state.exp_mutex rcu_read_lock &pool->lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx rcu_state.exp_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &rdev->wiphy.mtx rcu_state.exp_mutex &rq->__lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx rcu_state.exp_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &local->mtx irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &rdev->wiphy_work_lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &local->sta_mtx irq_context: 0 rtnl_mutex &rdev->wiphy.mtx _xmit_ETHER irq_context: 0 rtnl_mutex &rdev->wiphy.mtx _xmit_ETHER &local->filter_lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx _xmit_ETHER &local->filter_lock &obj_hash[i].lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx _xmit_ETHER &local->filter_lock pool_lock#2 irq_context: 0 rtnl_mutex &rdev->wiphy.mtx _xmit_ETHER &local->filter_lock krc.lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx (&local->dynamic_ps_timer) irq_context: 0 rtnl_mutex &rdev->wiphy.mtx (work_completion)(&local->dynamic_ps_enable_work) irq_context: 0 rtnl_mutex &rdev->wiphy.mtx (work_completion)(&local->dynamic_ps_enable_work) &rq->__lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx 
(work_completion)(&local->dynamic_ps_enable_work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_SMC k-sk_lock-AF_INET rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &rdev->wiphy.mtx (work_completion)(&sdata->recalc_smps) irq_context: 0 rtnl_mutex &rdev->wiphy.mtx (work_completion)(&link->csa_finalize_work) irq_context: 0 rtnl_mutex &rdev->wiphy.mtx (work_completion)(&link->color_change_finalize_work) irq_context: 0 rtnl_mutex &rdev->wiphy.mtx (work_completion)(&(&link->dfs_cac_timer_work)->work) irq_context: 0 rtnl_mutex &rdev->wiphy.mtx (work_completion)(&(&sdata->dec_tailroom_needed_wk)->work) irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &local->key_mtx irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &local->key_mtx rcu_state.exp_mutex rcu_node_0 irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &local->key_mtx rcu_state.exp_mutex &obj_hash[i].lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &local->key_mtx rcu_state.exp_mutex &rq->__lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &local->key_mtx rcu_state.exp_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &local->key_mtx rcu_state.exp_mutex rcu_read_lock &pool->lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &local->key_mtx rcu_state.exp_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &local->key_mtx rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &local->key_mtx rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &local->key_mtx rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &local->key_mtx rcu_state.exp_mutex &rnp->exp_wq[1] irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 jbd2_handle rcu_node_0 irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &local->key_mtx &obj_hash[i].lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &list->lock#18 irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &local->queue_stop_reason_lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &list->lock#19 irq_context: 0 rtnl_mutex &rdev->wiphy.mtx rcu_read_lock &obj_hash[i].lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx rcu_read_lock rcu_read_lock pool_lock#2 irq_context: 0 rtnl_mutex &rdev->wiphy.mtx rcu_read_lock rcu_read_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx rcu_read_lock rcu_read_lock rcu_read_lock_bh pool_lock#2 irq_context: 0 rtnl_mutex &rdev->wiphy.mtx rcu_read_lock rlock-AF_NETLINK irq_context: 0 rtnl_mutex &rdev->wiphy.mtx (work_completion)(&local->reconfig_filter) irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &dev->mutex uevent_sock_mutex &n->list_lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wq->mutex irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wq->mutex &pool->lock/1 irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wq->mutex &x->wait#10 irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &list->lock#26 irq_context: 0 rtnl_mutex _xmit_ETHER &obj_hash[i].lock irq_context: 0 rtnl_mutex _xmit_ETHER krc.lock irq_context: 0 rtnl_mutex &rdev->dev_wait irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &tbl->lock &____s->seqcount 
irq_context: 0 rtnl_mutex &tbl->lock rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)gid-cache-wq (work_completion)(&work->work) &base->lock irq_context: 0 (wq_completion)gid-cache-wq (work_completion)(&work->work) &base->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex &idev->mc_lock _xmit_ETHER &obj_hash[i].lock irq_context: 0 rtnl_mutex &idev->mc_lock _xmit_ETHER krc.lock irq_context: 0 rtnl_mutex &idev->mc_lock remove_cache_srcu irq_context: 0 rtnl_mutex &idev->mc_lock remove_cache_srcu quarantine_lock irq_context: 0 rtnl_mutex &idev->mc_lock remove_cache_srcu &c->lock irq_context: 0 rtnl_mutex &idev->mc_lock remove_cache_srcu &n->list_lock irq_context: 0 rtnl_mutex &idev->mc_lock remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 rtnl_mutex &idev->mc_lock remove_cache_srcu &obj_hash[i].lock irq_context: 0 rtnl_mutex &idev->mc_lock remove_cache_srcu rcu_read_lock rcu_node_0 irq_context: 0 rtnl_mutex &idev->mc_lock remove_cache_srcu rcu_read_lock &rq->__lock irq_context: 0 rtnl_mutex &idev->mc_lock remove_cache_srcu rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &idev->mc_lock remove_cache_srcu &rq->__lock irq_context: 0 rtnl_mutex &idev->mc_lock remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &idev->mc_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&w->work)#2 &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&w->work)#2 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)cfg80211 (work_completion)(&(&rdev->dfs_update_channels_wk)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)cfg80211 (work_completion)(&(&rdev->dfs_update_channels_wk)->work) &p->pi_lock irq_context: 0 (wq_completion)cfg80211 (work_completion)(&(&rdev->dfs_update_channels_wk)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)cfg80211 (work_completion)(&(&rdev->dfs_update_channels_wk)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex stock_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_CAN &rnp->exp_wq[0] irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_CAN &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem (console_sem).lock irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem console_lock console_srcu console_owner_lock irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem console_lock console_srcu console_owner irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem console_lock console_srcu console_owner &port_lock_key irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem __ip_vs_mutex &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem __ip_vs_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem console_lock console_srcu console_owner console_owner_lock irq_context: 0 remove_cache_srcu rcu_read_lock &rcu_state.expedited_wq irq_context: 0 remove_cache_srcu rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 remove_cache_srcu rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 remove_cache_srcu rcu_read_lock &rcu_state.expedited_wq 
&p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem &n->list_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem &n->list_lock &c->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 rcu_node_0 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem nf_log_mutex &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem nf_log_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &xt[i].mutex &cfs_rq->removed.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem kernfs_idr_lock &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem kernfs_idr_lock pool_lock#2 irq_context: 0 (wq_completion)inet_frag_wq (work_completion)(&fqdir->destroy_work) &ht->mutex &rq->__lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 tomoyo_ss &cfs_rq->removed.lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &macsec_netdev_addr_lock_key/1 _xmit_ETHER &local->filter_lock irq_context: 0 rtnl_mutex &macsec_netdev_addr_lock_key/1 _xmit_ETHER rcu_read_lock &pool->lock/1 irq_context: 0 rtnl_mutex &macsec_netdev_addr_lock_key/1 _xmit_ETHER rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 rtnl_mutex &macsec_netdev_addr_lock_key/1 _xmit_ETHER rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 rtnl_mutex &macsec_netdev_addr_lock_key/1 _xmit_ETHER rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 rtnl_mutex &macsec_netdev_addr_lock_key/1 _xmit_ETHER rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &macsec_netdev_addr_lock_key/1 pool_lock#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss remove_cache_srcu &____s->seqcount irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss remove_cache_srcu pool_lock#2 irq_context: 0 cb_lock remove_cache_srcu &rq->__lock irq_context: 0 cb_lock remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &mm->mmap_lock rcu_read_lock rcu_node_0 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &mm->mmap_lock rcu_read_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &mm->mmap_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &mm->mmap_lock rcu_read_lock &rcu_state.expedited_wq irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &mm->mmap_lock rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &mm->mmap_lock rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &mm->mmap_lock rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &dev->tx_global_lock &qdisc_xmit_lock_key irq_context: 0 (wq_completion)wg-crypt-wg1#8 
(work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#7 irq_context: 0 rtnl_mutex &macsec_netdev_addr_lock_key/1 &obj_hash[i].lock irq_context: 0 rtnl_mutex &macsec_netdev_addr_lock_key/1 krc.lock irq_context: 0 rtnl_mutex &idev->mc_lock &macsec_netdev_addr_lock_key/1 irq_context: 0 rtnl_mutex &idev->mc_lock &macsec_netdev_addr_lock_key/1 &obj_hash[i].lock irq_context: 0 rtnl_mutex &idev->mc_lock &macsec_netdev_addr_lock_key/1 pool_lock#2 irq_context: 0 rtnl_mutex &idev->mc_lock &macsec_netdev_addr_lock_key/1 krc.lock irq_context: 0 file_rwsem &rq->__lock irq_context: 0 sb_writers#8 &sb->s_type->i_mutex_key#13 &root->kernfs_iattr_rwsem &rq->__lock irq_context: 0 sb_writers#8 &sb->s_type->i_mutex_key#13 &root->kernfs_iattr_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#8 rcu_read_lock mount_lock.seqcount irq_context: 0 &sb->s_type->i_mutex_key#8 rcu_read_lock rcu_node_0 irq_context: 0 &sb->s_type->i_mutex_key#8 rcu_read_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#8 rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#8 rcu_read_lock rcu_read_lock rename_lock.seqcount irq_context: 0 &sb->s_type->i_mutex_key#8 rcu_read_lock &rcu_state.gp_wq irq_context: 0 &sb->s_type->i_mutex_key#8 rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 &sb->s_type->i_mutex_key#8 rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#8 rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#8 ima_extend_list_mutex irq_context: 0 &sb->s_type->i_mutex_key#8 ima_extend_list_mutex fs_reclaim irq_context: 0 &sb->s_type->i_mutex_key#8 ima_extend_list_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &sb->s_type->i_mutex_key#8 ima_extend_list_mutex pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#8 &p->alloc_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &list->lock irq_context: 0 &sb->s_type->i_mutex_key#8 kauditd_wait.lock irq_context: 0 &sb->s_type->i_mutex_key#8 kauditd_wait.lock &p->pi_lock irq_context: 0 &sb->s_type->i_mutex_key#8 kauditd_wait.lock &p->pi_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#8 kauditd_wait.lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock genl_mutex console_owner_lock irq_context: 0 cb_lock genl_mutex console_owner irq_context: 0 sk_lock-AF_INET6 clock-AF_INET6 irq_context: 0 sk_lock-AF_INET6 &sctp_port_hashtable[i].lock irq_context: 0 sk_lock-AF_INET6 &sctp_port_hashtable[i].lock &____s->seqcount#2 irq_context: 0 sk_lock-AF_INET6 &sctp_port_hashtable[i].lock &____s->seqcount irq_context: 0 sk_lock-AF_INET6 &sctp_port_hashtable[i].lock pool_lock#2 irq_context: 0 sk_lock-AF_INET6 &sctp_port_hashtable[i].lock &c->lock irq_context: 0 (wq_completion)wg-crypt-wg1#8 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &rdev->bss_lock batched_entropy_u8.lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &rdev->bss_lock kfence_freelist_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &rdev->bss_lock &meta->lock irq_context: 0 sk_lock-AF_INET6 crngs.lock 
irq_context: 0 sk_lock-AF_INET6 rcu_read_lock &n->list_lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock &n->list_lock &c->lock irq_context: 0 rtnl_mutex cpu_hotplug_lock &list->lock#5 irq_context: 0 rtnl_mutex bpf_devs_lock irq_context: 0 rtnl_mutex &hwstats->hwsdev_list_lock irq_context: 0 rtnl_mutex &hwstats->hwsdev_list_lock &rq->__lock irq_context: 0 rtnl_mutex &ul->lock irq_context: 0 rtnl_mutex &net->xdp.lock irq_context: 0 rtnl_mutex mirred_list_lock irq_context: 0 rtnl_mutex &idev->mc_report_lock irq_context: 0 rtnl_mutex &idev->mc_lock krc.lock irq_context: 0 rtnl_mutex &pnn->pndevs.lock irq_context: 0 rtnl_mutex &pnn->routes.lock irq_context: 0 rtnl_mutex dev_pm_qos_sysfs_mtx irq_context: 0 rtnl_mutex dev_pm_qos_sysfs_mtx &root->kernfs_rwsem irq_context: 0 rtnl_mutex dev_pm_qos_sysfs_mtx &root->kernfs_rwsem irq_context: 0 rtnl_mutex dev_pm_qos_sysfs_mtx dev_pm_qos_mtx irq_context: 0 rtnl_mutex subsys mutex#17 &k->k_lock klist_remove_lock irq_context: 0 rtnl_mutex subsys mutex#17 &rq->__lock irq_context: 0 rtnl_mutex subsys mutex#17 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex deferred_probe_mutex irq_context: 0 rtnl_mutex device_links_lock irq_context: 0 rtnl_mutex uevent_sock_mutex mmu_notifier_invalidate_range_start irq_context: 0 rtnl_mutex uevent_sock_mutex mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 rtnl_mutex uevent_sock_mutex mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rhashtable_bucket irq_context: 0 sk_lock-AF_INET6 &base->lock irq_context: 0 sk_lock-AF_INET6 &base->lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock pool_lock#2 irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &ct->lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock &____s->seqcount#7 irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &nf_nat_locks[i] irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &nf_conntrack_locks[i] irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &nf_conntrack_locks[i] batched_entropy_u8.lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex krc.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &n->list_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &n->list_lock &c->lock irq_context: 0 rcu_state.barrier_mutex rcu_state.barrier_lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET6 &asoc->wait irq_context: 0 sk_lock-AF_INET6 tk_core.seq.seqcount irq_context: 0 sk_lock-AF_INET6 &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)netns net_cleanup_work rcu_state.barrier_mutex rcu_state.barrier_mutex.wait_lock irq_context: 0 sk_lock-AF_SMC k-sk_lock-AF_INET &____s->seqcount#2 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_CAN &rnp->exp_wq[1] irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock stock_lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock pool_lock#2 irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock 
rcu_read_lock &c->lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_CAN rcu_state.exp_mutex &rnp->exp_wq[1] irq_context: 0 sk_lock-AF_INET6 fill_pool_map-wait-type-override &rq->__lock irq_context: 0 sk_lock-AF_INET6 fill_pool_map-wait-type-override &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_INET6 slock-AF_INET6 &sk->sk_lock.wq irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &n->list_lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &n->list_lock &c->lock irq_context: 0 rcu_state.barrier_mutex.wait_lock irq_context: 0 dev_base_lock irq_context: 0 lweventlist_lock irq_context: 0 lweventlist_lock pool_lock#2 irq_context: 0 lweventlist_lock &dir->lock#2 irq_context: 0 &dir->lock#2 irq_context: 0 &dir->lock#2 &obj_hash[i].lock irq_context: 0 &dir->lock#2 pool_lock#2 irq_context: 0 netdev_unregistering_wq.lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &sbi->s_orphan_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &sbi->s_orphan_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 remove_cache_srcu rcu_node_0 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &p->alloc_lock &x->wait &p->pi_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-sk_lock-AF_RXRPC &rq->__lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-sk_lock-AF_RXRPC quarantine_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex nf_hook_mutex &____s->seqcount#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex nf_hook_mutex &____s->seqcount irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex defrag6_mutex &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &pool->lock/1 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx rcu_state.exp_mutex &rnp->exp_wq[2] irq_context: 0 cgroup_threadgroup_rwsem freezer_mutex &rq->__lock irq_context: 0 cgroup_threadgroup_rwsem freezer_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock genl_mutex rcu_node_0 irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &pool->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &rnp->exp_wq[2] irq_context: 0 (wq_completion)events 
(work_completion)(&pwq->unbound_release_work) rcu_state.exp_mutex &rnp->exp_wq[2] irq_context: 0 sk_lock-AF_INET6 fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 sk_lock-AF_INET6 fill_pool_map-wait-type-override pool_lock irq_context: 0 rtnl_mutex _xmit_ETHER rcu_read_lock &pool->lock/1 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 rtnl_mutex net_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&pwq->unbound_release_work) &obj_hash[i].lock pool_lock irq_context: 0 rtnl_mutex &rnp->exp_lock irq_context: 0 rtnl_mutex &rnp->exp_wq[1] irq_context: 0 sk_lock-AF_INET6 rcu_read_lock key#22 irq_context: 0 rtnl_mutex &rdev->wiphy.mtx rcu_state.exp_mutex.wait_lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &p->pi_lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &p->pi_lock &rq->__lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &local->key_mtx &rnp->exp_lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &local->key_mtx &rnp->exp_wq[3] irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &local->key_mtx &rq->__lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &local->key_mtx &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_INET6 krc.lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 tomoyo_ss remove_cache_srcu rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET rcu_node_0 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET &rcu_state.expedited_wq irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET &rcu_state.expedited_wq &p->pi_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)rcu_gp (work_completion)(&rew->rew_work) rcu_state.exp_wake_mutex &rnp->exp_wq[0] &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock batched_entropy_u32.lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &macsec_netdev_addr_lock_key/1 &c->lock irq_context: 0 rtnl_mutex rcu_state.exp_mutex irq_context: 0 rtnl_mutex rcu_state.exp_mutex rcu_state.exp_mutex.wait_lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &anon_vma->rwsem rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &rnp->exp_wq[3] irq_context: 0 sb_writers#8 tomoyo_ss rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#8 kn->active#5 batched_entropy_u8.lock irq_context: 0 sb_writers#8 kn->active#5 kfence_freelist_lock irq_context: 0 (wq_completion)wg-crypt-wg0#4 (work_completion)(&peer->transmit_packet_work) &rq->__lock irq_context: 0 sk_lock-AF_INET6 &list->lock#27 irq_context: 0 sb_writers#8 tomoyo_ss rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 sb_writers#8 tomoyo_ss rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#8 
tomoyo_ss rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#8 &sb->s_type->i_mutex_key#13 &rq->__lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 tomoyo_ss mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_INET6 sctp_assocs_id_lock irq_context: 0 sk_lock-AF_INET6 sctp_assocs_id_lock pool_lock#2 irq_context: 0 sk_lock-AF_INET6 krc.lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET6 krc.lock &base->lock irq_context: 0 sk_lock-AF_INET6 krc.lock &base->lock &obj_hash[i].lock irq_context: 0 slock-AF_INET6 &sk->sk_lock.wq irq_context: 0 slock-AF_INET6 &sk->sk_lock.wq &p->pi_lock irq_context: 0 slock-AF_INET6 &sk->sk_lock.wq &p->pi_lock &rq->__lock irq_context: 0 slock-AF_INET6 &sk->sk_lock.wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq rcu_callback &x->wait#24 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 sk_lock-AF_INET6 &mm->mmap_lock ptlock_ptr(page)#2 irq_context: 0 sk_lock-AF_INET6 &mm->mmap_lock sb_pagefaults irq_context: 0 sk_lock-AF_INET6 &mm->mmap_lock sb_pagefaults tk_core.seq.seqcount irq_context: 0 sk_lock-AF_INET6 &mm->mmap_lock sb_pagefaults mmu_notifier_invalidate_range_start irq_context: 0 sk_lock-AF_INET6 &mm->mmap_lock sb_pagefaults &journal->j_state_lock irq_context: 0 sk_lock-AF_INET6 &mm->mmap_lock sb_pagefaults jbd2_handle irq_context: 0 sk_lock-AF_INET6 &mm->mmap_lock sb_pagefaults jbd2_handle &rq->__lock irq_context: 0 sk_lock-AF_INET6 &mm->mmap_lock sb_pagefaults jbd2_handle &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_INET6 &mm->mmap_lock sb_pagefaults jbd2_handle &ei->i_raw_lock irq_context: 0 sk_lock-AF_INET6 &mm->mmap_lock sb_pagefaults jbd2_handle &journal->j_wait_updates irq_context: 0 sk_lock-AF_INET6 &mm->mmap_lock sb_pagefaults &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET6 &mm->mmap_lock sb_pagefaults mapping.invalidate_lock irq_context: 0 sk_lock-AF_INET6 &mm->mmap_lock sb_pagefaults mapping.invalidate_lock &mapping->private_lock irq_context: 0 sk_lock-AF_INET6 &mm->mmap_lock sb_pagefaults mapping.invalidate_lock &mapping->private_lock rcu_read_lock &memcg->move_lock irq_context: 0 sk_lock-AF_INET6 &mm->mmap_lock &mapping->private_lock irq_context: 0 sk_lock-AF_INET6 &mm->mmap_lock &mapping->private_lock rcu_read_lock &memcg->move_lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ei->i_prealloc_lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &sbi->s_md_lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &bgl->locks[i].lock &sbi->s_md_lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &bgl->locks[i].lock &ei->i_prealloc_lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem key#3 irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &pa->pa_lock#2 irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &(ei->i_block_reservation_lock) irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &(ei->i_block_reservation_lock) key#15 irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &sb->s_type->i_lock_key#22 irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ei->i_es_lock 
&obj_hash[i].lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ei->i_es_lock pool_lock#2 irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &mapping->i_mmap_rwsem mmu_notifier_invalidate_range_start irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &mapping->i_mmap_rwsem mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &mapping->i_mmap_rwsem mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &mapping->i_mmap_rwsem ptlock_ptr(page)#2 irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &mapping->i_mmap_rwsem &rq->__lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &mapping->i_mmap_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &mapping->private_lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &mapping->private_lock rcu_read_lock &memcg->move_lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock rcu_read_lock &q->queue_lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock rcu_read_lock &q->queue_lock pool_lock#2 irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock rcu_read_lock &q->queue_lock pcpu_lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock rcu_read_lock &q->queue_lock &obj_hash[i].lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock rcu_read_lock &q->queue_lock percpu_counters_lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock rcu_read_lock &q->queue_lock &blkcg->lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock rcu_read_lock &q->queue_lock &blkcg->lock pool_lock#2 irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &rq->__lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock key#10 irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &xa->xa_lock#6 key#10 irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &ei->i_raw_lock irq_context: 0 &sbi->s_writepages_rwsem rcu_read_lock rcu_node_0 irq_context: 0 &sbi->s_writepages_rwsem rcu_read_lock &rq->__lock irq_context: 0 &sbi->s_writepages_rwsem rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sbi->s_writepages_rwsem rcu_read_lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 slock-AF_INET6 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 rlock-AF_INET6 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 &list->lock#25 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 &base->lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 &base->lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle &ei->i_data_sem &c->lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 fs_reclaim irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 fs_reclaim 
mmu_notifier_invalidate_range_start irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 rcu_read_lock rcu_read_lock &____s->seqcount#7 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 rcu_read_lock rcu_read_lock &ct->lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 rcu_read_lock rcu_read_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 rcu_read_lock rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 tk_core.seq.seqcount irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 &list->lock#27 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 rcu_read_lock &____s->seqcount irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 rcu_read_lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 krc.lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 krc.lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 krc.lock &base->lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 krc.lock &base->lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 &obj_hash[i].lock pool_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 rcu_read_lock rhashtable_bucket irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 fill_pool_map-wait-type-override &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 fill_pool_map-wait-type-override &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 rcu_read_lock rcu_node_0 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 fill_pool_map-wait-type-override &c->lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 fill_pool_map-wait-type-override &n->list_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 fill_pool_map-wait-type-override pool_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 rcu_read_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &rnp->exp_wq[2] irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 rcu_node_0 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 &rcu_state.expedited_wq irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 &rcu_state.expedited_wq &p->pi_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 &sb->s_type->i_mutex_key#10 
sk_lock-AF_INET6/1 rcu_read_lock rcu_read_lock &pool->lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 rcu_read_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 rcu_read_lock &rcu_state.expedited_wq irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &root->kernfs_rwsem rcu_read_lock rcu_node_0 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 sctp_assocs_id_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 sctp_assocs_id_lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 sctp_assocs_id_lock fill_pool_map-wait-type-override &c->lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 sctp_assocs_id_lock fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 rcu_state.barrier_mutex rcu_state.barrier_mutex.wait_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 sctp_assocs_id_lock fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 sctp_assocs_id_lock fill_pool_map-wait-type-override pool_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 sctp_assocs_id_lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 sctp_assocs_id_lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 &net->sctp.addr_wq_lock irq_context: 0 &sb->s_type->i_mutex_key#10 &net->sctp.addr_wq_lock slock-AF_INET6/1 irq_context: 0 &sb->s_type->i_mutex_key#10 &net->sctp.addr_wq_lock slock-AF_INET6/1 &sctp_ep_hashtable[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 &net->sctp.addr_wq_lock slock-AF_INET6/1 &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 &net->sctp.addr_wq_lock slock-AF_INET6/1 pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 &net->sctp.addr_wq_lock slock-AF_INET6/1 krc.lock irq_context: 0 &sb->s_type->i_mutex_key#10 &net->sctp.addr_wq_lock slock-AF_INET6/1 &sctp_port_hashtable[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 &net->sctp.addr_wq_lock slock-AF_INET6/1 &sctp_port_hashtable[i].lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 &net->sctp.addr_wq_lock slock-AF_INET6/1 &sctp_port_hashtable[i].lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 &net->sctp.addr_wq_lock slock-AF_INET6/1 clock-AF_INET6 irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 fill_pool_map-wait-type-override pool_lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx rcu_state.exp_mutex &rnp->exp_wq[0] irq_context: 0 
(wq_completion)events (work_completion)(&(&krcp->krw_arr[i].rcu_work)->work) rcu_node_0 irq_context: 0 (wq_completion)events (work_completion)(&(&krcp->krw_arr[i].rcu_work)->work) &rcu_state.expedited_wq irq_context: 0 (wq_completion)events (work_completion)(&(&krcp->krw_arr[i].rcu_work)->work) &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)events (work_completion)(&(&krcp->krw_arr[i].rcu_work)->work) &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&(&krcp->krw_arr[i].rcu_work)->work) &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&(&krcp->krw_arr[i].rcu_work)->work) &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&(&krcp->krw_arr[i].rcu_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &rdev->wiphy.mtx (work_completion)(&(&sdata->dec_tailroom_needed_wk)->work) &rq->__lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx (work_completion)(&(&sdata->dec_tailroom_needed_wk)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&(&krcp->krw_arr[i].rcu_work)->work) rcu_callback rcu_node_0 irq_context: 0 (wq_completion)events (work_completion)(&(&krcp->krw_arr[i].rcu_work)->work) rcu_callback &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx &local->key_mtx rcu_state.exp_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex flowtable_lock &rq->__lock irq_context: 0 rtnl_mutex flowtable_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &p->pi_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)wg-kex-wg2#14 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg2#14 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &list->lock#17 irq_context: 0 &ei->i_data_sem &rq->__lock irq_context: 0 sb_writers#4 rcu_read_lock &rcu_state.gp_wq irq_context: 0 sb_writers#4 rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 sb_writers#4 rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 rcu_read_lock &rcu_state.expedited_wq irq_context: 0 sb_writers#4 rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 sb_writers#4 rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 kn->active#5 &kernfs_locks->open_file_mutex[count] fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 tracepoints_mutex rcu_state.exp_mutex &rnp->exp_wq[2] irq_context: 0 
(wq_completion)rcu_gp (work_completion)(&sdp->work) rcu_node_0 irq_context: 0 (wq_completion)rcu_gp (work_completion)(&sdp->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&p->wq) &base->lock irq_context: 0 (wq_completion)events (work_completion)(&p->wq) &base->lock &obj_hash[i].lock irq_context: 0 dup_mmap_sem &rq->__lock irq_context: 0 dup_mmap_sem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_data_sem rcu_read_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_data_sem rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 kn->active#5 rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx quarantine_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &n->list_lock &c->lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx rcu_read_lock rcu_read_lock &c->lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx rcu_read_lock rcu_read_lock &n->list_lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx rcu_read_lock rcu_read_lock &n->list_lock &c->lock irq_context: softirq rcu_callback &x->wait#24 &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)events (work_completion)(&w->w) nfc_devlist_mutex &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&w->w) nfc_devlist_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &ei->i_data_sem rcu_node_0 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &ei->i_data_sem &rcu_state.expedited_wq irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &ei->i_data_sem &rcu_state.expedited_wq &p->pi_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &ei->i_data_sem &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &ei->i_data_sem &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex remove_cache_srcu &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &idev->mc_lock &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &idev->mc_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)netns 
net_cleanup_work pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem rcu_read_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex sysctl_lock krc.lock &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex sysctl_lock krc.lock &base->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex sysctl_lock krc.lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_state.barrier_mutex &rq->__lock &cfs_rq->removed.lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx rcu_state.exp_mutex &rnp->exp_wq[3] irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &local->key_mtx rcu_state.exp_mutex &rnp->exp_wq[0] irq_context: 0 (wq_completion)events (work_completion)(&(&krcp->monitor_work)->work) rcu_callback stock_lock irq_context: 0 rtnl_mutex remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &disk->open_mutex &rq->__lock irq_context: 0 &disk->open_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex remove_cache_srcu pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem batched_entropy_u8.lock irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem kfence_freelist_lock irq_context: 0 (wq_completion)events (work_completion)(&w->w) &meta->lock irq_context: 0 (wq_completion)events (work_completion)(&w->w) kfence_freelist_lock irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem rcu_node_0 irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem &rcu_state.expedited_wq irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem &rcu_state.expedited_wq &p->pi_lock irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)gid-cache-wq (work_completion)(&ndev_work->work) &meta->lock irq_context: 0 (wq_completion)gid-cache-wq (work_completion)(&ndev_work->work) kfence_freelist_lock irq_context: 0 rtnl_mutex rcu_state.exp_mutex &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)events (work_completion)(&p->wq) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#8 irq_context: 0 &sb->s_type->i_mutex_key#8 fs_reclaim irq_context: 0 &sb->s_type->i_mutex_key#8 fs_reclaim &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#8 fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#8 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &sb->s_type->i_mutex_key#8 &c->lock irq_context: 0 &sb->s_type->i_mutex_key#8 pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#8 &sb->s_type->i_lock_key#22 irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem irq_context: 0 
&sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &xa->xa_lock#6 irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem mmu_notifier_invalidate_range_start irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem lock#4 irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem lock#4 &lruvec->lru_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem lock#5 irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &journal->j_state_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle lock#4 irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle lock#5 irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle mmu_notifier_invalidate_range_start irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_es_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem mmu_notifier_invalidate_range_start irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ei->i_prealloc_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &sbi->s_md_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &bgl->locks[i].lock &sbi->s_md_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &bgl->locks[i].lock &ei->i_prealloc_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &pa->pa_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ei->i_raw_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &(ei->i_block_reservation_lock) irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &sb->s_type->i_lock_key#22 irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ei->i_es_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ei->i_es_lock irq_context: 0 &sb->s_type->i_mutex_key#8 
&sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ei->i_es_lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &mapping->i_mmap_rwsem mmu_notifier_invalidate_range_start irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &mapping->i_mmap_rwsem mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &mapping->i_mmap_rwsem mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &mapping->i_mmap_rwsem ptlock_ptr(page)#2 irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &mapping->private_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &mapping->private_lock rcu_read_lock &memcg->move_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &memcg->move_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &xa->xa_lock#6 irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &xa->xa_lock#6 &s->s_inode_wblist_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock rcu_node_0 irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &rcu_state.expedited_wq irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex stock_lock rcu_read_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 rtnl_mutex per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &xa->xa_lock#6 key#10 irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle lock#4 &lruvec->lru_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle lock#4 &lruvec->lru_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock key#10 irq_context: 0 rtnl_mutex &pnn->pndevs.lock &rq->__lock irq_context: 0 rtnl_mutex &pnn->pndevs.lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle 
&ei->i_raw_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &journal->j_wait_updates irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem tk_core.seq.seqcount irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &dd->lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &base->lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &base->lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_read_lock &dd->lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_read_lock rcu_node_0 irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_read_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_read_lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_read_lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_read_lock tk_core.seq.seqcount irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 tomoyo_ss rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_read_lock &virtscsi_vq->vq_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_read_lock &rcu_state.expedited_wq irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) rcu_read_lock rcu_read_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 sk_lock-AF_INET6 &rcu_state.expedited_wq irq_context: 0 &sb->s_type->i_mutex_key#8 &wb->list_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#8 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#8 &ei->i_es_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &journal->j_state_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &____s->seqcount irq_context: 0 &sb->s_type->i_mutex_key#8 tk_core.seq.seqcount irq_context: 0 &sb->s_type->i_mutex_key#8 &dd->lock irq_context: 0 &sb->s_type->i_mutex_key#8 rcu_read_lock &dd->lock irq_context: 0 &sb->s_type->i_mutex_key#8 rcu_read_lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#8 rcu_read_lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#8 rcu_read_lock rcu_node_0 irq_context: 0 &sb->s_type->i_mutex_key#8 rcu_read_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#8 rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 &sb->s_type->i_mutex_key#8 rcu_read_lock tk_core.seq.seqcount irq_context: 0 &sb->s_type->i_mutex_key#8 rcu_read_lock &virtscsi_vq->vq_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#8 &obj_hash[i].lock pool_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &base->lock irq_context: 0 
&sb->s_type->i_mutex_key#8 &base->lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#8 rcu_node_0 irq_context: 0 &sb->s_type->i_mutex_key#8 (&timer.timer) irq_context: 0 rcu_read_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: 0 rcu_read_lock rcu_read_lock_bh &zone->lock irq_context: 0 rcu_read_lock rcu_read_lock_bh &zone->lock &____s->seqcount irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_state.exp_mutex &obj_hash[i].lock pool_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ei->i_prealloc_lock &pa->pa_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem key#3 irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &(ei->i_block_reservation_lock) key#15 irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &mapping->i_mmap_rwsem &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#8 &folio_wait_table[i] irq_context: 0 &sb->s_type->i_mutex_key#8 lock#4 irq_context: 0 &sb->s_type->i_mutex_key#8 lock#5 irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &c->lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem rcu_read_lock rcu_node_0 irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem rcu_read_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ei->i_es_lock key#2 irq_context: 0 cb_lock genl_mutex rcu_read_lock &n->list_lock irq_context: 0 cb_lock genl_mutex rcu_read_lock &n->list_lock &c->lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &c->lock irq_context: 0 sk_lock-AF_INET6 fill_pool_map-wait-type-override &c->lock irq_context: 0 sk_lock-AF_INET6 fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 sk_lock-AF_INET6 fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 sk_lock-AF_INET6 fill_pool_map-wait-type-override &n->list_lock irq_context: 0 sk_lock-AF_INET6 fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &local->key_mtx rcu_state.exp_mutex &rnp->exp_wq[3] irq_context: 0 sk_lock-AF_INET6 fill_pool_map-wait-type-override rcu_node_0 irq_context: 0 sk_lock-AF_INET6 fill_pool_map-wait-type-override &rcu_state.expedited_wq irq_context: 0 (wq_completion)rcu_gp (work_completion)(&rew->rew_work) fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 sk_lock-AF_INET6 fill_pool_map-wait-type-override &rcu_state.expedited_wq &p->pi_lock irq_context: 0 sk_lock-AF_INET6 fill_pool_map-wait-type-override &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 sk_lock-AF_INET6 fill_pool_map-wait-type-override &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &rdev->wiphy.mtx rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 rtnl_mutex &rdev->wiphy.mtx rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle &ei->i_data_sem batched_entropy_u8.lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle &ei->i_data_sem 
kfence_freelist_lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle &ei->i_data_sem &meta->lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &c->lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_node_0 irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &rcu_state.expedited_wq irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &rcu_state.expedited_wq &p->pi_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle &ei->i_data_sem remove_cache_srcu irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle &ei->i_data_sem remove_cache_srcu quarantine_lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle &ei->i_data_sem remove_cache_srcu &c->lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle &ei->i_data_sem remove_cache_srcu &n->list_lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle &ei->i_data_sem remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle &ei->i_data_sem remove_cache_srcu &obj_hash[i].lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle &ei->i_data_sem remove_cache_srcu pool_lock#2 irq_context: 0 tracepoints_mutex rcu_state.exp_mutex.wait_lock irq_context: 0 tracepoints_mutex &p->pi_lock irq_context: 0 tracepoints_mutex &p->pi_lock &rq->__lock irq_context: 0 tracepoints_mutex &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_INET6 rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)events_unbound (reaper_work).work &base->lock irq_context: 0 (wq_completion)events_unbound (reaper_work).work &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss rcu_node_0 irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 rcu_read_lock &rcu_state.gp_wq irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&w->work)#2 nf_conntrack_mutex &cfs_rq->removed.lock irq_context: 0 (wq_completion)events (work_completion)(&w->work)#2 nf_conntrack_mutex &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET remove_cache_srcu rcu_node_0 irq_context: 0 rtnl_mutex &root->kernfs_rwsem rcu_node_0 irq_context: 0 slock-AF_INET6 &sk->sk_lock.wq &p->pi_lock &cfs_rq->removed.lock irq_context: 0 sk_lock-AF_INET6 &mm->mmap_lock sb_pagefaults mapping.invalidate_lock rcu_read_lock &memcg->move_lock irq_context: 0 sk_lock-AF_INET6 &mm->mmap_lock sb_pagefaults mapping.invalidate_lock 
rcu_read_lock &xa->xa_lock#6 irq_context: 0 dup_mmap_sem &mm->mmap_lock &rq->__lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &rdev->wiphy.mtx rcu_state.exp_mutex &rnp->exp_wq[1] irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_node_0 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &rcu_state.expedited_wq irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &rcu_state.expedited_wq &p->pi_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &local->key_mtx rcu_state.exp_mutex &rnp->exp_wq[2] irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 batched_entropy_u8.lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 kfence_freelist_lock irq_context: 0 (wq_completion)events (work_completion)(&(&krcp->monitor_work)->work) rcu_callback &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#8 kauditd_wait.lock &p->pi_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex (work_completion)(&(&idev->mc_report_work)->work) &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex (work_completion)(&(&idev->mc_report_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &iint->mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 &iint->mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sbi->s_writepages_rwsem &mapping->i_mmap_rwsem mmu_notifier_invalidate_range_start irq_context: 0 &sbi->s_writepages_rwsem &mapping->i_mmap_rwsem ptlock_ptr(page)#2 irq_context: 0 &sbi->s_writepages_rwsem &mapping->private_lock irq_context: 0 &sbi->s_writepages_rwsem &mapping->private_lock rcu_read_lock &memcg->move_lock irq_context: 0 &sbi->s_writepages_rwsem rcu_read_lock &memcg->move_lock irq_context: 0 &sbi->s_writepages_rwsem rcu_read_lock &xa->xa_lock#6 irq_context: 0 &sbi->s_writepages_rwsem rcu_read_lock &xa->xa_lock#6 &s->s_inode_wblist_lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &journal->j_wait_reserved irq_context: 0 &sbi->s_writepages_rwsem rcu_read_lock &base->lock irq_context: 0 &sbi->s_writepages_rwsem rcu_read_lock &base->lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 &c->lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 &____s->seqcount#2 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 &____s->seqcount irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock stock_lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock &n->list_lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock &n->list_lock &c->lock irq_context: 0 sb_writers#4 sb_internal &base->lock irq_context: 0 sb_writers#4 sb_internal &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &n->list_lock irq_context: 0 &fsnotify_mark_srcu &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem &rq->__lock irq_context: 0 (wq_completion)netns 
net_cleanup_work pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_NETLINK rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &macsec_netdev_addr_lock_key/1 &n->list_lock irq_context: 0 rtnl_mutex &macsec_netdev_addr_lock_key/1 &n->list_lock &c->lock irq_context: 0 rtnl_mutex &macsec_netdev_addr_lock_key/1 _xmit_ETHER rcu_read_lock &pool->lock/1 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 &sb->s_type->i_mutex_key#8 &n->list_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &n->list_lock &c->lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ei->i_data_sem &ei->i_prealloc_lock &pa->pa_lock#2 irq_context: 0 rtnl_mutex remove_cache_srcu rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#8 rcu_read_lock &c->lock irq_context: 0 rtnl_mutex rcu_read_lock &tb->tb6_lock &obj_hash[i].lock pool_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &____s->seqcount#2 irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &____s->seqcount irq_context: 0 &xt[i].mutex rcu_node_0 irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_read_lock &c->lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_read_lock &____s->seqcount#2 irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_read_lock &____s->seqcount irq_context: 0 (wq_completion)rcu_gp (work_completion)(&sdp->work) &obj_hash[i].lock pool_lock irq_context: 0 &sb->s_type->i_mutex_key#8 fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#8 fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 rcu_read_lock &cfs_rq->removed.lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&ht->run_work) rcu_read_lock &pool->lock irq_context: 0 (wq_completion)events (work_completion)(&ht->run_work) rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 rcu_read_lock &vma->vm_lock->lock &rcu_state.expedited_wq irq_context: 0 rcu_read_lock &vma->vm_lock->lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 rcu_read_lock &vma->vm_lock->lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 rcu_read_lock &vma->vm_lock->lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &net->ipv6.addrconf_hash_lock rcu_read_lock &pool->lock irq_context: 0 rtnl_mutex &net->ipv6.addrconf_hash_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 cb_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&w->w) &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&w->w) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rlock-AF_INET6 irq_context: 0 (wq_completion)events (work_completion)(&(&krcp->monitor_work)->work) rcu_callback rcu_node_0 irq_context: 0 (wq_completion)events (work_completion)(&(&krcp->monitor_work)->work) rcu_callback &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock &n->list_lock irq_context: softirq rcu_read_lock rcu_read_lock 
rcu_read_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)events (work_completion)(&(&krcp->monitor_work)->work) &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&(&krcp->monitor_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (debug_obj_work).work &rq->__lock irq_context: 0 (wq_completion)events (debug_obj_work).work &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_INET6 remove_cache_srcu pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 &n->list_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 &n->list_lock &c->lock irq_context: 0 sk_lock-AF_INET6 &rcu_state.expedited_wq &p->pi_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 sk_lock-AF_INET6 fill_pool_map-wait-type-override rcu_read_lock pool_lock#2 irq_context: 0 sk_lock-AF_INET6 fill_pool_map-wait-type-override &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &rcu_state.gp_wq irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &rnp->exp_wq[0] irq_context: 0 &type->i_mutex_dir_key#3 sb_writers#4 mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 &type->i_mutex_dir_key#3 sb_writers#4 mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_read_lock &rcu_state.gp_wq irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &fsnotify_mark_srcu remove_cache_srcu irq_context: 0 &fsnotify_mark_srcu remove_cache_srcu quarantine_lock irq_context: 0 &fsnotify_mark_srcu remove_cache_srcu &c->lock irq_context: 0 &fsnotify_mark_srcu remove_cache_srcu &rq->__lock irq_context: 0 &fsnotify_mark_srcu remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &fsnotify_mark_srcu remove_cache_srcu &n->list_lock irq_context: 0 &fsnotify_mark_srcu remove_cache_srcu &obj_hash[i].lock irq_context: 0 &fsnotify_mark_srcu remove_cache_srcu pool_lock#2 irq_context: 0 &fsnotify_mark_srcu remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &mapping->i_mmap_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)rcu_gp (work_completion)(&(&ssp->srcu_sup->work)->work) &ssp->srcu_sup->srcu_cb_mutex &rq->__lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&(&ssp->srcu_sup->work)->work) &ssp->srcu_sup->srcu_cb_mutex &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&(&ssp->srcu_sup->work)->work) &ssp->srcu_sup->srcu_cb_mutex &rq->__lock &per_cpu_ptr(group->pcpu, 
cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock fill_pool_map-wait-type-override &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&p->wq) rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&gc_work->dwork)->work) rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_es_lock key#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 jbd2_handle &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex kernfs_idr_lock &obj_hash[i].lock pool_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &mm->mmap_lock fs_reclaim irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &mm->mmap_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &mm->mmap_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &mm->mmap_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &mm->mmap_lock &____s->seqcount irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &mm->mmap_lock stock_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex krc.lock &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex krc.lock &base->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex krc.lock &base->lock &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &mm->mmap_lock rcu_read_lock rcu_read_lock ptlock_ptr(page) irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock &____s->seqcount#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&port->bc_work) rcu_read_lock pool_lock#2 irq_context: 0 rtnl_mutex rcu_read_lock &tb->tb6_lock &n->list_lock irq_context: 0 rtnl_mutex rcu_read_lock &tb->tb6_lock &n->list_lock &c->lock irq_context: 0 sk_lock-AF_INET6 sctp_assocs_id_lock &c->lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &rdev->bss_lock &base->lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &rdev->bss_lock &base->lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET6 &mm->mmap_lock sb_pagefaults mapping.invalidate_lock rcu_read_lock &xa->xa_lock#6 key#10 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) rcu_node_0 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &journal->j_state_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &journal->j_state_lock &journal->j_wait_commit irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle 
&journal->j_state_lock &journal->j_wait_commit &p->pi_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &journal->j_state_lock &journal->j_wait_commit &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &journal->j_state_lock &journal->j_wait_commit &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &journal->j_wait_updates &p->pi_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &journal->j_wait_updates &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &journal->j_wait_updates &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &idev->mc_lock _xmit_ETHER &obj_hash[i].lock pool_lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &rq->__lock &cfs_rq->removed.lock irq_context: 0 file_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &xt[i].mutex remove_cache_srcu &pcp->lock &zone->lock irq_context: 0 &xt[i].mutex remove_cache_srcu &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &xt[i].mutex &pcp->lock &zone->lock irq_context: 0 &pipe->mutex/1 &mm->mmap_lock &rq->__lock irq_context: 0 &pipe->mutex/1 &mm->mmap_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#8 lock#4 &lruvec->lru_lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) quarantine_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &ei->i_es_lock key#2 irq_context: 0 sk_lock-AF_INET6 fill_pool_map-wait-type-override rcu_read_lock rcu_node_0 irq_context: 0 sk_lock-AF_INET6 fill_pool_map-wait-type-override rcu_read_lock &rq->__lock irq_context: 0 sk_lock-AF_INET6 fill_pool_map-wait-type-override rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sbi->s_writepages_rwsem rcu_read_lock &rcu_state.gp_wq irq_context: 0 &sbi->s_writepages_rwsem rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 &sbi->s_writepages_rwsem rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 &sbi->s_writepages_rwsem rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sbi->s_writepages_rwsem rcu_read_lock &rcu_state.expedited_wq irq_context: 0 &sbi->s_writepages_rwsem rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 &sbi->s_writepages_rwsem rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 &sbi->s_writepages_rwsem rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)mm_percpu_wq (work_completion)(&(({ do { const void *__vpp_verify = (typeof((&vmstat_work) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((&vmstat_work))) *)((&vmstat_work)))); (typeof((typeof(*((&vmstat_work))) *)((&vmstat_work)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); }))->work) fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)mm_percpu_wq (work_completion)(&(({ do { const void *__vpp_verify = (typeof((&vmstat_work) + 0))((void 
*)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((&vmstat_work))) *)((&vmstat_work)))); (typeof((typeof(*((&vmstat_work))) *)((&vmstat_work)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); }))->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ei->i_es_lock &c->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 &n->list_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 &n->list_lock &c->lock irq_context: 0 &type->i_mutex_dir_key#3 sb_writers#4 batched_entropy_u8.lock irq_context: 0 &type->i_mutex_dir_key#3 sb_writers#4 kfence_freelist_lock irq_context: 0 &type->i_mutex_dir_key#3 sb_writers#4 &meta->lock irq_context: 0 &xs->mutex rcu_read_lock pool_lock#2 irq_context: 0 &xs->mutex &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#8 &____s->seqcount#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex gdp_mutex &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex gdp_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_data_sem rcu_read_lock rcu_node_0 irq_context: 0 &type->i_mutex_dir_key#4 &root->kernfs_rwsem &p->pi_lock irq_context: 0 &type->i_mutex_dir_key#4 &root->kernfs_rwsem &p->pi_lock &rq->__lock irq_context: 0 &type->i_mutex_dir_key#4 &root->kernfs_rwsem &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 &mm->mmap_lock mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#8 &type->i_mutex_dir_key#4 &root->kernfs_rwsem &rq->__lock irq_context: 0 sb_writers#8 &type->i_mutex_dir_key#4 &root->kernfs_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#8 &type->i_mutex_dir_key#4 &p->pi_lock &rq->__lock irq_context: 0 sb_writers#8 &type->i_mutex_dir_key#4 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &type->i_mutex_dir_key#3 remove_cache_srcu pool_lock#2 irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 sk_lock-AF_INET6 &mm->mmap_lock sb_pagefaults mapping.invalidate_lock rcu_read_lock &xa->xa_lock#6 rcu_read_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 sk_lock-AF_INET6 &mm->mmap_lock sb_pagefaults mapping.invalidate_lock rcu_read_lock rcu_node_0 irq_context: 0 sk_lock-AF_INET6 &mm->mmap_lock sb_pagefaults mapping.invalidate_lock rcu_read_lock &rq->__lock irq_context: 0 sk_lock-AF_INET6 &mm->mmap_lock sb_pagefaults mapping.invalidate_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sbi->s_writepages_rwsem &rq->__lock irq_context: 0 &sbi->s_writepages_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#8 tomoyo_ss remove_cache_srcu irq_context: 0 sb_writers#8 tomoyo_ss remove_cache_srcu quarantine_lock 
irq_context: 0 sb_writers#8 tomoyo_ss remove_cache_srcu &c->lock irq_context: 0 sb_writers#8 tomoyo_ss remove_cache_srcu &n->list_lock irq_context: 0 sb_writers#8 tomoyo_ss remove_cache_srcu &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 remove_cache_srcu &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &rcu_state.expedited_wq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &rcu_state.expedited_wq &p->pi_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss &rq->__lock &cfs_rq->removed.lock irq_context: 0 &type->i_mutex_dir_key#4 &p->pi_lock &cfs_rq->removed.lock irq_context: softirq &(&ssp->srcu_sup->work)->timer rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &mm->mmap_lock remove_cache_srcu rcu_read_lock rcu_node_0 irq_context: 0 &mm->mmap_lock remove_cache_srcu rcu_read_lock &rq->__lock irq_context: 0 &mm->mmap_lock remove_cache_srcu rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem &meta->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem kfence_freelist_lock irq_context: 0 rtnl_mutex qdisc_mod_lock irq_context: 0 rtnl_mutex &dev->tx_global_lock &qdisc_xmit_lock_key#2 irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ei->i_es_lock &obj_hash[i].lock irq_context: 0 rcu_read_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount irq_context: 0 rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock pool_lock#2 irq_context: 0 rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 rcu_read_lock rcu_read_lock_bh rcu_read_lock rlock-AF_PACKET irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 sb_writers#8 &type->i_mutex_dir_key#4 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#8 &sem->wait_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sem->wait_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &p->pi_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &p->pi_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex 
fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex fill_pool_map-wait-type-override pool_lock irq_context: 0 rtnl_mutex &macvlan_netdev_addr_lock_key/1 _xmit_ETHER &obj_hash[i].lock irq_context: 0 rtnl_mutex &macvlan_netdev_addr_lock_key/1 _xmit_ETHER pool_lock#2 irq_context: 0 rtnl_mutex &macvlan_netdev_addr_lock_key/1 _xmit_ETHER krc.lock irq_context: 0 rtnl_mutex &macvlan_netdev_addr_lock_key/1 &obj_hash[i].lock irq_context: 0 rtnl_mutex &macvlan_netdev_addr_lock_key/1 pool_lock#2 irq_context: 0 rtnl_mutex &macvlan_netdev_addr_lock_key/1 krc.lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 rtnl_mutex rcu_read_lock &tb->tb6_lock rcu_read_lock &c->lock irq_context: 0 rtnl_mutex rcu_read_lock &tb->tb6_lock rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 rtnl_mutex rcu_read_lock &tb->tb6_lock rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 rtnl_mutex rcu_read_lock &tb->tb6_lock rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &tbl->lock &c->lock irq_context: 0 rtnl_mutex &idev->mc_lock &macvlan_netdev_addr_lock_key/1 irq_context: 0 rtnl_mutex &idev->mc_lock &macvlan_netdev_addr_lock_key/1 &obj_hash[i].lock irq_context: 0 rtnl_mutex &idev->mc_lock &macvlan_netdev_addr_lock_key/1 pool_lock#2 irq_context: 0 rtnl_mutex &idev->mc_lock &macvlan_netdev_addr_lock_key/1 krc.lock irq_context: 0 rtnl_mutex rcu_read_lock rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&w->work)#2 nf_conntrack_mutex nf_conntrack_mutex.wait_lock irq_context: 0 (wq_completion)events (work_completion)(&w->work)#2 nf_conntrack_mutex &pool->lock irq_context: 0 (wq_completion)events (work_completion)(&w->work)#2 nf_conntrack_mutex &pool->lock &p->pi_lock irq_context: 0 (wq_completion)events (work_completion)(&w->work)#2 nf_conntrack_mutex &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&w->work)#2 nf_conntrack_mutex &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-kex-wg2#14 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh &r->producer_lock#2 irq_context: 0 (wq_completion)wg-kex-wg2#14 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh rcu_read_lock &pool->lock irq_context: 0 (wq_completion)wg-kex-wg2#14 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) 
(__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 &mm->mmap_lock rcu_read_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)events (work_completion)(&w->work)#2 nf_conntrack_mutex.wait_lock irq_context: 0 (wq_completion)events (work_completion)(&w->work)#2 &p->pi_lock irq_context: 0 (wq_completion)events (work_completion)(&w->work)#2 &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&w->work)#2 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&w->work)#2 nf_conntrack_mutex rcu_node_0 irq_context: 0 (wq_completion)events (work_completion)(&w->work)#2 nf_conntrack_mutex &rcu_state.expedited_wq irq_context: 0 (wq_completion)events (work_completion)(&w->work)#2 nf_conntrack_mutex &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)events (work_completion)(&w->work)#2 nf_conntrack_mutex &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&w->work)#2 nf_conntrack_mutex &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem &tbl->lock irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem class irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem (&tbl->proxy_timer) irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem &base->lock irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem krc.lock irq_context: 0 rtnl_mutex &ul->lock#2 pool_lock#2 irq_context: 0 rtnl_mutex &ul->lock#2 &dir->lock#2 irq_context: 0 rtnl_mutex rcu_read_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: 0 rtnl_mutex rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 rtnl_mutex rcu_read_lock rcu_read_lock_bh rcu_read_lock rlock-AF_PACKET irq_context: 0 &knet->mutex irq_context: 0 &mux->lock irq_context: 0 &mux->rx_lock irq_context: 0 sk_lock-AF_KCM irq_context: 0 sk_lock-AF_KCM slock-AF_KCM irq_context: 0 sk_lock-AF_KCM fs_reclaim irq_context: 0 sk_lock-AF_KCM fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sk_lock-AF_KCM pool_lock#2 irq_context: 0 sk_lock-AF_KCM &____s->seqcount irq_context: 0 sk_lock-AF_KCM &mm->mmap_lock irq_context: 0 slock-AF_KCM irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_KCM irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_KCM slock-AF_KCM irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_KCM clock-AF_KCM irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_KCM &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_KCM pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 slock-AF_KCM irq_context: 0 &sb->s_type->i_mutex_key#10 &mux->lock irq_context: 0 &sb->s_type->i_mutex_key#10 (work_completion)(&kcm->tx_work) irq_context: 0 &sb->s_type->i_mutex_key#10 (work_completion)(&kcm->tx_work) &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 (work_completion)(&kcm->tx_work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rhashtable_bucket rcu_read_lock &pool->lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rhashtable_bucket rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rhashtable_bucket rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rhashtable_bucket rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rhashtable_bucket 
rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 &mux->rx_lock irq_context: 0 &sb->s_type->i_mutex_key#10 &mux->rx_lock rlock-AF_KCM irq_context: 0 &sb->s_type->i_mutex_key#10 &knet->mutex irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&tbl->gc_work)->work) &rq->__lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&tbl->gc_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &caifn->caifdevs.lock rcu_state.exp_mutex &rnp->exp_wq[1] irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&tbl->gc_work)->work) &cfs_rq->removed.lock irq_context: softirq rcu_callback &____s->seqcount irq_context: 0 (wq_completion)events (work_completion)(&ht->run_work) &ht->mutex remove_cache_srcu &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&ht->run_work) &ht->mutex remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&ht->run_work) &ht->mutex remove_cache_srcu pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&ht->run_work) &ht->mutex &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_state.exp_mutex &rnp->exp_wq[2] irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 remove_cache_srcu pool_lock#2 irq_context: softirq rcu_read_lock &local->rx_path_lock rcu_read_lock &pool->lock fill_pool_map-wait-type-override pool_lock#2 irq_context: softirq rcu_read_lock &local->rx_path_lock rcu_read_lock &pool->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 namespace_sem stock_lock irq_context: 0 namespace_sem &____s->seqcount#2 irq_context: 0 &type->s_umount_key#22/1 &xa->xa_lock#12 irq_context: 0 &type->s_umount_key#22/1 &xa->xa_lock#12 pool_lock#2 irq_context: 0 &type->s_umount_key#22/1 &obj_hash[i].lock irq_context: 0 &type->s_umount_key#22/1 stock_lock irq_context: 0 &type->i_mutex_dir_key#3 sb_writers#4 remove_cache_srcu irq_context: 0 &type->i_mutex_dir_key#3 sb_writers#4 remove_cache_srcu quarantine_lock irq_context: 0 &type->i_mutex_dir_key#3 sb_writers#4 remove_cache_srcu &c->lock irq_context: 0 &type->i_mutex_dir_key#3 sb_writers#4 remove_cache_srcu &n->list_lock irq_context: 0 &type->i_mutex_dir_key#3 sb_writers#4 remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 &type->i_mutex_dir_key#3 sb_writers#4 remove_cache_srcu &obj_hash[i].lock irq_context: 0 &mm->mmap_lock fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 pernet_ops_rwsem remove_cache_srcu rcu_read_lock rcu_node_0 irq_context: 0 pernet_ops_rwsem remove_cache_srcu rcu_read_lock &rq->__lock irq_context: 0 pernet_ops_rwsem remove_cache_srcu rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#8 rcu_read_lock &____s->seqcount#2 irq_context: 0 &sb->s_type->i_mutex_key#8 rcu_read_lock &____s->seqcount irq_context: 0 tracepoints_mutex cpu_hotplug_lock jump_label_mutex text_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 dgram_lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&cache_cleaner)->work) fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&cache_cleaner)->work) fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)events_power_efficient 
(work_completion)(&(&cache_cleaner)->work) fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&cache_cleaner)->work) fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&cache_cleaner)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &cfs_rq->removed.lock irq_context: 0 sk_lock-AF_IEEE802154 irq_context: 0 sk_lock-AF_IEEE802154 slock-AF_IEEE802154 irq_context: 0 sk_lock-AF_IEEE802154 (console_sem).lock irq_context: 0 sk_lock-AF_IEEE802154 console_lock console_srcu console_owner_lock irq_context: 0 sk_lock-AF_IEEE802154 console_lock console_srcu console_owner irq_context: 0 sk_lock-AF_IEEE802154 console_lock console_srcu console_owner &port_lock_key irq_context: 0 sk_lock-AF_IEEE802154 console_lock console_srcu console_owner console_owner_lock irq_context: 0 sk_lock-AF_IEEE802154 &rq->__lock irq_context: 0 sk_lock-AF_IEEE802154 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem &rq->__lock &cfs_rq->removed.lock irq_context: 0 slock-AF_IEEE802154 irq_context: 0 &sb->s_type->i_mutex_key#10 dgram_lock irq_context: 0 &sb->s_type->i_mutex_key#10 clock-AF_IEEE802154 irq_context: 0 &sb->s_type->i_mutex_key#10 rlock-AF_IEEE802154 irq_context: 0 (wq_completion)events (work_completion)(&(&hwstats->traffic_dw)->work) &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&(&hwstats->traffic_dw)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &cfs_rq->removed.lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock_bh rcu_read_lock batched_entropy_u8.lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock_bh rcu_read_lock kfence_freelist_lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock_bh rcu_read_lock &meta->lock irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &n->list_lock irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &n->list_lock &c->lock irq_context: 0 sk_lock-AF_ALG &c->lock irq_context: 0 sk_lock-AF_ALG &n->list_lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock_bh rcu_read_lock &n->list_lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock_bh rcu_read_lock &n->list_lock &c->lock irq_context: 0 rcu_read_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: softirq (&hsr->announce_timer) rcu_read_lock &hsr->seqnr_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: softirq (&hsr->announce_timer) rcu_read_lock &hsr->seqnr_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: softirq (&peer->hb_timer) slock-AF_INET6 &n->list_lock irq_context: softirq (&peer->hb_timer) slock-AF_INET6 &n->list_lock &c->lock irq_context: softirq (&peer->hb_timer) slock-AF_INET6 &data->lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 remove_cache_srcu rcu_read_lock rcu_node_0 irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 remove_cache_srcu rcu_read_lock &rq->__lock 
irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 remove_cache_srcu rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex fib_info_lock &obj_hash[i].lock pool_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#2 irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#2 irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount irq_context: 0 sk_lock-AF_ALG sk_lock-AF_ALG/1 irq_context: 0 sk_lock-AF_ALG sk_lock-AF_ALG/1 slock-AF_ALG irq_context: 0 sk_lock-AF_ALG &rq->__lock irq_context: 0 sk_lock-AF_ALG &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)rcu_gp (work_completion)(&(&ssp->srcu_sup->work)->work) &ssp->srcu_sup->srcu_cb_mutex rcu_read_lock &pool->lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)rcu_gp (work_completion)(&(&ssp->srcu_sup->work)->work) &ssp->srcu_sup->srcu_cb_mutex rcu_read_lock &pool->lock fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&(&ssp->srcu_sup->work)->work) &ssp->srcu_sup->srcu_cb_mutex rcu_read_lock &pool->lock fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 (wq_completion)rcu_gp (work_completion)(&(&ssp->srcu_sup->work)->work) &ssp->srcu_sup->srcu_cb_mutex rcu_read_lock &pool->lock fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)rcu_gp (work_completion)(&(&ssp->srcu_sup->work)->work) &ssp->srcu_sup->srcu_cb_mutex rcu_read_lock &pool->lock fill_pool_map-wait-type-override pool_lock irq_context: softirq slock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock &____s->seqcount#2 irq_context: 0 &sb->s_type->i_mutex_key#9 batched_entropy_u8.lock irq_context: 0 &sb->s_type->i_mutex_key#9 kfence_freelist_lock irq_context: softirq (&hsr->announce_timer) rcu_read_lock &hsr->seqnr_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rlock-AF_PACKET irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &ei->i_data_sem &ei->i_es_lock &c->lock irq_context: 0 sk_lock-AF_ALG rcu_read_lock rcu_node_0 irq_context: 0 sk_lock-AF_ALG rcu_read_lock &rq->__lock irq_context: 0 sk_lock-AF_ALG rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &sem->wait_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &p->pi_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &p->pi_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &ei->i_data_sem &rq->__lock 
&per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-crypt-wg0#4 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-crypt-wg0#4 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rlock-AF_PACKET irq_context: 0 &sb->s_type->i_mutex_key#8 ima_extend_list_mutex &c->lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &____s->seqcount#2 irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &____s->seqcount irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock rcu_read_lock &q->queue_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock rcu_read_lock &q->queue_lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock rcu_read_lock &q->queue_lock pcpu_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock rcu_read_lock &q->queue_lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock rcu_read_lock &q->queue_lock percpu_counters_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock rcu_read_lock &q->queue_lock &blkcg->lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock rcu_read_lock &q->queue_lock &blkcg->lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) rcu_read_lock_bh rcu_read_lock &____s->seqcount#2 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) rcu_read_lock_bh rcu_read_lock &____s->seqcount irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle &ei->i_es_lock key#2 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) rcu_read_lock_bh rcu_read_lock rlock-AF_PACKET irq_context: 0 loop_validate_mutex &lo->lo_mutex &rq->__lock irq_context: 0 loop_validate_mutex &lo->lo_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &fsnotify_mark_srcu batched_entropy_u8.lock irq_context: 0 &fsnotify_mark_srcu kfence_freelist_lock irq_context: 0 sk_lock-AF_INET6 remove_cache_srcu rcu_read_lock &rcu_state.gp_wq irq_context: 0 sk_lock-AF_INET6 remove_cache_srcu rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 sk_lock-AF_INET6 remove_cache_srcu rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 sk_lock-AF_INET6 remove_cache_srcu rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq (&hsr->announce_timer) rcu_read_lock &hsr->seqnr_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 sb_writers#8 kn->active#5 &kernfs_locks->open_file_mutex[count] &n->list_lock irq_context: 0 sb_writers#8 kn->active#5 &kernfs_locks->open_file_mutex[count] &n->list_lock &c->lock 
irq_context: 0 sb_writers#8 kn->active#5 &kernfs_locks->open_file_mutex[count] &rq->__lock irq_context: 0 sb_writers#8 kn->active#5 &kernfs_locks->open_file_mutex[count] &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 &net->sctp.addr_wq_lock slock-AF_INET6/1 &obj_hash[i].lock pool_lock irq_context: 0 clock-AF_PHONET irq_context: 0 rlock-AF_PHONET irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#9 &cfs_rq->removed.lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#9 &obj_hash[i].lock irq_context: 0 sk_lock-AF_TIPC rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)wg-crypt-wg1#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) pool_lock#2 irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock#2 rcu_read_lock_bh &obj_hash[i].lock pool_lock irq_context: 0 &sb->s_type->i_mutex_key#10 &net->packet.sklist_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (debug_obj_work).work quarantine_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &mm->mmap_lock rcu_node_0 irq_context: 0 &sb->s_type->i_mutex_key#10 &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle rcu_read_lock &rcu_state.expedited_wq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&port->bc_work) rcu_read_lock &list->lock#5 irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ei->i_data_sem &bgl->locks[i].lock irq_context: 0 sb_writers#4 quarantine_lock irq_context: 0 rtnl_mutex dev->qdisc_tx_busylock ?: &qdisc_tx_busylock#2 irq_context: 0 rtnl_mutex dev->qdisc_tx_busylock ?: &qdisc_tx_busylock#2 &sch->q.lock irq_context: 0 sk_lock-AF_RXRPC irq_context: 0 sk_lock-AF_RXRPC &rq->__lock irq_context: 0 sk_lock-AF_RXRPC &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_RXRPC slock-AF_RXRPC irq_context: 0 sk_lock-AF_RXRPC &rxnet->local_mutex irq_context: 0 sk_lock-AF_RXRPC &rxnet->local_mutex &rq->__lock irq_context: 0 sk_lock-AF_RXRPC &rxnet->local_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_RXRPC &rxnet->local_mutex fs_reclaim irq_context: 0 sk_lock-AF_RXRPC &rxnet->local_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sk_lock-AF_RXRPC &rxnet->local_mutex &c->lock irq_context: 0 sk_lock-AF_RXRPC &rxnet->local_mutex pool_lock#2 irq_context: 0 sk_lock-AF_RXRPC &rxnet->local_mutex &obj_hash[i].lock irq_context: 0 sk_lock-AF_RXRPC &rxnet->local_mutex crngs.lock irq_context: 0 sk_lock-AF_RXRPC &rxnet->local_mutex stock_lock irq_context: 0 sk_lock-AF_RXRPC &rxnet->local_mutex mmu_notifier_invalidate_range_start irq_context: 0 sk_lock-AF_RXRPC &rxnet->local_mutex &sb->s_type->i_lock_key#8 irq_context: 0 sk_lock-AF_RXRPC &rxnet->local_mutex &dir->lock irq_context: 0 sk_lock-AF_RXRPC &rxnet->local_mutex k-sk_lock-AF_INET6 irq_context: 
0 sk_lock-AF_RXRPC &rxnet->local_mutex k-sk_lock-AF_INET6 &rq->__lock irq_context: 0 sk_lock-AF_RXRPC &rxnet->local_mutex k-sk_lock-AF_INET6 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_RXRPC &rxnet->local_mutex k-sk_lock-AF_INET6 k-slock-AF_INET6 irq_context: 0 sk_lock-AF_RXRPC &rxnet->local_mutex k-sk_lock-AF_INET6 &____s->seqcount#8 irq_context: 0 sk_lock-AF_RXRPC &rxnet->local_mutex k-sk_lock-AF_INET6 batched_entropy_u32.lock irq_context: 0 sk_lock-AF_RXRPC &rxnet->local_mutex k-sk_lock-AF_INET6 &table->hash[i].lock irq_context: 0 sk_lock-AF_RXRPC &rxnet->local_mutex k-sk_lock-AF_INET6 &table->hash[i].lock k-clock-AF_INET6 irq_context: 0 sk_lock-AF_RXRPC &rxnet->local_mutex k-sk_lock-AF_INET6 &table->hash[i].lock &table->hash2[i].lock irq_context: 0 sk_lock-AF_RXRPC &rxnet->local_mutex k-slock-AF_INET6 irq_context: 0 sk_lock-AF_RXRPC &rxnet->local_mutex cpu_hotplug_lock irq_context: 0 sk_lock-AF_RXRPC &rxnet->local_mutex kthread_create_lock irq_context: 0 sk_lock-AF_RXRPC &rxnet->local_mutex &p->pi_lock irq_context: 0 sk_lock-AF_RXRPC &rxnet->local_mutex &p->pi_lock &cfs_rq->removed.lock irq_context: 0 sk_lock-AF_RXRPC &rxnet->local_mutex &p->pi_lock &rq->__lock irq_context: 0 sk_lock-AF_RXRPC &rxnet->local_mutex &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_RXRPC &rxnet->local_mutex &x->wait irq_context: 0 sk_lock-AF_RXRPC &rxnet->local_mutex &x->wait#22 irq_context: 0 slock-AF_RXRPC irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle rcu_node_0 irq_context: 0 cb_lock nlk_cb_mutex-GENERIC genl_mutex smc_lgr_list.lock irq_context: 0 cb_lock nlk_cb_mutex-GENERIC rcu_read_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: 0 cb_lock nlk_cb_mutex-GENERIC rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 cb_lock nlk_cb_mutex-GENERIC rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: softirq (&peer->hb_timer) slock-AF_INET6 rcu_read_lock rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 irq_context: softirq (&peer->hb_timer) slock-AF_INET6 rcu_read_lock rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 batched_entropy_u8.lock irq_context: 0 rtnl_mutex &tbl->lock &____s->seqcount#2 irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem fib_info_lock &obj_hash[i].lock irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem fib_info_lock pool_lock#2 irq_context: 0 rtnl_mutex &k->k_lock klist_remove_lock irq_context: 0 &type->i_mutex_dir_key/1 irq_context: 0 &type->i_mutex_dir_key/1 rename_lock.seqcount irq_context: 0 &type->i_mutex_dir_key/1 rcu_read_lock &dentry->d_lock irq_context: 0 &type->i_mutex_dir_key/1 &obj_hash[i].lock irq_context: 0 &type->i_mutex_dir_key/1 pool_lock#2 irq_context: 0 &type->i_mutex_dir_key/1 tomoyo_ss irq_context: 0 &type->i_mutex_dir_key/1 tomoyo_ss mmu_notifier_invalidate_range_start irq_context: 0 &type->i_mutex_dir_key/1 tomoyo_ss pool_lock#2 irq_context: 0 &type->i_mutex_dir_key/1 tomoyo_ss rcu_read_lock mount_lock.seqcount irq_context: 0 &type->i_mutex_dir_key/1 tomoyo_ss rcu_read_lock rcu_read_lock rename_lock.seqcount irq_context: 0 &type->i_mutex_dir_key/1 tomoyo_ss &obj_hash[i].lock irq_context: 0 &type->i_mutex_dir_key/1 tomoyo_ss rcu_read_lock &dentry->d_lock irq_context: 0 &type->i_mutex_dir_key/1 tomoyo_ss &c->lock irq_context: 0 &type->i_mutex_dir_key/1 tomoyo_ss 
tomoyo_policy_lock irq_context: 0 &type->i_mutex_dir_key/1 tomoyo_ss tomoyo_policy_lock mmu_notifier_invalidate_range_start irq_context: 0 &type->i_mutex_dir_key/1 tomoyo_ss tomoyo_policy_lock pool_lock#2 irq_context: 0 &type->i_mutex_dir_key/1 tomoyo_ss tomoyo_policy_lock &____s->seqcount irq_context: 0 &type->i_mutex_dir_key/1 tomoyo_ss tomoyo_policy_lock rcu_read_lock pool_lock#2 irq_context: 0 &type->i_mutex_dir_key/1 tomoyo_ss tomoyo_policy_lock &obj_hash[i].lock irq_context: 0 rtnl_mutex subsys mutex#81 &k->k_lock klist_remove_lock irq_context: 0 rtnl_mutex gdp_mutex sysfs_symlink_target_lock irq_context: 0 rtnl_mutex gdp_mutex kernfs_idr_lock irq_context: 0 rtnl_mutex gdp_mutex &obj_hash[i].lock irq_context: 0 &rcu_state.expedited_wq &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &sb->s_type->i_mutex_key#10 &rnp->exp_lock irq_context: 0 rtnl_mutex (work_completion)(&port->bc_work) irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_state.exp_mutex irq_context: 0 sk_lock-AF_ALG &n->list_lock &c->lock irq_context: 0 sk_lock-AF_ALG &ei->socket.wq.wait irq_context: 0 rtnl_mutex cpu_hotplug_lock &rq->__lock irq_context: 0 rtnl_mutex cpu_hotplug_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &macsec_netdev_addr_lock_key/1 _xmit_ETHER &obj_hash[i].lock irq_context: 0 rtnl_mutex &macsec_netdev_addr_lock_key/1 _xmit_ETHER pool_lock#2 irq_context: 0 rtnl_mutex &macsec_netdev_addr_lock_key/1 _xmit_ETHER krc.lock irq_context: 0 rtnl_mutex &idev->mc_lock &macsec_netdev_addr_lock_key/1 krc.lock &obj_hash[i].lock irq_context: 0 rtnl_mutex &idev->mc_lock &macsec_netdev_addr_lock_key/1 krc.lock &base->lock irq_context: 0 rtnl_mutex &idev->mc_lock &macsec_netdev_addr_lock_key/1 krc.lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) &folio_wait_table[i] &p->pi_lock &cfs_rq->removed.lock irq_context: 0 sk_lock-AF_ALG rcu_read_lock &ei->socket.wq.wait irq_context: 0 sk_lock-AF_ALG rcu_read_lock &ei->socket.wq.wait &p->pi_lock irq_context: 0 sk_lock-AF_ALG rcu_read_lock &ei->socket.wq.wait &p->pi_lock &cfs_rq->removed.lock irq_context: 0 sk_lock-AF_ALG rcu_read_lock &ei->socket.wq.wait &p->pi_lock &rq->__lock irq_context: 0 sk_lock-AF_ALG rcu_read_lock &ei->socket.wq.wait &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_ALG slock-AF_ALG &sk->sk_lock.wq irq_context: 0 slock-AF_ALG &sk->sk_lock.wq irq_context: 0 slock-AF_ALG &sk->sk_lock.wq &p->pi_lock irq_context: 0 slock-AF_ALG &sk->sk_lock.wq &p->pi_lock &rq->__lock irq_context: 0 slock-AF_ALG &sk->sk_lock.wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex batched_entropy_u8.lock crngs.lock irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock ptlock_ptr(page)#2 irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock sb_pagefaults irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock sb_pagefaults tk_core.seq.seqcount irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock sb_pagefaults mmu_notifier_invalidate_range_start irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock sb_pagefaults pool_lock#2 irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock sb_pagefaults &journal->j_state_lock irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock sb_pagefaults jbd2_handle irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock sb_pagefaults jbd2_handle &ei->i_raw_lock irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock sb_pagefaults &obj_hash[i].lock irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock sb_pagefaults mapping.invalidate_lock irq_context: 0 
sk_lock-AF_ALG &mm->mmap_lock sb_pagefaults mapping.invalidate_lock rcu_read_lock &memcg->move_lock irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock sb_pagefaults mapping.invalidate_lock rcu_read_lock &xa->xa_lock#6 irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock sb_pagefaults mapping.invalidate_lock &mapping->private_lock irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock sb_pagefaults mapping.invalidate_lock &mapping->private_lock rcu_read_lock &memcg->move_lock irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock &mapping->private_lock irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock &mapping->private_lock rcu_read_lock &memcg->move_lock irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem &net->sctp.local_addr_lock &net->sctp.addr_wq_lock rcu_read_lock pool_lock#2 irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock rcu_read_lock rcu_node_0 irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock rcu_read_lock &rq->__lock irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock sb_pagefaults mapping.invalidate_lock rcu_read_lock &xa->xa_lock#6 key#10 irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock &folio_wait_table[i] irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) &folio_wait_table[i] &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 sk_lock-AF_ALG rcu_read_lock pool_lock#2 irq_context: 0 sk_lock-AF_ALG rcu_read_lock &rcu_state.gp_wq irq_context: 0 sk_lock-AF_ALG rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 sk_lock-AF_ALG rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 sk_lock-AF_ALG rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_ALG rcu_read_lock &rcu_state.expedited_wq irq_context: 0 sk_lock-AF_ALG rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 sk_lock-AF_ALG rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 sk_lock-AF_ALG rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &idev->mc_lock krc.lock &obj_hash[i].lock irq_context: 0 rtnl_mutex &idev->mc_lock krc.lock &base->lock irq_context: 0 rtnl_mutex &idev->mc_lock krc.lock &base->lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock rcu_read_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock sb_pagefaults jbd2_handle &journal->j_wait_updates irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock sb_pagefaults mapping.invalidate_lock &rq->__lock irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock sb_pagefaults mapping.invalidate_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &root->kernfs_rwsem &____s->seqcount irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock &rq->__lock irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ei->i_prealloc_lock &pa->pa_lock#2 irq_context: 0 &sbi->s_writepages_rwsem rcu_read_lock &____s->seqcount#2 irq_context: 0 &sbi->s_writepages_rwsem rcu_read_lock &____s->seqcount irq_context: 0 &sbi->s_writepages_rwsem rcu_read_lock &c->lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-kex-wg2#14 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ 
unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 sk_lock-AF_ALG quarantine_lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) rcu_read_lock &rcu_state.gp_wq irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) rcu_read_lock &rcu_state.expedited_wq irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock sb_pagefaults rcu_node_0 irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock sb_pagefaults &rcu_state.expedited_wq irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock sb_pagefaults &rcu_state.expedited_wq &p->pi_lock irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock sb_pagefaults &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock sb_pagefaults &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock sb_pagefaults &rq->__lock irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock sb_pagefaults &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock sb_pagefaults &c->lock irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock sb_pagefaults remove_cache_srcu irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock sb_pagefaults remove_cache_srcu quarantine_lock irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock sb_pagefaults remove_cache_srcu &c->lock irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock sb_pagefaults remove_cache_srcu &n->list_lock irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock sb_pagefaults remove_cache_srcu &rq->__lock irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock sb_pagefaults remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem rtnl_mutex &idev->mc_lock _xmit_ETHER &n->list_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &idev->mc_lock _xmit_ETHER &n->list_lock &c->lock irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock sb_pagefaults remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock sb_pagefaults remove_cache_srcu &obj_hash[i].lock irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock sb_pagefaults mapping.invalidate_lock rcu_read_lock rcu_node_0 irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock sb_pagefaults mapping.invalidate_lock rcu_read_lock &rq->__lock irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock sb_pagefaults mapping.invalidate_lock rcu_read_lock &rq->__lock 
&per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_data_sem rcu_node_0 irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock sb_pagefaults mapping.invalidate_lock rcu_read_lock &rcu_state.gp_wq irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock sb_pagefaults mapping.invalidate_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock sb_pagefaults mapping.invalidate_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock sb_pagefaults mapping.invalidate_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_ALG rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 sk_lock-AF_ALG rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 sk_lock-AF_ALG rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 tasklist_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 &sb->s_type->i_mutex_key#8 rcu_read_lock &obj_hash[i].lock pool_lock irq_context: 0 sk_lock-AF_ALG &____s->seqcount#2 irq_context: 0 sk_lock-AF_ALG &____s->seqcount irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex &table->hash[i].lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex &table->hash[i].lock &table->hash2[i].lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex k-clock-AF_INET6 irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex &xa->xa_lock#6 irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex &fsnotify_mark_srcu irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem k-clock-AF_RXRPC irq_context: 0 pernet_ops_rwsem (wq_completion)krxrpcd irq_context: 0 pernet_ops_rwsem &wq->mutex irq_context: 0 pernet_ops_rwsem &wq->mutex &pool->lock/1 irq_context: 0 pernet_ops_rwsem &wq->mutex &x->wait#10 irq_context: 0 pernet_ops_rwsem rlock-AF_RXRPC irq_context: 0 pernet_ops_rwsem (&net->fs_probe_timer) irq_context: 0 pernet_ops_rwsem &net->cells_lock irq_context: 0 pernet_ops_rwsem (&net->cells_timer) irq_context: 0 pernet_ops_rwsem bit_wait_table + i irq_context: 0 pernet_ops_rwsem (&net->fs_timer) irq_context: 0 pernet_ops_rwsem ovs_mutex irq_context: 0 pernet_ops_rwsem ovs_mutex (work_completion)(&data->gc_work) irq_context: 0 pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex irq_context: 0 pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex nf_hook_mutex irq_context: 0 pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex nf_hook_mutex cpu_hotplug_lock irq_context: 0 pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex nf_hook_mutex fs_reclaim irq_context: 0 pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex nf_hook_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex nf_hook_mutex stock_lock irq_context: 0 pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex nf_hook_mutex pool_lock#2 irq_context: 0 pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex pool_lock#2 irq_context: 0 pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex &rq->__lock irq_context: 0 pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex defrag4_mutex irq_context: 0 pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex defrag4_mutex nf_hook_mutex irq_context: 0 pernet_ops_rwsem ovs_mutex 
nf_ct_proto_mutex defrag4_mutex nf_hook_mutex cpu_hotplug_lock irq_context: 0 pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex defrag4_mutex &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex defrag4_mutex pool_lock#2 irq_context: 0 pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex nf_hook_mutex &____s->seqcount irq_context: 0 pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex nf_hook_mutex rcu_read_lock pool_lock#2 irq_context: 0 pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex nf_hook_mutex &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex defrag6_mutex irq_context: 0 pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex defrag6_mutex nf_hook_mutex irq_context: 0 pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex defrag6_mutex nf_hook_mutex cpu_hotplug_lock irq_context: 0 pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex defrag6_mutex &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex defrag6_mutex &obj_hash[i].lock pool_lock irq_context: 0 pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex defrag6_mutex &rq->__lock irq_context: 0 pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex defrag6_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex defrag6_mutex pool_lock#2 irq_context: 0 pernet_ops_rwsem ovs_mutex &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem ovs_mutex pool_lock#2 irq_context: 0 pernet_ops_rwsem ovs_mutex nf_connlabels_lock irq_context: 0 pernet_ops_rwsem ovs_mutex net_rwsem irq_context: 0 pernet_ops_rwsem (work_completion)(&(&ovs_net->masks_rebalance)->work) irq_context: 0 pernet_ops_rwsem (work_completion)(&ovs_net->dp_notify_work) irq_context: 0 pernet_ops_rwsem &srv->idr_lock irq_context: 0 pernet_ops_rwsem (work_completion)(&tn->work) irq_context: 0 pernet_ops_rwsem wq_pool_mutex &wq->mutex &pool->lock/1 irq_context: 0 pernet_ops_rwsem &pool->lock/1 irq_context: 0 pernet_ops_rwsem &pool->lock/1 rcu_read_lock &pool->lock irq_context: 0 pernet_ops_rwsem &pool->lock/1 rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem &pool->lock/1 rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 pernet_ops_rwsem &pool->lock/1 rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 pernet_ops_rwsem &pool->lock/1 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 pernet_ops_rwsem &pool->lock/1 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock rcu_read_lock &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_ALG &rq->__lock &cfs_rq->removed.lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_TIPC &tn->nametbl_lock &service->lock &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_TIPC &tn->nametbl_lock &service->lock krc.lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_TIPC &tn->nametbl_lock &nt->cluster_scope_lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_TIPC &tn->nametbl_lock &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_TIPC &tn->nametbl_lock krc.lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_TIPC &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_TIPC rcu_read_lock rhashtable_bucket irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_TIPC k-clock-AF_TIPC irq_context: 0 
sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock rcu_read_lock &rcu_state.gp_wq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem &rnp->exp_wq[0] irq_context: 0 pernet_ops_rwsem &tn->nametbl_lock irq_context: 0 pernet_ops_rwsem rcu_state.exp_mutex &rnp->exp_wq[2] irq_context: 0 rcu_read_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#2 irq_context: softirq (&peer->timer_persistent_keepalive) &n->list_lock irq_context: softirq (&peer->timer_persistent_keepalive) &n->list_lock &c->lock irq_context: 0 &sb->s_type->i_mutex_key#10 clock-AF_RXRPC irq_context: 0 &sb->s_type->i_mutex_key#10 (wq_completion)krxrpcd irq_context: 0 &sb->s_type->i_mutex_key#10 &wq->mutex irq_context: 0 &sb->s_type->i_mutex_key#10 &wq->mutex &pool->lock/1 irq_context: 0 &sb->s_type->i_mutex_key#10 &wq->mutex &x->wait#10 irq_context: 0 &sb->s_type->i_mutex_key#10 &wq->mutex &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 &wq->mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem (work_completion)(&(&c->work)->work) irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock quarantine_lock irq_context: 0 pernet_ops_rwsem (wq_completion)krdsd irq_context: 0 pernet_ops_rwsem (work_completion)(&rtn->rds_tcp_accept_w) irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_INET6 &queue->rskq_lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_INET6 k-slock-AF_INET6 &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_INET6 k-slock-AF_INET6 &tcp_hashinfo.bhash[i].lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_INET6 k-slock-AF_INET6 &tcp_hashinfo.bhash[i].lock stock_lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_INET6 k-slock-AF_INET6 &tcp_hashinfo.bhash[i].lock &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_INET6 k-slock-AF_INET6 &tcp_hashinfo.bhash[i].lock pool_lock#2 irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_INET6 k-slock-AF_INET6 &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_INET6 k-slock-AF_INET6 &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock stock_lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_INET6 k-slock-AF_INET6 &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_INET6 k-slock-AF_INET6 &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock pool_lock#2 irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_INET6 k-slock-AF_INET6 elock-AF_INET6 irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_INET6 &rq->__lock irq_context: 0 pernet_ops_rwsem rds_tcp_conn_lock irq_context: 0 pernet_ops_rwsem loop_conns_lock irq_context: 0 pernet_ops_rwsem (wq_completion)l2tp irq_context: 0 &xt[i].mutex &rq->__lock &cfs_rq->removed.lock irq_context: 0 &sb->s_type->i_mutex_key#10 rlock-AF_RXRPC irq_context: 0 &sb->s_type->i_mutex_key#10 &x->wait irq_context: 0 kn->active#5 remove_cache_srcu rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 kn->active#5 remove_cache_srcu rcu_read_lock &rcu_state.expedited_wq irq_context: 0 kn->active#5 
remove_cache_srcu rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 kn->active#5 remove_cache_srcu rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 kn->active#5 remove_cache_srcu rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &table->hash[i].lock irq_context: 0 rtnl_mutex &table->hash[i].lock &table->hash2[i].lock irq_context: 0 rtnl_mutex k-clock-AF_INET irq_context: 0 rtnl_mutex &xa->xa_lock#6 irq_context: 0 rtnl_mutex &fsnotify_mark_srcu irq_context: 0 rtnl_mutex &idev->mc_lock _xmit_ETHER &obj_hash[i].lock pool_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 tomoyo_ss rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem (&rxnet->peer_keepalive_timer) irq_context: 0 pernet_ops_rwsem (work_completion)(&rxnet->peer_keepalive_work) irq_context: 0 pernet_ops_rwsem rcu_read_lock &pool->lock/1 irq_context: 0 pernet_ops_rwsem (&rxnet->service_conn_reap_timer) irq_context: 0 pernet_ops_rwsem rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 pernet_ops_rwsem rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 pernet_ops_rwsem rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem &x->wait#10 irq_context: 0 &fsnotify_mark_srcu remove_cache_srcu &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 cb_lock nlk_cb_mutex-GENERIC rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &c->lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &____s->seqcount#2 irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &____s->seqcount irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock pool_lock#2 irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg2#14 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_TUNNEL6#2 rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)wg-kex-wg2#14 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) 
*)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_TUNNEL6#2 rcu_read_lock tk_core.seq.seqcount irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_TUNNEL6#2 rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg1#4 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-crypt-wg1#4 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rlock-AF_PACKET irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ei->i_data_sem &(ei->i_block_reservation_lock) key#15 irq_context: 0 rtnl_mutex uevent_sock_mutex &____s->seqcount#2 irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 rcu_read_lock &rcu_state.expedited_wq irq_context: 0 pernet_ops_rwsem rtnl_mutex _xmit_NONE irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock_bh rcu_read_lock &pcp->lock &zone->lock irq_context: softirq (&peer->hb_timer) slock-AF_INET6 rcu_read_lock rcu_read_lock &n->lock &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_state.exp_mutex &obj_hash[i].lock pool_lock irq_context: 0 &vma->vm_lock->lock ptlock_ptr(page)#2 rcu_read_lock &p->pi_lock irq_context: 0 &vma->vm_lock->lock ptlock_ptr(page)#2 rcu_read_lock &p->pi_lock &rq->__lock irq_context: 0 &vma->vm_lock->lock ptlock_ptr(page)#2 rcu_read_lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 fill_pool_map-wait-type-override &rq->__lock irq_context: 0 sb_writers#4 fill_pool_map-wait-type-override &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 jbd2_handle rcu_read_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 jbd2_handle rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 lock#4 rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 lock#4 &obj_hash[i].lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&rew->rew_work) &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 pernet_ops_rwsem rtnl_mutex dev_pm_qos_sysfs_mtx &root->kernfs_rwsem &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 jbd2_handle &ret->b_state_lock bit_wait_table + i irq_context: 0 purge_vmap_area_lock &meta->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem quarantine_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem remove_cache_srcu &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem remove_cache_srcu rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem remove_cache_srcu rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem remove_cache_srcu rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem __ip_vs_app_mutex &obj_hash[i].lock 
pool_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem __ip_vs_app_mutex &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem __ip_vs_app_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_state.exp_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_state.exp_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events fqdir_free_work &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem sysctl_lock krc.lock &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem sysctl_lock krc.lock &base->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem sysctl_lock krc.lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem proc_inum_ida.xa_lock &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem proc_inum_ida.xa_lock pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work krc.lock &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work krc.lock &base->lock irq_context: 0 (wq_completion)netns net_cleanup_work krc.lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work &obj_hash[i].lock pool_lock irq_context: 0 purge_vmap_area_lock kfence_freelist_lock irq_context: 0 &child->perf_event_mutex &cfs_rq->removed.lock irq_context: 0 pernet_ops_rwsem batched_entropy_u8.lock irq_context: 0 pernet_ops_rwsem kfence_freelist_lock irq_context: 0 pernet_ops_rwsem &meta->lock irq_context: 0 &mq_lock irq_context: 0 (wq_completion)events free_ipc_work irq_context: 0 (wq_completion)events free_ipc_work rcu_state.exp_mutex rcu_node_0 irq_context: 0 (wq_completion)events free_ipc_work rcu_state.exp_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)events free_ipc_work rcu_state.exp_mutex rcu_read_lock &pool->lock irq_context: 0 (wq_completion)events free_ipc_work rcu_state.exp_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events free_ipc_work rcu_state.exp_mutex &rnp->exp_wq[3] irq_context: 0 (wq_completion)events free_ipc_work rcu_state.exp_mutex &pool->lock irq_context: 0 (wq_completion)events free_ipc_work rcu_state.exp_mutex &pool->lock &p->pi_lock irq_context: 0 (wq_completion)events free_ipc_work rcu_state.exp_mutex &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events free_ipc_work rcu_state.exp_mutex &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events free_ipc_work rcu_state.exp_mutex &rq->__lock irq_context: 0 (wq_completion)events free_ipc_work rcu_state.exp_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events free_ipc_work &obj_hash[i].lock irq_context: 0 (wq_completion)events free_ipc_work rcu_read_lock mount_lock irq_context: 0 (wq_completion)events free_ipc_work rcu_read_lock mount_lock mount_lock.seqcount irq_context: 0 (wq_completion)events free_ipc_work mount_lock irq_context: 0 (wq_completion)events free_ipc_work mount_lock mount_lock.seqcount irq_context: 0 (wq_completion)events free_ipc_work &fsnotify_mark_srcu irq_context: 0 (wq_completion)events free_ipc_work rcu_read_lock &dentry->d_lock irq_context: 0 (wq_completion)events free_ipc_work &type->s_umount_key#47 irq_context: 0 
(wq_completion)events free_ipc_work &type->s_umount_key#47 shrinker_rwsem irq_context: 0 (wq_completion)events free_ipc_work &type->s_umount_key#47 rename_lock.seqcount irq_context: 0 (wq_completion)events free_ipc_work &type->s_umount_key#47 &dentry->d_lock irq_context: 0 (wq_completion)events free_ipc_work &type->s_umount_key#47 rcu_read_lock &dentry->d_lock irq_context: 0 (wq_completion)events free_ipc_work &type->s_umount_key#47 &sb->s_type->i_lock_key#20 irq_context: 0 (wq_completion)events free_ipc_work &type->s_umount_key#47 &s->s_inode_list_lock irq_context: 0 (wq_completion)events free_ipc_work &type->s_umount_key#47 &xa->xa_lock#6 irq_context: 0 (wq_completion)events free_ipc_work &type->s_umount_key#47 &obj_hash[i].lock irq_context: 0 (wq_completion)events free_ipc_work &type->s_umount_key#47 pool_lock#2 irq_context: 0 (wq_completion)events free_ipc_work &type->s_umount_key#47 &fsnotify_mark_srcu irq_context: 0 (wq_completion)events free_ipc_work &type->s_umount_key#47 sb_lock irq_context: 0 (wq_completion)events free_ipc_work unnamed_dev_ida.xa_lock irq_context: 0 (wq_completion)events free_ipc_work list_lrus_mutex irq_context: 0 (wq_completion)events free_ipc_work &xa->xa_lock#12 irq_context: 0 (wq_completion)events free_ipc_work &xa->xa_lock#12 &obj_hash[i].lock irq_context: 0 (wq_completion)events free_ipc_work &xa->xa_lock#12 pool_lock#2 irq_context: 0 (wq_completion)events free_ipc_work pool_lock#2 irq_context: 0 (wq_completion)events free_ipc_work sb_lock irq_context: 0 (wq_completion)events free_ipc_work sb_lock &obj_hash[i].lock irq_context: 0 (wq_completion)events free_ipc_work sb_lock pool_lock#2 irq_context: 0 (wq_completion)events free_ipc_work mnt_id_ida.xa_lock irq_context: 0 (wq_completion)events free_ipc_work &ids->rwsem irq_context: 0 (wq_completion)events free_ipc_work (work_completion)(&ht->run_work) irq_context: 0 (wq_completion)events free_ipc_work &ht->mutex irq_context: 0 (wq_completion)events free_ipc_work &ht->mutex &obj_hash[i].lock irq_context: 0 (wq_completion)events free_ipc_work &ht->mutex pool_lock#2 irq_context: 0 (wq_completion)events free_ipc_work percpu_counters_lock irq_context: 0 (wq_completion)events free_ipc_work pcpu_lock irq_context: 0 (wq_completion)events free_ipc_work sysctl_lock irq_context: 0 (wq_completion)events free_ipc_work sysctl_lock &obj_hash[i].lock irq_context: 0 (wq_completion)events free_ipc_work sysctl_lock pool_lock#2 irq_context: 0 (wq_completion)events free_ipc_work sysctl_lock krc.lock irq_context: 0 (wq_completion)events free_ipc_work proc_inum_ida.xa_lock irq_context: 0 (wq_completion)events free_ipc_work stock_lock irq_context: 0 &pipe->rd_wait &p->pi_lock &rq->__lock &obj_hash[i].lock irq_context: 0 &pipe->rd_wait &p->pi_lock &rq->__lock &base->lock irq_context: 0 &pipe->rd_wait &p->pi_lock &rq->__lock &base->lock &obj_hash[i].lock irq_context: 0 lock pidmap_lock &____s->seqcount#2 irq_context: 0 lock pidmap_lock &pcp->lock &zone->lock irq_context: 0 lock pidmap_lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 lock pidmap_lock &____s->seqcount irq_context: 0 kn->active#5 fs_reclaim &rq->__lock irq_context: 0 kn->active#5 fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 rcu_read_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &rq->__lock irq_context: 0 sb_writers#4 
&sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 remove_cache_srcu pool_lock#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle key#4 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &journal->j_state_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &journal->j_state_lock &journal->j_wait_commit irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &journal->j_state_lock &journal->j_wait_commit &p->pi_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &journal->j_state_lock &journal->j_wait_commit &p->pi_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &journal->j_state_lock &journal->j_wait_commit &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &journal->j_state_lock &journal->j_wait_transaction_locked irq_context: 0 namespace_sem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock &rcu_state.gp_wq irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 batched_entropy_u8.lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 kfence_freelist_lock irq_context: 0 &sig->cred_guard_mutex &ei->xattr_sem &rq->__lock irq_context: 0 &sig->cred_guard_mutex &ei->xattr_sem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_RXRPC &rxnet->local_mutex &n->list_lock irq_context: 0 sk_lock-AF_RXRPC &rxnet->local_mutex &n->list_lock &c->lock irq_context: 0 &mm->mmap_lock sb_writers#4 jbd2_handle &mapping->private_lock irq_context: 0 pernet_ops_rwsem fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &mm->mmap_lock ptlock_ptr(page) irq_context: 0 sk_lock-AF_INET slock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock &c->lock irq_context: 0 pernet_ops_rwsem rtnl_mutex lock kernfs_idr_lock &pcp->lock &zone->lock irq_context: 0 pernet_ops_rwsem rtnl_mutex lock kernfs_idr_lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &mm->mmap_lock sb_writers#4 jbd2_handle &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &mm->mmap_lock rcu_read_lock rcu_read_lock ptlock_ptr(page)#2 key irq_context: 0 &mm->mmap_lock sb_writers#4 &c->lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &mapping->private_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ret->b_state_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ret->b_state_lock &journal->j_list_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock 
&rcu_state.gp_wq &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &mm->mmap_lock &obj_hash[i].lock irq_context: 0 namespace_sem remove_cache_srcu irq_context: 0 namespace_sem remove_cache_srcu quarantine_lock irq_context: 0 namespace_sem remove_cache_srcu &c->lock irq_context: 0 namespace_sem remove_cache_srcu &n->list_lock irq_context: 0 namespace_sem remove_cache_srcu &obj_hash[i].lock irq_context: 0 namespace_sem remove_cache_srcu pool_lock#2 irq_context: 0 namespace_sem remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 namespace_sem remove_cache_srcu &rq->__lock irq_context: 0 namespace_sem remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &type->s_umount_key#22/1 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem nf_hook_mutex &____s->seqcount#2 irq_context: 0 pernet_ops_rwsem nf_hook_mutex &____s->seqcount irq_context: 0 &sb->s_type->i_mutex_key#8 &n->list_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &n->list_lock &c->lock irq_context: 0 &mm->mmap_lock &anon_vma->rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 tomoyo_ss batched_entropy_u8.lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 tomoyo_ss kfence_freelist_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 tomoyo_ss &meta->lock irq_context: 0 sk_lock-AF_ALG (console_sem).lock irq_context: 0 sk_lock-AF_ALG console_lock console_srcu console_owner_lock irq_context: 0 sk_lock-AF_ALG console_lock console_srcu console_owner irq_context: 0 sk_lock-AF_ALG console_lock console_srcu console_owner &port_lock_key irq_context: 0 sb_writers#4 sb_internal remove_cache_srcu rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#4 sb_internal remove_cache_srcu rcu_read_lock &rq->__lock irq_context: 0 sb_writers#4 sb_internal remove_cache_srcu rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 sb_internal remove_cache_srcu &rq->__lock irq_context: 0 sb_writers#4 sb_internal remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_ALG console_lock console_srcu console_owner console_owner_lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ei->i_data_sem rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ei->i_data_sem rcu_read_lock &rq->__lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ei->i_data_sem rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock sb_pagefaults jbd2_handle &rq->__lock irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock sb_pagefaults jbd2_handle &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_ALG batched_entropy_u8.lock irq_context: 0 sk_lock-AF_ALG kfence_freelist_lock irq_context: 0 sk_lock-AF_ALG &meta->lock irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock sb_pagefaults mapping.invalidate_lock &folio_wait_table[i] irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &folio_wait_table[i] irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &folio_wait_table[i] &p->pi_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &folio_wait_table[i] &p->pi_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle 
&folio_wait_table[i] &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 remove_cache_srcu rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#8 &pcp->lock &zone->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &c->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &____s->seqcount#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &____s->seqcount irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ei->i_es_lock &____s->seqcount#2 irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ei->i_es_lock &____s->seqcount irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &n->list_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &n->list_lock &c->lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &pcp->lock &zone->lock irq_context: 0 pernet_ops_rwsem rtnl_mutex uevent_sock_mutex rcu_read_lock rcu_node_0 irq_context: 0 pernet_ops_rwsem rtnl_mutex uevent_sock_mutex rcu_read_lock &rq->__lock irq_context: 0 pernet_ops_rwsem rtnl_mutex pcpu_alloc_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#8 rcu_read_lock &base->lock irq_context: 0 &sb->s_type->i_mutex_key#8 rcu_read_lock &base->lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock &sem->wait_lock irq_context: 0 sk_lock-AF_ALG &sem->wait_lock irq_context: 0 sk_lock-AF_ALG &p->pi_lock irq_context: 0 sk_lock-AF_ALG &p->pi_lock &rq->__lock irq_context: 0 sk_lock-AF_ALG &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem rtnl_mutex uevent_sock_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem batched_entropy_u8.lock crngs.lock irq_context: 0 pernet_ops_rwsem cpu_hotplug_lock wq_pool_mutex wq_pool_mutex.wait_lock irq_context: 0 pernet_ops_rwsem cpu_hotplug_lock wq_pool_mutex &wq->mutex &rq->__lock irq_context: 0 pernet_ops_rwsem cpu_hotplug_lock wq_pool_mutex &wq->mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq _xmit_ETHER#2 rcu_read_lock pool_lock#2 irq_context: 0 pernet_ops_rwsem cpu_hotplug_lock wq_pool_mutex.wait_lock irq_context: 0 pernet_ops_rwsem cpu_hotplug_lock &p->pi_lock irq_context: 0 pernet_ops_rwsem cpu_hotplug_lock &p->pi_lock &rq->__lock irq_context: 0 pernet_ops_rwsem cpu_hotplug_lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_TIPC &rq->__lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_TIPC &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem cpu_hotplug_lock wq_pool_mutex fs_reclaim &rq->__lock irq_context: 0 pernet_ops_rwsem cpu_hotplug_lock wq_pool_mutex fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_lock_key#22 &xa->xa_lock#6 fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 sb_writers#4 &sb->s_type->i_lock_key#22 &xa->xa_lock#6 fill_pool_map-wait-type-override pool_lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex &____s->seqcount#2 irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex &n->list_lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex 
&n->list_lock &c->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &meta_group_info[i]->alloc_sem &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &meta_group_info[i]->alloc_sem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 (wq_completion)rcu_gp (work_completion)(&(&ssp->srcu_sup->work)->work) rcu_read_lock &pool->lock fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&(&ssp->srcu_sup->work)->work) rcu_read_lock &pool->lock fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 (wq_completion)rcu_gp (work_completion)(&(&ssp->srcu_sup->work)->work) rcu_read_lock &pool->lock fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 &mm->mmap_lock &anon_vma->rwsem rcu_node_0 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss rcu_read_lock &rcu_state.expedited_wq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events free_ipc_work &rnp->exp_lock irq_context: 0 (wq_completion)events free_ipc_work rcu_state.exp_mutex irq_context: 0 (wq_completion)events free_ipc_work rcu_state.exp_mutex rcu_state.exp_mutex.wait_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->nc.work)->work) rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)events free_ipc_work rcu_state.exp_mutex &rnp->exp_wq[2] irq_context: 0 cgroup_threadgroup_rwsem freezer_mutex rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)events (work_completion)(&s->destroy_work) &obj_hash[i].lock pool_lock irq_context: 0 &xt[i].mutex remove_cache_srcu &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&pwq->unbound_release_work) &rnp->exp_wq[3] irq_context: 0 &xt[i].mutex remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &xt[i].mutex remove_cache_srcu pool_lock#2 irq_context: 0 sb_writers#4 fill_pool_map-wait-type-override &c->lock irq_context: 0 sb_writers#4 fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 sb_writers#4 fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)events free_ipc_work rcu_state.exp_mutex.wait_lock irq_context: 0 (wq_completion)events free_ipc_work &p->pi_lock irq_context: 0 (wq_completion)events free_ipc_work &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events free_ipc_work &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events free_ipc_work &type->s_umount_key#47 rcu_node_0 irq_context: 0 (wq_completion)events free_ipc_work &type->s_umount_key#47 &rq->__lock irq_context: 0 (wq_completion)events free_ipc_work &type->s_umount_key#47 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events free_ipc_work &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)events free_ipc_work sysctl_lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)events free_ipc_work sysctl_lock fill_pool_map-wait-type-override pool_lock irq_context: 0 
&sb->s_type->i_mutex_key#8 &rq->__lock &cfs_rq->removed.lock irq_context: 0 &sb->s_type->i_mutex_key#8 rcu_read_lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#8 rcu_read_lock fill_pool_map-wait-type-override &c->lock irq_context: 0 &sb->s_type->i_mutex_key#8 rcu_read_lock fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 &sb->s_type->i_mutex_key#8 rcu_read_lock fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 &sb->s_type->i_mutex_key#8 rcu_read_lock fill_pool_map-wait-type-override pool_lock irq_context: 0 cgroup_threadgroup_rwsem freezer_mutex rcu_read_lock &rq->__lock irq_context: 0 cgroup_threadgroup_rwsem freezer_mutex rcu_read_lock &rcu_state.expedited_wq irq_context: 0 cgroup_threadgroup_rwsem freezer_mutex rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 cgroup_threadgroup_rwsem freezer_mutex rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 cgroup_threadgroup_rwsem freezer_mutex rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 namespace_sem &n->list_lock irq_context: 0 &type->s_umount_key#22/1 pcpu_alloc_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 namespace_sem rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 namespace_sem rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 namespace_sem rcu_read_lock rcu_node_0 irq_context: 0 namespace_sem rcu_read_lock &rq->__lock irq_context: 0 namespace_sem rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)rcu_gp (work_completion)(&rew->rew_work) rcu_state.exp_wake_mutex &rnp->exp_wq[3] &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)events_unbound (reaper_work).work &ACCESS_PRIVATE(ssp->srcu_sup, lock) rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &type->s_umount_key#22/1 pcpu_alloc_mutex &rq->__lock irq_context: 0 &type->s_umount_key#22/1 pcpu_alloc_mutex.wait_lock irq_context: 0 &type->s_umount_key#22/1 &p->pi_lock irq_context: 0 &type->s_umount_key#22/1 &p->pi_lock &rq->__lock irq_context: 0 &type->s_umount_key#22/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss mount_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss mount_lock rcu_read_lock rename_lock.seqcount irq_context: 0 &type->s_umount_key#22/1 rcu_read_lock rcu_node_0 irq_context: 0 &type->s_umount_key#22/1 rcu_read_lock &rq->__lock irq_context: 0 &type->s_umount_key#22/1 remove_cache_srcu irq_context: 0 &type->s_umount_key#22/1 remove_cache_srcu quarantine_lock irq_context: 0 &type->s_umount_key#22/1 remove_cache_srcu &c->lock irq_context: 0 &type->s_umount_key#22/1 remove_cache_srcu &n->list_lock irq_context: 0 &type->s_umount_key#22/1 remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 &type->s_umount_key#22/1 remove_cache_srcu &obj_hash[i].lock irq_context: 0 &type->s_umount_key#22/1 remove_cache_srcu rcu_read_lock &rq->__lock irq_context: 0 &type->s_umount_key#22/1 remove_cache_srcu rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &type->s_umount_key#22/1 remove_cache_srcu &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem &cfs_rq->removed.lock irq_context: 0 sk_lock-AF_RXRPC &rxnet->local_mutex &____s->seqcount#2 irq_context: 0 sk_lock-AF_RXRPC &rxnet->local_mutex &____s->seqcount 
irq_context: 0 pernet_ops_rwsem cpu_hotplug_lock &rq->__lock irq_context: 0 pernet_ops_rwsem cpu_hotplug_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&(&krcp->monitor_work)->work) rcu_callback rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#4 rcu_node_0 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex krc.lock &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex krc.lock &base->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex krc.lock &base->lock &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &tn->lock &rq->__lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle &ei->i_data_sem &rq->__lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle &ei->i_data_sem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock sb_pagefaults &____s->seqcount#2 irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock sb_pagefaults &____s->seqcount irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock sb_pagefaults &n->list_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex uevent_sock_mutex fs_reclaim &rq->__lock irq_context: 0 pernet_ops_rwsem rtnl_mutex uevent_sock_mutex fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem lock#4 &lruvec->lru_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &meta->lock irq_context: 0 sk_lock-AF_ALG rcu_read_lock &ei->socket.wq.wait &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 slock-AF_ALG &sk->sk_lock.wq &p->pi_lock &cfs_rq->removed.lock irq_context: 0 &sb->s_type->i_mutex_key#8 batched_entropy_u8.lock irq_context: 0 &sb->s_type->i_mutex_key#8 kfence_freelist_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) &____s->seqcount#2 irq_context: 0 &sb->s_type->i_mutex_key#8 rcu_read_lock &pcp->lock &zone->lock irq_context: 0 &sb->s_type->i_mutex_key#8 rcu_node_0 irq_context: 0 &sb->s_type->i_mutex_key#8 &rcu_state.expedited_wq irq_context: 0 &sb->s_type->i_mutex_key#8 &rcu_state.expedited_wq &p->pi_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#8 &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem rtnl_mutex dpm_list_mtx &rq->__lock irq_context: 0 pernet_ops_rwsem rtnl_mutex dpm_list_mtx &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh &hsr->seqnr_lock rcu_read_lock &____s->seqcount#2 irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh &hsr->seqnr_lock rcu_read_lock &____s->seqcount irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle &rq->__lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock sb_pagefaults remove_cache_srcu rcu_read_lock rcu_node_0 irq_context: 0 
sk_lock-AF_ALG &mm->mmap_lock sb_pagefaults remove_cache_srcu rcu_read_lock &rq->__lock irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock sb_pagefaults remove_cache_srcu rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_TIPC &tn->nametbl_lock &____s->seqcount#2 irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_TIPC &tn->nametbl_lock &____s->seqcount irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle rcu_read_lock &rcu_state.expedited_wq irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#8 &cfs_rq->removed.lock irq_context: 0 pernet_ops_rwsem fill_pool_map-wait-type-override &rq->__lock irq_context: 0 pernet_ops_rwsem fill_pool_map-wait-type-override &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 remove_cache_srcu &obj_hash[i].lock pool_lock irq_context: 0 pernet_ops_rwsem fill_pool_map-wait-type-override &cfs_rq->removed.lock irq_context: 0 pernet_ops_rwsem fill_pool_map-wait-type-override &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex k-sk_lock-AF_INET6 &rq->__lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex k-sk_lock-AF_INET6 &rq->__lock &cfs_rq->removed.lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex k-sk_lock-AF_INET6 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex nf_hook_mutex rcu_read_lock rcu_node_0 irq_context: 0 pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex nf_hook_mutex rcu_read_lock &rq->__lock irq_context: 0 pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex nf_hook_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)rcu_gp (work_completion)(&rew->rew_work) rcu_read_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &rcu_state.expedited_wq irq_context: 0 pernet_ops_rwsem rcu_state.exp_mutex &rnp->exp_wq[0] irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rnp->exp_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rnp->exp_wq[1] irq_context: 0 pernet_ops_rwsem &rnp->exp_wq[1] irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_state.exp_mutex irq_context: 0 pernet_ops_rwsem &rnp->exp_wq[2] irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_state.exp_mutex rcu_state.exp_mutex.wait_lock irq_context: 0 &mm->mmap_lock &anon_vma->rwsem rcu_read_lock &rcu_state.expedited_wq irq_context: 0 &mm->mmap_lock &anon_vma->rwsem rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 &mm->mmap_lock &anon_vma->rwsem rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 &mm->mmap_lock &anon_vma->rwsem rcu_read_lock &rcu_state.expedited_wq &p->pi_lock 
&rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 tracepoints_mutex &rnp->exp_wq[0] irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock sb_pagefaults mapping.invalidate_lock rcu_read_lock &rcu_state.expedited_wq irq_context: 0 &type->i_mutex_dir_key#3 fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 &type->i_mutex_dir_key#3 fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &type->i_mutex_dir_key#3 sb_writers#4 jbd2_handle rcu_read_lock rcu_node_0 irq_context: 0 &type->i_mutex_dir_key#3 sb_writers#4 jbd2_handle rcu_read_lock &rq->__lock irq_context: 0 &type->i_mutex_dir_key#3 sb_writers#4 jbd2_handle rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_state.exp_mutex.wait_lock irq_context: 0 &rnp->exp_wq[1] irq_context: 0 (wq_completion)rcu_gp (work_completion)(&rew->rew_work) rcu_state.exp_wake_mutex &rnp->exp_wq[1] &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)events free_ipc_work &type->s_umount_key#47 &obj_hash[i].lock pool_lock irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock sb_pagefaults mapping.invalidate_lock rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock sb_pagefaults mapping.invalidate_lock rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock sb_pagefaults mapping.invalidate_lock rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events free_ipc_work rcu_state.exp_mutex &rnp->exp_wq[0] irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &mm->mmap_lock rcu_read_lock &rcu_state.gp_wq irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &mm->mmap_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &mm->mmap_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &mm->mmap_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ei->i_es_lock &n->list_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ei->i_es_lock &n->list_lock &c->lock irq_context: 0 &type->s_umount_key#22/1 &xa->xa_lock#12 &c->lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 tracepoints_mutex cpu_hotplug_lock static_call_mutex text_mutex.wait_lock irq_context: 0 tracepoints_mutex cpu_hotplug_lock static_call_mutex &p->pi_lock irq_context: 0 tracepoints_mutex cpu_hotplug_lock static_call_mutex &p->pi_lock &rq->__lock irq_context: 0 tracepoints_mutex cpu_hotplug_lock static_call_mutex &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 tracepoints_mutex cpu_hotplug_lock static_call_mutex rcu_node_0 irq_context: 0 tracepoints_mutex cpu_hotplug_lock static_call_mutex &rq->__lock irq_context: 0 tracepoints_mutex cpu_hotplug_lock static_call_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq 
irq_context: softirq (&ndev->rs_timer) &ndev->lock fill_pool_map-wait-type-override &c->lock irq_context: softirq (&ndev->rs_timer) &ndev->lock fill_pool_map-wait-type-override &n->list_lock irq_context: softirq (&ndev->rs_timer) &ndev->lock fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh rcu_read_lock &pool->lock fill_pool_map-wait-type-override pool_lock#2 irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh rcu_read_lock &pool->lock fill_pool_map-wait-type-override &c->lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh rcu_read_lock &pool->lock fill_pool_map-wait-type-override &n->list_lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh rcu_read_lock &pool->lock fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh rcu_read_lock &pool->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->nc.work)->work) &rq->__lock &cfs_rq->removed.lock irq_context: softirq &(&hwstats->traffic_dw)->timer rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 pernet_ops_rwsem &rcu_state.expedited_wq &p->pi_lock irq_context: 0 pernet_ops_rwsem &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 pernet_ops_rwsem &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 slock-AF_ALG &sk->sk_lock.wq &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: softirq (&peer->hb_timer) slock-AF_INET6 rcu_read_lock rcu_read_lock &n->lock &base->lock irq_context: softirq (&peer->hb_timer) slock-AF_INET6 rcu_read_lock rcu_read_lock &n->lock &base->lock &obj_hash[i].lock irq_context: softirq (&peer->hb_timer) slock-AF_INET6 rcu_read_lock rcu_read_lock &n->lock &c->lock irq_context: softirq (&peer->hb_timer) slock-AF_INET6 rcu_read_lock rcu_read_lock &n->lock pool_lock#2 irq_context: 0 sk_lock-AF_ALG rcu_node_0 irq_context: 0 sk_lock-AF_ALG remove_cache_srcu irq_context: 0 sk_lock-AF_ALG remove_cache_srcu quarantine_lock irq_context: 0 sk_lock-AF_ALG remove_cache_srcu &c->lock irq_context: 0 sk_lock-AF_ALG remove_cache_srcu &n->list_lock irq_context: 0 sk_lock-AF_ALG remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 sk_lock-AF_ALG remove_cache_srcu &obj_hash[i].lock irq_context: 0 sk_lock-AF_ALG remove_cache_srcu &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 pernet_ops_rwsem rcu_state.barrier_mutex &cfs_rq->removed.lock irq_context: 0 pernet_ops_rwsem rcu_state.barrier_mutex &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem rcu_state.barrier_mutex pool_lock#2 irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#40 rcu_read_lock &xa->xa_lock#6 &wb->work_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#40 rcu_read_lock &xa->xa_lock#6 key#12 irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#40 rcu_read_lock &xa->xa_lock#6 key#13 irq_context: 0 sb_writers#3 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 
&type->i_mutex_dir_key#3 tomoyo_ss mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 tomoyo_ss mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 tomoyo_ss rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 tomoyo_ss rcu_read_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 tomoyo_ss rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &cfs_rq->removed.lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 remove_cache_srcu rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &mm->mmap_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &caifn->caifdevs.lock rcu_state.exp_mutex.wait_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &caifn->caifdevs.lock &p->pi_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &caifn->caifdevs.lock &p->pi_lock &rq->__lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &caifn->caifdevs.lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 fill_pool_map-wait-type-override &c->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 fill_pool_map-wait-type-override &n->list_lock irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem fill_pool_map-wait-type-override &n->list_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 fill_pool_map-wait-type-override &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 fill_pool_map-wait-type-override &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 fill_pool_map-wait-type-override pool_lock irq_context: 0 sb_writers#8 kn->active#5 &kernfs_locks->open_file_mutex[count] rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#8 kn->active#5 &kernfs_locks->open_file_mutex[count] rcu_read_lock &rq->__lock irq_context: 0 &mm->mmap_lock sb_writers#4 &sb->s_type->i_lock_key#22 irq_context: 0 &mm->mmap_lock sb_writers#4 &wb->list_lock irq_context: 0 &mm->mmap_lock sb_writers#4 &wb->list_lock &sb->s_type->i_lock_key#22 irq_context: 0 sb_writers#8 tomoyo_ss rcu_read_lock &rq->__lock irq_context: 0 &sighand->siglock &c->lock irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem fill_pool_map-wait-type-override &rq->__lock irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem fill_pool_map-wait-type-override &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sighand->siglock &n->list_lock irq_context: 0 &sighand->siglock &n->list_lock &c->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 fill_pool_map-wait-type-override &c->lock irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem &anon_vma->rwsem fill_pool_map-wait-type-override &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 crypto_alg_sem 
&rq->__lock irq_context: 0 crypto_alg_sem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &mm->mmap_lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle &sbi->s_orphan_lock rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#4 sb_internal jbd2_handle &sbi->s_orphan_lock rcu_read_lock &rq->__lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle &sbi->s_orphan_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 sb_internal jbd2_handle key#4 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &meta->lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->orig_work)->work) pool_lock#2 irq_context: 0 pernet_ops_rwsem rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock &xa->xa_lock#6 rcu_read_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 &sb->s_type->i_mutex_key#8 rcu_read_lock batched_entropy_u8.lock irq_context: 0 &sb->s_type->i_mutex_key#8 rcu_read_lock kfence_freelist_lock irq_context: 0 (wq_completion)wg-crypt-wg2#4 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh batched_entropy_u32.lock crngs.lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex &obj_hash[i].lock pool_lock irq_context: softirq &ei->i_completed_io_lock rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override pool_lock#2 irq_context: softirq &ei->i_completed_io_lock rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle &ei->i_data_sem rcu_node_0 irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle &ei->i_data_sem &rcu_state.expedited_wq irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle &ei->i_data_sem &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle &ei->i_data_sem &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle &ei->i_data_sem &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_ALG remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_ALG remove_cache_srcu pool_lock#2 irq_context: 0 sk_lock-AF_ALG remove_cache_srcu rcu_read_lock rcu_node_0 irq_context: 0 sk_lock-AF_ALG remove_cache_srcu rcu_read_lock &rq->__lock irq_context: 0 sk_lock-AF_ALG remove_cache_srcu rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem rcu_read_lock &cfs_rq->removed.lock irq_context: 0 pernet_ops_rwsem rcu_read_lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock &cfs_rq->removed.lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 tomoyo_ss rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &n->list_lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)events_unbound 
(work_completion)(&port->bc_work) quarantine_lock irq_context: 0 pernet_ops_rwsem &net->ipv6.ip6addrlbl_table.lock &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &rcu_state.expedited_wq irq_context: 0 (wq_completion)netns net_cleanup_work rcu_state.barrier_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work rcu_state.barrier_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 fs_reclaim &rq->__lock irq_context: 0 sk_lock-AF_TIPC rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#8 fill_pool_map-wait-type-override &rq->__lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle &ei->i_data_sem remove_cache_srcu &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#8 fill_pool_map-wait-type-override &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#8 rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 &sb->s_type->i_mutex_key#8 rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#8 rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem remove_cache_srcu irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem remove_cache_srcu quarantine_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem remove_cache_srcu &c->lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem remove_cache_srcu &n->list_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)events_unbound connector_reaper_work &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)events (work_completion)(&pwq->unbound_release_work) &rnp->exp_wq[2] irq_context: 0 namespace_sem &n->list_lock &c->lock irq_context: 0 wq_pool_attach_mutex wq_pool_attach_mutex.wait_lock irq_context: 0 wq_pool_attach_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 wq_pool_attach_mutex.wait_lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&(&ssp->srcu_sup->work)->work) rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem remove_cache_srcu &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem remove_cache_srcu &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_power_efficient (gc_work).work &cfs_rq->removed.lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle &ei->i_data_sem remove_cache_srcu rcu_node_0 irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle &ei->i_data_sem remove_cache_srcu &rcu_state.expedited_wq irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle &ei->i_data_sem remove_cache_srcu &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle &ei->i_data_sem remove_cache_srcu &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle 
&ei->i_data_sem remove_cache_srcu &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle &ei->i_data_sem remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &cfs_rq->removed.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-sk_lock-AF_INET6 k-slock-AF_INET6 &tcp_hashinfo.bhash[i].lock stock_lock rcu_read_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem &ei->i_es_lock &____s->seqcount#2 irq_context: 0 pernet_ops_rwsem rtnl_mutex dev_hotplug_mutex &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem &ei->i_es_lock &____s->seqcount irq_context: 0 pernet_ops_rwsem rtnl_mutex dev_hotplug_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &type->i_mutex_dir_key#4 &root->kernfs_rwsem &cfs_rq->removed.lock irq_context: 0 &type->i_mutex_dir_key#4 &root->kernfs_rwsem &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&(&krcp->monitor_work)->work) rcu_callback stock_lock rcu_read_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle &ei->i_data_sem &n->list_lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle &ei->i_data_sem &n->list_lock &c->lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&(&ssp->srcu_sup->work)->work) &rq->__lock &cfs_rq->removed.lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &ei->i_data_sem &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&(&ssp->srcu_sup->work)->work) &ssp->srcu_sup->srcu_gp_mutex rcu_node_0 irq_context: 0 (wq_completion)rcu_gp (work_completion)(&(&ssp->srcu_sup->work)->work) &ssp->srcu_sup->srcu_gp_mutex &rcu_state.expedited_wq irq_context: 0 (wq_completion)rcu_gp (work_completion)(&(&ssp->srcu_sup->work)->work) &ssp->srcu_sup->srcu_gp_mutex &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&(&ssp->srcu_sup->work)->work) &ssp->srcu_sup->srcu_gp_mutex &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&(&ssp->srcu_sup->work)->work) &ssp->srcu_sup->srcu_gp_mutex &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-kex-wg2#14 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &rq->__lock irq_context: 0 (wq_completion)wg-crypt-wg2#7 irq_context: 0 (wq_completion)wg-crypt-wg2#7 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); 
(typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) irq_context: 0 (wq_completion)wg-crypt-wg2#7 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &r->consumer_lock#2 irq_context: 0 (wq_completion)wg-crypt-wg2#7 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem rcu_node_0 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &idev->mc_lock &obj_hash[i].lock pool_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 remove_cache_srcu &c->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 remove_cache_srcu &n->list_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 remove_cache_srcu &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 remove_cache_srcu rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 remove_cache_srcu rcu_read_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 remove_cache_srcu rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 remove_cache_srcu &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_read_lock &n->list_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_read_lock &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_state.exp_mutex &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 remove_cache_srcu irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 remove_cache_srcu quarantine_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &nft_net->commit_mutex &rq->__lock irq_context: 0 &group->mark_mutex lock &group->inotify_data.idr_lock &c->lock irq_context: 0 &type->i_mutex_dir_key#3 sb_writers#4 jbd2_handle rcu_node_0 irq_context: 0 &sb->s_type->i_mutex_key#8 rcu_read_lock &rcu_state.expedited_wq irq_context: 0 &sb->s_type->i_mutex_key#8 rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 &sb->s_type->i_mutex_key#8 rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#8 rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#8 rcu_read_lock &n->list_lock irq_context: 0 sk_lock-AF_ALG &rcu_state.expedited_wq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_node_0 irq_context: 0 &group->mark_mutex &n->list_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex 
quarantine_lock irq_context: 0 &group->mark_mutex remove_cache_srcu irq_context: 0 &group->mark_mutex remove_cache_srcu quarantine_lock irq_context: 0 &group->mark_mutex remove_cache_srcu &c->lock irq_context: 0 &group->mark_mutex remove_cache_srcu &n->list_lock irq_context: 0 &group->mark_mutex remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 &group->mark_mutex remove_cache_srcu &obj_hash[i].lock irq_context: 0 &group->mark_mutex remove_cache_srcu pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem kernfs_idr_lock &obj_hash[i].lock pool_lock irq_context: 0 &root->kernfs_rwsem &rq->__lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 fill_pool_map-wait-type-override &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 fill_pool_map-wait-type-override &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#8 &of->mutex kn->active#5 uevent_sock_mutex rcu_read_lock &rcu_state.expedited_wq irq_context: 0 sb_writers#8 &of->mutex kn->active#5 uevent_sock_mutex rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#5 uevent_sock_mutex rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#8 &of->mutex kn->active#5 uevent_sock_mutex rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock rcu_node_0 irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock &rcu_state.expedited_wq irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock rcu_read_lock &q->queue_lock &c->lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 quarantine_lock irq_context: 0 &p->lock &of->mutex kn->active#5 rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &sb->s_type->i_mutex_key#12 &rq->__lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &sb->s_type->i_mutex_key#12 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_read_lock rcu_read_lock &pool->lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_read_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_read_lock rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock 
&rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &rcu_state.expedited_wq irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &rcu_state.expedited_wq &p->pi_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_read_lock &base->lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_read_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_state.barrier_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_state.barrier_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle &ei->i_data_sem rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle &ei->i_data_sem rcu_read_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ei->i_prealloc_lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) rcu_node_0 irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock rcu_read_lock &rcu_state.gp_wq irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_INET slock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET slock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 pool_lock#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem rcu_read_lock &rcu_state.gp_wq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex raw_notifier_lock irq_context: 0 rtnl_mutex bcm_notifier_lock irq_context: 0 rtnl_mutex isotp_notifier_lock irq_context: 0 rtnl_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex uevent_sock_mutex rcu_read_lock rcu_node_0 irq_context: 0 rcu_state.barrier_mutex rcu_read_lock &rq->__lock irq_context: 0 rcu_state.barrier_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq (&peer->hb_timer) slock-AF_INET6 rcu_read_lock rcu_read_lock &ul->lock#2 
irq_context: softirq (&peer->hb_timer) slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &c->lock irq_context: softirq (&peer->hb_timer) slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock pool_lock#2 irq_context: softirq (&peer->hb_timer) slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 rtnl_mutex sysctl_lock krc.lock &obj_hash[i].lock irq_context: 0 rtnl_mutex sysctl_lock krc.lock &base->lock irq_context: 0 rtnl_mutex sysctl_lock krc.lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound (reaper_work).work pool_lock irq_context: 0 rtnl_mutex act_mod_lock irq_context: 0 rtnl_mutex &tn->idrinfo->lock irq_context: 0 rtnl_mutex &tn->idrinfo->lock fs_reclaim irq_context: 0 rtnl_mutex &tn->idrinfo->lock fs_reclaim &rq->__lock irq_context: 0 rtnl_mutex &tn->idrinfo->lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 rtnl_mutex &tn->idrinfo->lock pool_lock#2 irq_context: 0 rtnl_mutex &tn->idrinfo->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&(&krcp->monitor_work)->work) rcu_node_0 irq_context: 0 (wq_completion)events (work_completion)(&(&krcp->monitor_work)->work) &rcu_state.expedited_wq irq_context: 0 (wq_completion)events (work_completion)(&(&krcp->monitor_work)->work) &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)events (work_completion)(&(&krcp->monitor_work)->work) &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&(&krcp->monitor_work)->work) &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&(&krcp->monitor_work)->work) rcu_callback rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&(&krcp->monitor_work)->work) rcu_callback rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_TIPC rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_TIPC rcu_read_lock rcu_read_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) rcu_read_lock_bh rcu_read_lock &n->list_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) rcu_read_lock_bh rcu_read_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex subsys mutex#17 &rq->__lock irq_context: 0 napi_hash_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &p->pi_lock &cfs_rq->removed.lock irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock sb_pagefaults jbd2_handle rcu_read_lock rcu_node_0 irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock sb_pagefaults jbd2_handle rcu_read_lock &rq->__lock irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock sb_pagefaults jbd2_handle rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &root->kernfs_rwsem rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#4 &base->lock irq_context: 0 sb_writers#4 &base->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex &idev->mc_lock &bridge_netdev_addr_lock_key &obj_hash[i].lock irq_context: 0 rtnl_mutex &idev->mc_lock &bridge_netdev_addr_lock_key krc.lock irq_context: 0 rtnl_mutex &bridge_netdev_addr_lock_key irq_context: 0 rtnl_mutex &br->multicast_lock irq_context: 0 rtnl_mutex (work_completion)(&br->mcast_gc_work) 
irq_context: 0 rtnl_mutex rcu_state.barrier_mutex irq_context: 0 rtnl_mutex rcu_state.barrier_mutex rcu_state.barrier_lock irq_context: 0 rtnl_mutex rcu_state.barrier_mutex rcu_state.barrier_lock &obj_hash[i].lock irq_context: 0 rtnl_mutex rcu_state.barrier_mutex &x->wait#24 irq_context: 0 rtnl_mutex rcu_state.barrier_mutex &rq->__lock irq_context: 0 rtnl_mutex rcu_state.barrier_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_TIPC rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 rtnl_mutex (work_completion)(&ht->run_work) irq_context: 0 rtnl_mutex &ht->mutex irq_context: 0 rtnl_mutex &ht->mutex &rq->__lock irq_context: 0 rtnl_mutex &ht->mutex &obj_hash[i].lock irq_context: 0 rtnl_mutex &ht->mutex pool_lock#2 irq_context: 0 rtnl_mutex dev_pm_qos_sysfs_mtx &root->kernfs_rwsem &rq->__lock irq_context: 0 &type->i_mutex_dir_key#3 fs_reclaim &rq->__lock irq_context: 0 &type->i_mutex_dir_key#3 fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 rcu_read_lock &rcu_state.expedited_wq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 kn->active#5 &kernfs_locks->open_file_mutex[count] fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 kn->active#5 &kernfs_locks->open_file_mutex[count] fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 cb_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: 0 cb_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 cb_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock sb_pagefaults mapping.invalidate_lock rcu_read_lock &xa->xa_lock#6 rcu_read_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &sb->s_type->i_mutex_key#12/4 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex lock kernfs_idr_lock &____s->seqcount#2 irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_state.exp_mutex rcu_state.exp_mutex.wait_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &caifn->caifdevs.lock &rnp->exp_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &caifn->caifdevs.lock &rnp->exp_wq[0] irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &caifn->caifdevs.lock &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &caifn->caifdevs.lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &caifn->caifdevs.lock rcu_state.exp_mutex irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &caifn->caifdevs.lock rcu_state.exp_mutex rcu_state.exp_mutex.wait_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &caifn->caifdevs.lock rcu_state.exp_mutex.wait_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex 
&caifn->caifdevs.lock &p->pi_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &caifn->caifdevs.lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &caifn->caifdevs.lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->nc.work)->work) &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &obj_hash[i].lock pool_lock irq_context: 0 rcu_read_lock_bh quarantine_lock irq_context: 0 pidmap_lock &obj_hash[i].lock irq_context: 0 rtnl_mutex rcu_state.barrier_mutex rcu_state.barrier_mutex.wait_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &meta->lock irq_context: 0 sb_writers#8 &sb->s_type->i_mutex_key#13 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex rcu_state.barrier_mutex &rq->__lock &cfs_rq->removed.lock irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-crypt-wg0#4 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 (wq_completion)wg-crypt-wg0#4 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &n->list_lock irq_context: 0 (wq_completion)wg-crypt-wg0#4 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)wg-crypt-wg0#4 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)events deferred_process_work rtnl_mutex &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_data_sem &bgl->locks[i].lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rlock-AF_PACKET irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock rlock-AF_PACKET irq_context: 0 ppp_mutex irq_context: 0 &fsnotify_mark_srcu &____s->seqcount irq_context: 0 ppp_mutex &mm->mmap_lock irq_context: 0 ppp_mutex fs_reclaim irq_context: 0 ppp_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 ppp_mutex stock_lock irq_context: 0 
ppp_mutex &c->lock irq_context: 0 ppp_mutex pool_lock#2 irq_context: 0 ppp_mutex stack_depot_init_mutex irq_context: 0 ppp_mutex stack_depot_init_mutex &rq->__lock irq_context: 0 ppp_mutex stack_depot_init_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 ppp_mutex rtnl_mutex irq_context: 0 ppp_mutex rtnl_mutex pcpu_alloc_mutex irq_context: 0 ppp_mutex rtnl_mutex pcpu_alloc_mutex pcpu_lock irq_context: 0 ppp_mutex rtnl_mutex &pn->all_ppp_mutex irq_context: 0 ppp_mutex rtnl_mutex &pn->all_ppp_mutex fs_reclaim irq_context: 0 ppp_mutex rtnl_mutex &pn->all_ppp_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 ppp_mutex rtnl_mutex &pn->all_ppp_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 ppp_mutex rtnl_mutex &pn->all_ppp_mutex pool_lock#2 irq_context: 0 ppp_mutex rtnl_mutex fs_reclaim irq_context: 0 ppp_mutex rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 ppp_mutex rtnl_mutex pool_lock#2 irq_context: 0 ppp_mutex rtnl_mutex net_rwsem irq_context: 0 ppp_mutex rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 ppp_mutex rtnl_mutex &tn->lock irq_context: 0 ppp_mutex rtnl_mutex &x->wait#9 irq_context: 0 ppp_mutex rtnl_mutex &obj_hash[i].lock irq_context: 0 ppp_mutex rtnl_mutex &k->list_lock irq_context: 0 ppp_mutex rtnl_mutex gdp_mutex irq_context: 0 ppp_mutex rtnl_mutex gdp_mutex &k->list_lock irq_context: 0 ppp_mutex rtnl_mutex lock irq_context: 0 ppp_mutex rtnl_mutex lock kernfs_idr_lock irq_context: 0 ppp_mutex rtnl_mutex lock kernfs_idr_lock pool_lock#2 irq_context: 0 ppp_mutex rtnl_mutex &root->kernfs_rwsem irq_context: 0 ppp_mutex rtnl_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 ppp_mutex rtnl_mutex bus_type_sem irq_context: 0 ppp_mutex rtnl_mutex sysfs_symlink_target_lock irq_context: 0 ppp_mutex rtnl_mutex &rq->__lock irq_context: 0 ppp_mutex rtnl_mutex &c->lock irq_context: 0 ppp_mutex rtnl_mutex &____s->seqcount#2 irq_context: 0 ppp_mutex rtnl_mutex &____s->seqcount irq_context: 0 ppp_mutex rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 ppp_mutex rtnl_mutex &root->kernfs_rwsem irq_context: 0 ppp_mutex rtnl_mutex &dev->power.lock irq_context: 0 ppp_mutex rtnl_mutex dpm_list_mtx irq_context: 0 ppp_mutex rtnl_mutex uevent_sock_mutex irq_context: 0 ppp_mutex rtnl_mutex uevent_sock_mutex fs_reclaim irq_context: 0 ppp_mutex rtnl_mutex uevent_sock_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 ppp_mutex rtnl_mutex uevent_sock_mutex pool_lock#2 irq_context: 0 ppp_mutex rtnl_mutex uevent_sock_mutex nl_table_lock irq_context: 0 ppp_mutex rtnl_mutex uevent_sock_mutex &obj_hash[i].lock irq_context: 0 ppp_mutex rtnl_mutex uevent_sock_mutex &rq->__lock irq_context: 0 ppp_mutex rtnl_mutex uevent_sock_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 ppp_mutex rtnl_mutex uevent_sock_mutex nl_table_wait.lock irq_context: 0 ppp_mutex rtnl_mutex uevent_sock_mutex.wait_lock irq_context: 0 ppp_mutex rtnl_mutex &p->pi_lock irq_context: 0 ppp_mutex rtnl_mutex &p->pi_lock &rq->__lock irq_context: 0 ppp_mutex rtnl_mutex &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 ppp_mutex rtnl_mutex subsys mutex#17 irq_context: 0 ppp_mutex rtnl_mutex subsys mutex#17 &k->k_lock irq_context: 0 ppp_mutex rtnl_mutex &dir->lock#2 irq_context: 0 ppp_mutex rtnl_mutex dev_hotplug_mutex irq_context: 0 ppp_mutex rtnl_mutex dev_hotplug_mutex &dev->power.lock irq_context: 0 ppp_mutex rtnl_mutex dev_base_lock irq_context: 0 
ppp_mutex rtnl_mutex input_pool.lock irq_context: 0 ppp_mutex rtnl_mutex batched_entropy_u32.lock irq_context: 0 ppp_mutex rtnl_mutex &tbl->lock irq_context: 0 ppp_mutex rtnl_mutex stock_lock irq_context: 0 ppp_mutex rtnl_mutex &n->list_lock irq_context: 0 ppp_mutex rtnl_mutex &n->list_lock &c->lock irq_context: 0 ppp_mutex rtnl_mutex sysctl_lock irq_context: 0 ppp_mutex rtnl_mutex nl_table_lock irq_context: 0 ppp_mutex rtnl_mutex nl_table_wait.lock irq_context: 0 ppp_mutex rtnl_mutex rcu_read_lock pool_lock#2 irq_context: 0 ppp_mutex rtnl_mutex proc_subdir_lock irq_context: 0 ppp_mutex rtnl_mutex proc_inum_ida.xa_lock irq_context: 0 ppp_mutex rtnl_mutex proc_subdir_lock irq_context: 0 ppp_mutex rtnl_mutex &obj_hash[i].lock pool_lock irq_context: 0 ppp_mutex rtnl_mutex &idev->mc_lock irq_context: 0 ppp_mutex rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 ppp_mutex rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 ppp_mutex rtnl_mutex &idev->mc_lock pool_lock#2 irq_context: 0 ppp_mutex rtnl_mutex &idev->mc_lock &obj_hash[i].lock irq_context: 0 ppp_mutex rtnl_mutex &pnettable->lock irq_context: 0 ppp_mutex rtnl_mutex smc_ib_devices.mutex irq_context: 0 ppp_mutex rtnl_mutex &ppp->rlock irq_context: 0 ppp_mutex rtnl_mutex &ppp->wlock irq_context: 0 ppp_mutex rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 ppp_mutex rtnl_mutex.wait_lock irq_context: 0 ppp_mutex &p->pi_lock irq_context: 0 ppp_mutex &p->pi_lock &rq->__lock irq_context: 0 ppp_mutex &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 ppp_mutex &rq->__lock irq_context: 0 sb_writers#8 kn->active#5 &kernfs_locks->open_file_mutex[count] remove_cache_srcu irq_context: 0 sb_writers#8 kn->active#5 &kernfs_locks->open_file_mutex[count] remove_cache_srcu quarantine_lock irq_context: 0 sb_writers#8 kn->active#5 &kernfs_locks->open_file_mutex[count] remove_cache_srcu &c->lock irq_context: 0 sb_writers#8 kn->active#5 &kernfs_locks->open_file_mutex[count] remove_cache_srcu &n->list_lock irq_context: 0 sb_writers#8 kn->active#5 &kernfs_locks->open_file_mutex[count] remove_cache_srcu &obj_hash[i].lock irq_context: 0 sb_writers#8 kn->active#5 &kernfs_locks->open_file_mutex[count] remove_cache_srcu &rq->__lock irq_context: 0 sb_writers#8 kn->active#5 &kernfs_locks->open_file_mutex[count] remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &ppp->rlock irq_context: 0 rtnl_mutex &ppp->wlock irq_context: 0 rtnl_mutex &dev_addr_list_lock_key#4 irq_context: 0 rtnl_mutex &ppp->wlock &ppp->rlock irq_context: 0 rtnl_mutex &pn->all_ppp_mutex irq_context: 0 rtnl_mutex &pn->all_ppp_mutex &obj_hash[i].lock irq_context: 0 rtnl_mutex &pn->all_ppp_mutex pool_lock#2 irq_context: 0 rtnl_mutex &pf->rwait irq_context: 0 rtnl_mutex bus_type_sem &rq->__lock irq_context: 0 &ppp->wlock irq_context: 0 &ppp->wlock &ppp->rlock irq_context: 0 &list->lock#28 irq_context: 0 &child->perf_event_mutex &obj_hash[i].lock irq_context: 0 &child->perf_event_mutex pool_lock#2 irq_context: 0 sk_lock-AF_INET6 &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock stock_lock irq_context: 0 pernet_ops_rwsem &sem->wait_lock irq_context: 0 &p->lock remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_INET6 &ei->socket.wq.wait irq_context: 0 ppp_mutex rtnl_mutex rtnl_mutex.wait_lock irq_context: 0 rtnl_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem &sem->wait_lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &anon_vma->rwsem 
&____s->seqcount#2 irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem rtnl_mutex &rcu_state.expedited_wq irq_context: 0 ppp_mutex rtnl_mutex uevent_sock_mutex &c->lock irq_context: 0 ppp_mutex rtnl_mutex nl_table_wait.lock &p->pi_lock irq_context: 0 ppp_mutex rtnl_mutex nl_table_wait.lock &p->pi_lock &rq->__lock irq_context: 0 ppp_mutex rtnl_mutex nl_table_wait.lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 ppp_mutex rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 ppp_mutex rtnl_mutex &idev->mc_lock &n->list_lock irq_context: 0 ppp_mutex rtnl_mutex &idev->mc_lock &n->list_lock &c->lock irq_context: 0 ppp_mutex rtnl_mutex &idev->mc_lock &obj_hash[i].lock pool_lock irq_context: 0 ppp_mutex rtnl_mutex &idev->mc_lock &rq->__lock irq_context: 0 ppp_mutex rtnl_mutex &idev->mc_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 ppp_mutex rtnl_mutex &idev->mc_lock &cfs_rq->removed.lock irq_context: 0 ppp_mutex rtnl_mutex rcu_read_lock rcu_node_0 irq_context: 0 ppp_mutex rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 ppp_mutex rtnl_mutex remove_cache_srcu irq_context: 0 ppp_mutex rtnl_mutex remove_cache_srcu quarantine_lock irq_context: 0 ppp_mutex rtnl_mutex remove_cache_srcu &c->lock irq_context: 0 ppp_mutex rtnl_mutex remove_cache_srcu &n->list_lock irq_context: 0 ppp_mutex rtnl_mutex remove_cache_srcu &obj_hash[i].lock irq_context: 0 ppp_mutex rtnl_mutex remove_cache_srcu pool_lock#2 irq_context: 0 ppp_mutex rtnl_mutex remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 ppp_mutex rtnl_mutex remove_cache_srcu &rq->__lock irq_context: 0 ppp_mutex &p->pi_lock &cfs_rq->removed.lock irq_context: softirq &(&bat_priv->bla.work)->timer rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: softirq &(&bat_priv->bla.work)->timer rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &root->kernfs_rwsem &rq->__lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->bla.work)->work) rcu_read_lock &rcu_state.gp_wq irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->bla.work)->work) rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->bla.work)->work) rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->bla.work)->work) rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle lock#4 &lruvec->lru_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &n->list_lock &c->lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &anon_vma->rwsem rcu_read_lock &rcu_state.gp_wq irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &anon_vma->rwsem rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &anon_vma->rwsem rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &anon_vma->rwsem rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem rtnl_mutex &tn->lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rcu_read_lock &vma->vm_lock->lock &rcu_state.gp_wq irq_context: 0 rcu_read_lock 
&vma->vm_lock->lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 rcu_read_lock &vma->vm_lock->lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 rcu_read_lock &vma->vm_lock->lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 vsock_table_lock irq_context: 0 ppp_mutex &n->list_lock irq_context: 0 ppp_mutex &n->list_lock &c->lock irq_context: 0 ppp_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_VSOCK irq_context: 0 sk_lock-AF_VSOCK slock-AF_VSOCK irq_context: 0 sk_lock-AF_VSOCK &rq->__lock irq_context: 0 sk_lock-AF_VSOCK &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 slock-AF_VSOCK irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_VSOCK irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_VSOCK slock-AF_VSOCK irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_VSOCK vsock_table_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_VSOCK clock-AF_VSOCK irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_VSOCK &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_VSOCK &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_VSOCK rlock-AF_VSOCK irq_context: 0 &sb->s_type->i_mutex_key#10 slock-AF_VSOCK irq_context: 0 rtnl_mutex &br->hash_lock &____s->seqcount#2 irq_context: 0 sb_writers#8 tomoyo_ss rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 ppp_mutex rtnl_mutex uevent_sock_mutex &n->list_lock irq_context: 0 ppp_mutex rtnl_mutex uevent_sock_mutex &n->list_lock &c->lock irq_context: 0 sk_lock-AF_INET clock-AF_INET irq_context: 0 sk_lock-AF_INET &sctp_port_hashtable[i].lock irq_context: 0 sk_lock-AF_INET &sctp_port_hashtable[i].lock pool_lock#2 irq_context: 0 sk_lock-AF_INET crngs.lock irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_INET rcu_read_lock rhashtable_bucket irq_context: 0 sk_lock-AF_INET &asoc->wait irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 &h->lhash2[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 &tcp_hashinfo.bhash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 &tcp_hashinfo.bhash[i].lock stock_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 &tcp_hashinfo.bhash[i].lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 &tcp_hashinfo.bhash[i].lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock stock_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 &queue->rskq_lock irq_context: 0 (wq_completion)events free_ipc_work rcu_state.exp_mutex &rnp->exp_wq[1] irq_context: 0 sk_lock-AF_INET crypto_alg_sem irq_context: 0 sk_lock-AF_INET (kmod_concurrent_max).lock irq_context: 0 sk_lock-AF_INET rcu_read_lock &pool->lock/1 irq_context: 0 sk_lock-AF_INET 
rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 sk_lock-AF_INET rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 sk_lock-AF_INET rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 sk_lock-AF_INET rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sig->cred_guard_mutex &____s->seqcount#2 irq_context: 0 sk_lock-AF_INET &x->wait#17 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &ep->mtx stock_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq (&timer) rcu_read_lock &n->list_lock irq_context: softirq (&timer) rcu_read_lock &n->list_lock &c->lock irq_context: 0 sk_lock-AF_INET fs_reclaim &cfs_rq->removed.lock irq_context: 0 sk_lock-AF_INET fs_reclaim &obj_hash[i].lock irq_context: 0 rcu_read_lock_bh &nr_netdev_xmit_lock_key irq_context: 0 rcu_read_lock_bh &nr_netdev_xmit_lock_key nr_node_list_lock irq_context: 0 rcu_read_lock_bh &nr_netdev_xmit_lock_key &obj_hash[i].lock irq_context: 0 rcu_read_lock_bh &nr_netdev_xmit_lock_key pool_lock#2 irq_context: 0 &mm->mmap_lock &map->freeze_mutex irq_context: 0 &mm->mmap_lock &map->freeze_mutex &vma->vm_lock->lock irq_context: 0 &mm->mmap_lock &map->freeze_mutex vmap_area_lock irq_context: 0 &mm->mmap_lock &map->freeze_mutex fs_reclaim irq_context: 0 &mm->mmap_lock &map->freeze_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &mm->mmap_lock &map->freeze_mutex &____s->seqcount irq_context: 0 &mm->mmap_lock &map->freeze_mutex &rq->__lock irq_context: 0 &mm->mmap_lock &map->freeze_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#8 &root->kernfs_iattr_rwsem &rq->__lock irq_context: 0 &mm->mmap_lock &map->freeze_mutex pool_lock#2 irq_context: 0 &mm->mmap_lock &map->freeze_mutex stock_lock irq_context: 0 &mm->mmap_lock &map->freeze_mutex ptlock_ptr(page) irq_context: 0 &mm->mmap_lock &map->freeze_mutex ptlock_ptr(page)#2 irq_context: 0 &mm->mmap_lock &map->freeze_mutex ptlock_ptr(page)#2 key irq_context: 0 clock-AF_LLC irq_context: 0 sk_lock-AF_LLC irq_context: 0 sk_lock-AF_LLC slock-AF_LLC irq_context: 0 sk_lock-AF_LLC &mm->mmap_lock irq_context: 0 sk_lock-AF_LLC &mm->mmap_lock &rq->__lock irq_context: 0 sk_lock-AF_LLC &mm->mmap_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 slock-AF_LLC irq_context: 0 sk_lock-AF_INET running_helpers_waitq.lock irq_context: 0 rds_sock_lock irq_context: 0 sk_lock-AF_RDS irq_context: 0 sk_lock-AF_RDS slock-AF_RDS irq_context: 0 slock-AF_RDS irq_context: 0 sk_lock-AF_RDS &mm->mmap_lock irq_context: 0 sk_lock-AF_RDS &mm->mmap_lock &rq->__lock irq_context: 0 sk_lock-AF_RDS &mm->mmap_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_RDS rds_trans_sem irq_context: 0 sk_lock-AF_RDS once_lock irq_context: 0 
sk_lock-AF_RDS once_lock crngs.lock irq_context: 0 sk_lock-AF_RDS &rq->__lock irq_context: 0 sk_lock-AF_RDS &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_RDS pool_lock#2 irq_context: 0 sk_lock-AF_RDS &obj_hash[i].lock irq_context: 0 sk_lock-AF_RDS &obj_hash[i].lock pool_lock irq_context: 0 sk_lock-AF_RDS rcu_read_lock &pool->lock irq_context: 0 sk_lock-AF_RDS rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_RDS rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 sk_lock-AF_RDS rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 sk_lock-AF_RDS rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 sk_lock-AF_RDS rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_unbound (work_completion)(&map->work) purge_vmap_area_lock &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&map->work) purge_vmap_area_lock pool_lock#2 irq_context: 0 &xt[i].mutex remove_cache_srcu &cfs_rq->removed.lock irq_context: 0 sk_lock-AF_RDS rcu_read_lock rhashtable_bucket irq_context: 0 rds_cong_lock irq_context: 0 rcu_read_lock loop_conns_lock irq_context: 0 rcu_read_lock rds_conn_lock irq_context: 0 rcu_read_lock rds_conn_lock rds_cong_lock irq_context: 0 (wq_completion)krdsd irq_context: 0 (wq_completion)krdsd (work_completion)(&(&cp->cp_conn_w)->work) irq_context: 0 (wq_completion)krdsd (work_completion)(&(&cp->cp_conn_w)->work) rcu_read_lock rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)krdsd (work_completion)(&(&cp->cp_conn_w)->work) rcu_read_lock rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)krdsd (work_completion)(&(&cp->cp_conn_w)->work) rcu_read_lock rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 (wq_completion)krdsd (work_completion)(&(&cp->cp_conn_w)->work) rcu_read_lock rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)krdsd (work_completion)(&(&cp->cp_conn_w)->work) rcu_read_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)krdsd (work_completion)(&(&cp->cp_conn_w)->work) rcu_read_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)krdsd (work_completion)(&(&cp->cp_send_w)->work) irq_context: 0 (wq_completion)krdsd (work_completion)(&(&cp->cp_send_w)->work) pool_lock#2 irq_context: 0 (wq_completion)krdsd (work_completion)(&(&cp->cp_send_w)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)krdsd (work_completion)(&(&cp->cp_send_w)->work) &cp->cp_lock irq_context: 0 (wq_completion)krdsd (work_completion)(&(&cp->cp_recv_w)->work) irq_context: 0 &net->xfrm.xfrm_cfg_mutex &____s->seqcount#2 irq_context: 0 &net->xfrm.xfrm_cfg_mutex &____s->seqcount irq_context: 0 &net->xfrm.xfrm_cfg_mutex pool_lock#2 irq_context: 0 &net->xfrm.xfrm_cfg_mutex &c->lock irq_context: 0 &net->xfrm.xfrm_cfg_mutex &obj_hash[i].lock irq_context: 0 &net->xfrm.xfrm_cfg_mutex crypto_alg_sem irq_context: 0 &net->xfrm.xfrm_cfg_mutex fs_reclaim irq_context: 0 &net->xfrm.xfrm_cfg_mutex fs_reclaim &rq->__lock irq_context: 0 &net->xfrm.xfrm_cfg_mutex fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &rs->rs_lock irq_context: 0 &rs->rs_lock &cp->cp_lock irq_context: 0 &cp->cp_lock irq_context: 0 &rs->rs_recv_lock irq_context: 0 &rs->rs_recv_lock rds_cong_lock irq_context: 0 &rs->rs_recv_lock rds_cong_lock rcu_read_lock rcu_read_lock &pool->lock/1 irq_context: 0 &rs->rs_recv_lock 
rds_cong_lock rcu_read_lock rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 &rs->rs_recv_lock rds_cong_lock rcu_read_lock rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 &rs->rs_recv_lock rds_cong_lock rcu_read_lock rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 &rs->rs_recv_lock rds_cong_lock rcu_read_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 &rs->rs_recv_lock rds_cong_lock rcu_read_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &rs->rs_recv_lock &ei->socket.wq.wait irq_context: 0 (wq_completion)krdsd (work_completion)(&(&cp->cp_send_w)->work) &base->lock irq_context: 0 (wq_completion)krdsd (work_completion)(&(&cp->cp_send_w)->work) &base->lock &obj_hash[i].lock irq_context: 0 &net->xfrm.xfrm_cfg_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &net->xfrm.xfrm_cfg_mutex &net->xfrm.xfrm_state_lock irq_context: 0 &net->xfrm.xfrm_cfg_mutex xfrm_state_gc_lock irq_context: 0 &net->xfrm.xfrm_cfg_mutex rcu_read_lock &pool->lock irq_context: 0 &net->xfrm.xfrm_cfg_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 &net->xfrm.xfrm_cfg_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 &net->xfrm.xfrm_cfg_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 &net->xfrm.xfrm_cfg_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &net->xfrm.xfrm_cfg_mutex &rq->__lock irq_context: 0 &net->xfrm.xfrm_cfg_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events xfrm_state_gc_work irq_context: 0 (wq_completion)events xfrm_state_gc_work xfrm_state_gc_lock irq_context: 0 (wq_completion)events xfrm_state_gc_work rcu_state.exp_mutex rcu_node_0 irq_context: 0 (wq_completion)events xfrm_state_gc_work rcu_state.exp_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)events xfrm_state_gc_work rcu_state.exp_mutex rcu_read_lock &pool->lock irq_context: 0 (wq_completion)events xfrm_state_gc_work rcu_state.exp_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events xfrm_state_gc_work rcu_state.exp_mutex &rnp->exp_wq[0] irq_context: 0 (wq_completion)events xfrm_state_gc_work rcu_state.exp_mutex &pool->lock irq_context: 0 (wq_completion)events xfrm_state_gc_work rcu_state.exp_mutex &pool->lock &p->pi_lock irq_context: 0 (wq_completion)events xfrm_state_gc_work rcu_state.exp_mutex &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events xfrm_state_gc_work rcu_state.exp_mutex &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events xfrm_state_gc_work rcu_state.exp_mutex &rq->__lock irq_context: 0 (wq_completion)events xfrm_state_gc_work rcu_state.exp_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &rm->m_rs_lock irq_context: 0 &rm->m_rs_lock &rs->rs_lock irq_context: 0 &rs->rs_recv_lock irq_context: 0 &rs->rs_recv_lock &ei->socket.wq.wait irq_context: 0 &map->m_waitq irq_context: 0 (wq_completion)events xfrm_state_gc_work &obj_hash[i].lock irq_context: 0 (wq_completion)events xfrm_state_gc_work (&x->rtimer) irq_context: 0 (wq_completion)events xfrm_state_gc_work &base->lock irq_context: 0 (wq_completion)events xfrm_state_gc_work pool_lock#2 irq_context: 0 ppp_mutex rtnl_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &net->xfrm.xfrm_cfg_mutex rcu_read_lock pool_lock#2 irq_context: 0 
&net->xfrm.xfrm_cfg_mutex rcu_read_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: 0 &net->xfrm.xfrm_cfg_mutex rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 &net->xfrm.xfrm_cfg_mutex rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 &net->xfrm.xfrm_cfg_mutex rcu_read_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 &net->xfrm.xfrm_cfg_mutex rcu_read_lock rcu_read_lock_bh pool_lock#2 irq_context: 0 &net->xfrm.xfrm_cfg_mutex rlock-AF_NETLINK irq_context: 0 ppp_mutex rtnl_mutex batched_entropy_u8.lock irq_context: 0 ppp_mutex rtnl_mutex kfence_freelist_lock irq_context: 0 ppp_mutex rtnl_mutex &meta->lock irq_context: softirq &(&cp->cp_send_w)->timer irq_context: softirq &(&cp->cp_send_w)->timer rcu_read_lock &pool->lock/1 irq_context: softirq &(&cp->cp_send_w)->timer rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: softirq &(&cp->cp_send_w)->timer rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: softirq &(&cp->cp_send_w)->timer rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: softirq &(&cp->cp_send_w)->timer rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)krdsd (work_completion)(&(&cp->cp_send_w)->work) &map->m_waitq irq_context: 0 (wq_completion)krdsd (work_completion)(&(&cp->cp_send_w)->work) &map->m_waitq &p->pi_lock irq_context: 0 (wq_completion)krdsd (work_completion)(&(&cp->cp_send_w)->work) &map->m_waitq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)krdsd (work_completion)(&(&cp->cp_send_w)->work) &map->m_waitq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock &cfs_rq->removed.lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 stock_lock rcu_read_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 lock#4 &lruvec->lru_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 rcu_read_lock &____s->seqcount#7 irq_context: 0 rcu_read_lock &nf_nat_locks[i] irq_context: 0 rcu_read_lock &nf_conntrack_locks[i] irq_context: 0 rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 irq_context: 0 rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 batched_entropy_u8.lock irq_context: 0 rcu_read_lock &tbl->lock irq_context: 0 rcu_read_lock &____s->seqcount#9 irq_context: 0 rcu_read_lock rcu_read_lock_bh &list->lock#17 irq_context: 0 rcu_read_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh &r->producer_lock#2 irq_context: 0 rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh rcu_read_lock &pool->lock irq_context: 0 rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-crypt-wg2#7 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; 
__asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg2#7 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 (wq_completion)wg-crypt-wg2#7 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &c->lock irq_context: 0 sb_writers#4 sb_internal mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 sb_writers#4 sb_internal mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_INET crypto_alg_sem irq_context: 0 sk_lock-AF_INET (crypto_chain).rwsem irq_context: 0 sk_lock-AF_INET (crypto_chain).rwsem fs_reclaim irq_context: 0 sk_lock-AF_INET (crypto_chain).rwsem fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sk_lock-AF_INET (crypto_chain).rwsem &c->lock irq_context: 0 sk_lock-AF_INET (crypto_chain).rwsem pool_lock#2 irq_context: 0 sk_lock-AF_INET (crypto_chain).rwsem kthread_create_lock irq_context: 0 sk_lock-AF_INET (crypto_chain).rwsem &p->pi_lock irq_context: 0 sk_lock-AF_INET (crypto_chain).rwsem &p->pi_lock &rq->__lock irq_context: 0 sk_lock-AF_INET (crypto_chain).rwsem &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_INET (crypto_chain).rwsem &x->wait irq_context: 0 sk_lock-AF_INET (crypto_chain).rwsem &rq->__lock irq_context: 0 sk_lock-AF_INET (crypto_chain).rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &type->i_mutex_dir_key#3 stock_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 stock_lock irq_context: 0 sk_lock-AF_INET (crypto_chain).rwsem &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET &x->wait#21 irq_context: 0 sk_lock-AF_INET (&timer.timer) irq_context: 0 sk_lock-AF_INET &sctp_ep_hashtable[i].lock irq_context: 0 sk_lock-AF_INET per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 sk_lock-AF_INET &dir->lock irq_context: 0 sk_lock-AF_INET sk_lock-AF_INET/1 irq_context: 0 sk_lock-AF_INET sk_lock-AF_INET/1 slock-AF_INET irq_context: 0 sk_lock-AF_INET sk_lock-AF_INET/1 &asoc->wait irq_context: 0 sk_lock-AF_INET sk_lock-AF_INET/1 &asoc->wait &p->pi_lock irq_context: 0 sk_lock-AF_INET sk_lock-AF_INET/1 &asoc->wait &p->pi_lock &rq->__lock irq_context: 0 sk_lock-AF_INET sk_lock-AF_INET/1 &asoc->wait &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_INET sk_lock-AF_INET/1 &rq->__lock irq_context: 0 sk_lock-AF_INET sk_lock-AF_INET/1 rcu_read_lock &ei->socket.wq.wait irq_context: 0 sk_lock-AF_INET sk_lock-AF_INET/1 rcu_read_lock &ei->socket.wq.wait &ep->lock irq_context: 0 pcpu_alloc_mutex free_vmap_area_lock irq_context: 0 pcpu_alloc_mutex vmap_area_lock irq_context: 0 pcpu_alloc_mutex init_mm.page_table_lock irq_context: 0 
pcpu_alloc_mutex &c->lock irq_context: 0 (wq_completion)wg-crypt-wg2#7 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-crypt-wg2#7 (work_completion)(&peer->transmit_packet_work) irq_context: 0 sb_writers#4 tomoyo_ss quarantine_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock remove_cache_srcu irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock remove_cache_srcu quarantine_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock remove_cache_srcu &n->list_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock remove_cache_srcu &obj_hash[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock remove_cache_srcu &c->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock remove_cache_srcu pool_lock#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock remove_cache_srcu &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &mapping->i_mmap_rwsem lock#4 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &mapping->i_mmap_rwsem lock#5 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &mapping->i_mmap_rwsem mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &mapping->i_mmap_rwsem ptlock_ptr(page)#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &mapping->i_mmap_rwsem ptlock_ptr(page)#2 &____s->seqcount irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &mapping->i_mmap_rwsem ptlock_ptr(page)#2 pool_lock#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &mapping->i_mmap_rwsem &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &mapping->i_mmap_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pcpu_alloc_mutex &pcp->lock &zone->lock irq_context: 0 sk_lock-AF_CAN &mm->mmap_lock irq_context: 0 sk_lock-AF_CAN &obj_hash[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &mapping->i_mmap_rwsem &lruvec->lru_lock irq_context: 0 sk_lock-AF_CAN rcu_read_lock_bh _xmit_NONE#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &mapping->i_mmap_rwsem rcu_read_lock pool_lock#2 irq_context: 0 sk_lock-AF_CAN rcu_read_lock_bh _xmit_NONE#2 rcu_read_lock &c->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &mapping->i_mmap_rwsem &obj_hash[i].lock irq_context: 0 sk_lock-AF_CAN rcu_read_lock_bh _xmit_NONE#2 rcu_read_lock pool_lock#2 irq_context: 0 sk_lock-AF_CAN rcu_read_lock_bh _xmit_NONE#2 rcu_read_lock tk_core.seq.seqcount irq_context: 0 sk_lock-AF_CAN rcu_read_lock_bh 
_xmit_NONE#2 rcu_read_lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_CAN rcu_read_lock_bh _xmit_NONE#2 &obj_hash[i].lock irq_context: 0 sk_lock-AF_CAN rcu_read_lock_bh _xmit_NONE#2 pool_lock#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &mapping->private_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &mapping->private_lock rcu_read_lock &memcg->move_lock irq_context: 0 sk_lock-AF_CAN tk_core.seq.seqcount irq_context: 0 sk_lock-AF_CAN &list->lock#5 irq_context: 0 sk_lock-AF_CAN hrtimer_bases.lock irq_context: 0 sk_lock-AF_CAN hrtimer_bases.lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_CAN hrtimer_bases.lock tk_core.seq.seqcount irq_context: softirq rcu_read_lock_bh _xmit_NONE#2 irq_context: softirq rcu_read_lock_bh _xmit_NONE#2 rcu_read_lock pool_lock#2 irq_context: softirq rcu_read_lock_bh _xmit_NONE#2 rcu_read_lock tk_core.seq.seqcount irq_context: softirq rcu_read_lock_bh _xmit_NONE#2 rcu_read_lock &obj_hash[i].lock irq_context: softirq rcu_read_lock_bh _xmit_NONE#2 &obj_hash[i].lock irq_context: softirq rcu_read_lock_bh _xmit_NONE#2 pool_lock#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock stock_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &sb->s_type->i_lock_key#22 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &sb->s_type->i_lock_key#22 &xa->xa_lock#6 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock lock#4 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock lock#4 &lruvec->lru_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock lock#5 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &lruvec->lru_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock rcu_read_lock pool_lock#2 irq_context: softirq rcu_read_lock_bh _xmit_NONE#2 rcu_read_lock &c->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &mapping->private_lock rcu_read_lock rcu_read_lock key#10 irq_context: 0 &type->i_mutex_dir_key#3 sb_writers#4 remove_cache_srcu &rq->__lock irq_context: 0 &type->i_mutex_dir_key#3 sb_writers#4 remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET/1 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET/1 slock-AF_INET irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET/1 rlock-AF_INET irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET/1 &list->lock#25 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET/1 &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET/1 &base->lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET/1 &base->lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET/1 &asoc->wait irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET/1 &asoc->wait &p->pi_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET/1 &asoc->wait &p->pi_lock &cfs_rq->removed.lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET/1 &asoc->wait &p->pi_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET/1 &asoc->wait &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET/1 &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET/1 pool_lock#2 irq_context: 0 
&sb->s_type->i_mutex_key#10 sk_lock-AF_INET/1 &____s->seqcount irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET/1 rcu_read_lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET/1 krc.lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET/1 krc.lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET/1 krc.lock &base->lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET/1 krc.lock &base->lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET/1 &obj_hash[i].lock pool_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET/1 rcu_read_lock rhashtable_bucket irq_context: 0 &sb->s_type->i_mutex_key#10 &net->sctp.addr_wq_lock slock-AF_INET/1 irq_context: 0 &sb->s_type->i_mutex_key#10 &net->sctp.addr_wq_lock slock-AF_INET/1 &sctp_ep_hashtable[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 &net->sctp.addr_wq_lock slock-AF_INET/1 clock-AF_INET irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &sb->s_type->i_lock_key#22 &xa->xa_lock#6 &obj_hash[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &sb->s_type->i_lock_key#22 &xa->xa_lock#6 pool_lock#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock rcu_node_0 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &mapping->i_mmap_rwsem lock#4 &lruvec->lru_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem &(ei->i_block_reservation_lock) irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem &(ei->i_block_reservation_lock) key#15 irq_context: softirq rcu_read_lock_bh _xmit_NONE#2 rcu_read_lock &n->list_lock irq_context: softirq rcu_read_lock_bh _xmit_NONE#2 rcu_read_lock &n->list_lock &c->lock irq_context: 0 sk_lock-AF_INET krc.lock irq_context: 0 &sb->s_type->i_mutex_key#10 &net->sctp.addr_wq_lock slock-AF_INET/1 &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 &net->sctp.addr_wq_lock slock-AF_INET/1 pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 &net->sctp.addr_wq_lock slock-AF_INET/1 krc.lock irq_context: 0 &sb->s_type->i_mutex_key#10 &net->sctp.addr_wq_lock slock-AF_INET/1 &sctp_port_hashtable[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 &net->sctp.addr_wq_lock slock-AF_INET/1 &sctp_port_hashtable[i].lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 &net->sctp.addr_wq_lock slock-AF_INET/1 &sctp_port_hashtable[i].lock pool_lock#2 irq_context: softirq rcu_read_lock_bh _xmit_NONE#2 rcu_read_lock &____s->seqcount#2 irq_context: softirq rcu_read_lock_bh _xmit_NONE#2 rcu_read_lock &____s->seqcount irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex rcu_read_lock batched_entropy_u8.lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex rcu_read_lock kfence_freelist_lock irq_context: 0 rtnl_mutex mrt_lock irq_context: 0 rtnl_mutex mrt_lock pool_lock#2 irq_context: 0 pcpu_alloc_mutex &____s->seqcount#2 irq_context: 0 rtnl_mutex mrt_lock &dir->lock#2 irq_context: 0 (wq_completion)wg-crypt-wg0#4 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 sb_writers#4 fs_reclaim irq_context: 0 sb_writers#4 
fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 fs_reclaim irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &c->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_lock_key#22 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &wb->list_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &xa->xa_lock#6 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem lock#4 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem lock#4 &lruvec->lru_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem lock#5 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &obj_hash[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &journal->j_state_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle lock#4 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle lock#5 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_es_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ei->i_prealloc_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &c->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &sbi->s_md_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &bgl->locks[i].lock &sbi->s_md_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &bgl->locks[i].lock &ei->i_prealloc_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &mapping->private_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &pa->pa_lock#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &obj_hash[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ei->i_raw_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 
&sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &(ei->i_block_reservation_lock) irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &sb->s_type->i_lock_key#22 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ei->i_es_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ei->i_es_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &memcg->move_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &xa->xa_lock#6 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &xa->xa_lock#6 &s->s_inode_wblist_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock key#10 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_raw_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &journal->j_state_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &journal->j_state_lock &journal->j_wait_commit irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &journal->j_state_lock &journal->j_wait_commit &p->pi_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &journal->j_state_lock &journal->j_wait_commit &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &journal->j_state_lock &journal->j_wait_commit &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &journal->j_wait_updates irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &journal->j_wait_updates &p->pi_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &journal->j_wait_updates &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &journal->j_wait_updates &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem tk_core.seq.seqcount irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &dd->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_read_lock &dd->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_read_lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_read_lock &c->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_read_lock tk_core.seq.seqcount irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_read_lock &virtscsi_vq->vq_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &folio_wait_table[i] irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &rq->__lock irq_context: 0 
sb_writers#4 &sb->s_type->i_mutex_key#8 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle &ei->i_data_sem bit_wait_table + i irq_context: 0 pcpu_alloc_mutex &pcp->lock &zone->lock &____s->seqcount irq_context: 0 pcpu_alloc_mutex &n->list_lock irq_context: 0 pcpu_alloc_mutex &n->list_lock &c->lock irq_context: 0 mem_id_lock irq_context: 0 mem_id_lock &rq->__lock irq_context: 0 mem_id_lock fs_reclaim irq_context: 0 mem_id_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 mem_id_lock &c->lock irq_context: 0 mem_id_lock pool_lock#2 irq_context: 0 mem_id_lock batched_entropy_u32.lock irq_context: 0 mem_id_lock &obj_hash[i].lock irq_context: 0 mem_id_lock mem_id_pool.xa_lock irq_context: 0 mem_id_lock rcu_read_lock rhashtable_bucket irq_context: 0 rcu_read_lock &r->producer_lock#3 irq_context: 0 remove_cache_srcu &meta->lock irq_context: 0 remove_cache_srcu kfence_freelist_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 lock#4 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 lock#4 &lruvec->lru_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 lock#5 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &ei->i_es_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &journal->j_state_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &____s->seqcount irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 tk_core.seq.seqcount irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &dd->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock &dd->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock tk_core.seq.seqcount irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock &virtscsi_vq->vq_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &obj_hash[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &base->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 (&timer.timer) irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &sbi->s_orphan_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &sbi->s_orphan_lock &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &lg->lg_mutex pool_lock#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &lg->lg_mutex &mapping->private_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &lg->lg_mutex &c->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &lg->lg_mutex &ret->b_state_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &lg->lg_mutex &ret->b_state_lock &journal->j_list_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &lg->lg_mutex key#3 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &journal->j_list_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &dd->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock &dd->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 
rcu_read_lock tk_core.seq.seqcount irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock &virtscsi_vq->vq_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &base->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 (&timer.timer) irq_context: 0 (wq_completion)events_unbound (work_completion)(&map->work) pcpu_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&map->work) rcu_read_lock &pool->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&map->work) rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&map->work) rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&map->work) rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&map->work) rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_unbound (work_completion)(&map->work) &rq->__lock irq_context: 0 (wq_completion)events pcpu_balance_work pcpu_alloc_mutex rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&map->work) percpu_counters_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&map->work) rcu_state.exp_mutex &rnp->exp_wq[1] irq_context: 0 (wq_completion)kblockd (work_completion)(&(&q->requeue_work)->work) rcu_read_lock &obj_hash[i].lock pool_lock irq_context: 0 &r->consumer_lock#3 irq_context: 0 mem_id_lock &ht->lock irq_context: 0 mem_id_lock rcu_read_lock &ht->lock irq_context: 0 mem_id_lock rcu_read_lock rcu_read_lock rhashtable_bucket irq_context: 0 mem_id_lock rcu_read_lock rcu_read_lock rcu_read_lock &pool->lock irq_context: 0 mem_id_lock rcu_read_lock rcu_read_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 mem_id_lock rcu_read_lock rcu_read_lock rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 mem_id_lock rcu_read_lock rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 mem_id_lock rcu_read_lock rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 mem_id_lock rcu_read_lock rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 mem_id_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 mem_id_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 mem_id_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &sbi->s_orphan_lock &ret->b_state_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &sbi->s_orphan_lock &ret->b_state_lock &journal->j_list_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ei->i_prealloc_lock &pa->pa_lock#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ret->b_state_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ret->b_state_lock &journal->j_list_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &(ei->i_block_reservation_lock) key#15 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock rcu_node_0 irq_context: 0 
sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &rq->__lock irq_context: 0 mem_id_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 mem_id_lock rcu_read_lock pool_lock#2 irq_context: 0 mem_id_lock rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock &c->lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_CAN hrtimer_bases.lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_CAN hrtimer_bases.lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#13 &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#13 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem key#3 irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rcu_read_lock &nf_conntrack_locks[i]/1 irq_context: 0 pidmap_lock pool_lock#2 irq_context: softirq rcu_callback mem_id_pool.xa_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 key#23 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &c->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &xa->xa_lock#6 key#10 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 pgd_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 key irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 pcpu_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 percpu_counters_lock irq_context: 0 &group->mark_mutex remove_cache_srcu &rq->__lock irq_context: 0 &group->mark_mutex remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &cfs_rq->removed.lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_PACKET &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &root->kernfs_iattr_rwsem &cfs_rq->removed.lock irq_context: 0 &root->kernfs_iattr_rwsem &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal remove_cache_srcu irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal remove_cache_srcu quarantine_lock irq_context: 0 &group->mark_mutex &lock->wait_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 batched_entropy_u8.lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 kfence_freelist_lock irq_context: 0 &sb->s_type->i_mutex_key#10 clock-AF_RDS irq_context: 0 &sb->s_type->i_mutex_key#10 &rs->rs_recv_lock irq_context: 0 &sb->s_type->i_mutex_key#10 &rs->rs_recv_lock &rm->m_rs_lock irq_context: 0 &sb->s_type->i_mutex_key#10 &rs->rs_recv_lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 &rs->rs_recv_lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 rds_cong_monitor_lock irq_context: 0 &sb->s_type->i_mutex_key#10 rds_cong_lock irq_context: 0 &sb->s_type->i_mutex_key#10 &rs->rs_lock irq_context: 0 &sb->s_type->i_mutex_key#10 &rs->rs_rdma_lock irq_context: 0 &sb->s_type->i_mutex_key#10 &q->lock irq_context: 0 &sb->s_type->i_mutex_key#10 rds_sock_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_LLC irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_LLC 
&rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_LLC slock-AF_LLC irq_context: 0 &sb->s_type->i_mutex_key#10 slock-AF_LLC irq_context: 0 &sb->s_type->i_mutex_key#10 (&llc->pf_cycle_timer.timer) irq_context: 0 &sb->s_type->i_mutex_key#10 &base->lock irq_context: 0 &sb->s_type->i_mutex_key#10 (&llc->ack_timer.timer) irq_context: 0 &sb->s_type->i_mutex_key#10 (&llc->rej_sent_timer.timer) irq_context: 0 &sb->s_type->i_mutex_key#10 (&llc->busy_state_timer.timer) irq_context: 0 &sb->s_type->i_mutex_key#10 rlock-AF_LLC irq_context: 0 &sb->s_type->i_mutex_key#10 wlock-AF_LLC irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 &list->lock#29 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &rq->__lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ei->i_data_sem &n->list_lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ei->i_data_sem &n->list_lock &c->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 &ei->i_es_lock key#2 irq_context: 0 sk_lock-AF_INET6 cpu_hotplug_lock irq_context: 0 &sb->s_type->i_mutex_key#10 cpu_hotplug_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 &n->list_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 &n->list_lock &c->lock irq_context: 0 rcu_read_lock rds_conn_lock loop_conns_lock irq_context: 0 rcu_read_lock rds_conn_lock &obj_hash[i].lock irq_context: 0 rcu_read_lock rds_conn_lock pool_lock#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&map->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events pcpu_balance_work pcpu_alloc_mutex rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)events pcpu_balance_work pcpu_alloc_mutex rcu_read_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem batched_entropy_u8.lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem kfence_freelist_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem &meta->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&map->work) rcu_state.exp_mutex &rnp->exp_wq[0] irq_context: 0 sb_writers &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &group->mark_mutex fs_reclaim &rq->__lock irq_context: 0 &group->mark_mutex fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#8 remove_cache_srcu rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 rcu_read_lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 rcu_read_lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &rq->__lock &cfs_rq->removed.lock irq_context: 0 &child->perf_event_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events pcpu_balance_work pcpu_alloc_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events pcpu_balance_work pcpu_alloc_mutex rcu_node_0 irq_context: 0 sb_writers#8 tomoyo_ss rcu_node_0 irq_context: 0 &type->i_mutex_dir_key#5 remove_cache_srcu pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET/1 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &mapping->i_mmap_rwsem 
ptlock_ptr(page)#2 key irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &meta->lock irq_context: 0 sb_writers#4 fs_reclaim &rq->__lock irq_context: 0 sb_writers#4 fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ei->i_es_lock &c->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 &c->lock irq_context: 0 &xa->xa_lock#6 &c->lock irq_context: 0 &xa->xa_lock#6 &n->list_lock irq_context: 0 &xa->xa_lock#6 &n->list_lock &c->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_read_lock &rq->__lock irq_context: 0 &vma->vm_lock->lock &n->list_lock irq_context: 0 &vma->vm_lock->lock &n->list_lock &c->lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle mmu_notifier_invalidate_range_start irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &____s->seqcount irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &obj_hash[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &n->list_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &n->list_lock &c->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &c->lock irq_context: 0 sk_lock-AF_INET &obj_hash[i].lock pool_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem batched_entropy_u8.lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem kfence_freelist_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) &meta->lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) kfence_freelist_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 fs_reclaim &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock mount_lock.seqcount irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock rcu_read_lock rename_lock.seqcount irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 ima_extend_list_mutex irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 ima_extend_list_mutex fs_reclaim irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 ima_extend_list_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 ima_extend_list_mutex pool_lock#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock &rcu_state.gp_wq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &p->alloc_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &list->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 kauditd_wait.lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &sbi->s_orphan_lock &ei->i_raw_lock irq_context: softirq rcu_read_lock rlock-AF_CAN irq_context: softirq rcu_read_lock elock-AF_CAN irq_context: softirq rcu_read_lock &dir->lock irq_context: 0 
(wq_completion)wg-crypt-wg2#7 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg2#7 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh &base->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &sbi->s_orphan_lock &lock->wait_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &sbi->s_orphan_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 sb_internal jbd2_handle &lock->wait_lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle &p->pi_lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle &p->pi_lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &journal->j_state_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &journal->j_state_lock tk_core.seq.seqcount irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &journal->j_state_lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &journal->j_state_lock &base->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &journal->j_state_lock &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem bit_wait_table + i irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)krdsd (work_completion)(&(&cp->cp_send_w)->work) &rs->rs_recv_lock irq_context: 0 (wq_completion)krdsd (work_completion)(&(&cp->cp_send_w)->work) &rs->rs_recv_lock rds_cong_lock irq_context: 0 (wq_completion)krdsd (work_completion)(&(&cp->cp_send_w)->work) &rs->rs_recv_lock rds_cong_lock rcu_read_lock rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)krdsd (work_completion)(&(&cp->cp_send_w)->work) &rs->rs_recv_lock rds_cong_lock rcu_read_lock rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)krdsd (work_completion)(&(&cp->cp_send_w)->work) &rs->rs_recv_lock rds_cong_lock rcu_read_lock rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 (wq_completion)krdsd (work_completion)(&(&cp->cp_send_w)->work) &rs->rs_recv_lock rds_cong_lock rcu_read_lock rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)krdsd (work_completion)(&(&cp->cp_send_w)->work) &rs->rs_recv_lock rds_cong_lock rcu_read_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)krdsd (work_completion)(&(&cp->cp_send_w)->work) &rs->rs_recv_lock rds_cong_lock rcu_read_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)krdsd (work_completion)(&(&cp->cp_send_w)->work) &rs->rs_recv_lock &ei->socket.wq.wait irq_context: 0 (wq_completion)krdsd (work_completion)(&(&cp->cp_send_w)->work) &rm->m_rs_lock irq_context: 0 (wq_completion)krdsd 
(work_completion)(&(&cp->cp_send_w)->work) &rm->m_rs_lock &rs->rs_lock irq_context: 0 (wq_completion)krdsd (work_completion)(&(&cp->cp_send_w)->work) &rs->rs_recv_lock irq_context: 0 (wq_completion)krdsd (work_completion)(&(&cp->cp_send_w)->work) &rs->rs_recv_lock &ei->socket.wq.wait irq_context: 0 (wq_completion)krdsd (work_completion)(&(&cp->cp_send_w)->work) &c->lock irq_context: 0 sb_writers#8 rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&(&krcp->krw_arr[i].rcu_work)->work) rcu_callback stock_lock rcu_read_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 &sig->cred_guard_mutex remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sig->cred_guard_mutex remove_cache_srcu pool_lock#2 irq_context: 0 sb_writers#4 &n->list_lock irq_context: 0 sb_writers#4 &n->list_lock &c->lock irq_context: 0 sk_lock-AF_CAN &n->list_lock irq_context: 0 sk_lock-AF_CAN &n->list_lock &c->lock irq_context: softirq (&peer->hb_timer) slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &mapping->private_lock rcu_read_lock &p->pi_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &mapping->private_lock rcu_read_lock &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &mapping->private_lock rcu_read_lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock mmu_notifier_invalidate_range_start &rq->__lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 jbd2_handle rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 jbd2_handle rcu_read_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 jbd2_handle rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &lg->lg_mutex rcu_read_lock &pa->pa_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &obj_hash[i].lock pool_lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle &journal->j_wait_updates &p->pi_lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle &journal->j_wait_updates &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle &journal->j_wait_updates &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 jbd2_handle &____s->seqcount irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 jbd2_handle bit_wait_table + i irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_es_lock key#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 jbd2_handle rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 jbd2_handle &obj_hash[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &n->list_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &n->list_lock &c->lock irq_context: softirq 
(&hsr->announce_timer) rcu_read_lock &hsr->seqnr_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#2 irq_context: softirq (&hsr->announce_timer) rcu_read_lock &hsr->seqnr_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle lock#4 &lruvec->lru_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET/1 quarantine_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &ei->i_es_lock key#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock &c->lock irq_context: 0 rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 &journal->j_checkpoint_mutex &fq->mq_flush_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#9 remove_cache_srcu irq_context: 0 &sb->s_type->i_mutex_key#9 remove_cache_srcu quarantine_lock irq_context: 0 &sb->s_type->i_mutex_key#9 remove_cache_srcu &c->lock irq_context: 0 &sb->s_type->i_mutex_key#9 remove_cache_srcu &n->list_lock irq_context: 0 &sb->s_type->i_mutex_key#9 remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#9 remove_cache_srcu &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#9 remove_cache_srcu rcu_read_lock rcu_node_0 irq_context: 0 &sb->s_type->i_mutex_key#9 remove_cache_srcu rcu_read_lock &rq->__lock irq_context: 0 fanout_mutex &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 rcu_read_lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg1#8 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &dir->lock#2 irq_context: 0 sb_writers#4 sb_internal jbd2_handle bit_wait_table + i irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ei->i_es_lock key#2 irq_context: 0 sb_writers#4 sb_internal jbd2_handle &journal->j_wait_updates &p->pi_lock &cfs_rq->removed.lock irq_context: 0 sk_lock-AF_INET crypto_alg_sem &rq->__lock irq_context: 0 sk_lock-AF_INET crypto_alg_sem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &mapping->i_mmap_rwsem rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &mapping->i_mmap_rwsem rcu_read_lock &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &mapping->i_mmap_rwsem rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_INET sk_lock-AF_INET/1 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 jbd2_handle bit_wait_table + i irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &mapping->i_mmap_rwsem rcu_read_lock &rcu_state.gp_wq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &mapping->i_mmap_rwsem rcu_read_lock &rcu_state.gp_wq &p->pi_lock 
irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &mapping->i_mmap_rwsem rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &mapping->i_mmap_rwsem rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#8 ima_extend_list_mutex &n->list_lock irq_context: 0 &sb->s_type->i_mutex_key#8 ima_extend_list_mutex &n->list_lock &c->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 tomoyo_ss rcu_read_lock pgd_lock irq_context: 0 tomoyo_ss rcu_read_lock key irq_context: 0 tomoyo_ss rcu_read_lock pcpu_lock irq_context: 0 tomoyo_ss rcu_read_lock percpu_counters_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_read_lock &____s->seqcount#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_read_lock &____s->seqcount irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle &ei->i_data_sem &ei->i_es_lock &c->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock &base->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &fq->mq_flush_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &fq->mq_flush_lock tk_core.seq.seqcount irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &fq->mq_flush_lock &q->requeue_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &fq->mq_flush_lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &fq->mq_flush_lock rcu_read_lock &pool->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &fq->mq_flush_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &fq->mq_flush_lock rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &fq->mq_flush_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &fq->mq_flush_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &fq->mq_flush_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq &(&hctx->run_work)->timer irq_context: softirq &(&hctx->run_work)->timer rcu_read_lock &pool->lock irq_context: softirq &(&hctx->run_work)->timer rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: softirq &(&hctx->run_work)->timer rcu_read_lock &pool->lock &p->pi_lock irq_context: softirq &(&hctx->run_work)->timer rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: softirq &(&hctx->run_work)->timer rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &x->wait#26 
irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &____s->seqcount#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem rcu_read_lock &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ei->i_es_lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &journal->j_wait_updates &p->pi_lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 sb_internal &journal->j_state_lock &journal->j_wait_transaction_locked irq_context: 0 sb_writers#4 sb_internal jbd2_handle irq_context: 0 &journal->j_wait_done_commit &p->pi_lock &cfs_rq->removed.lock irq_context: 0 &sig->cred_guard_mutex tomoyo_ss rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock &base->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 &journal->j_state_lock &journal->j_wait_transaction_locked irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 jbd2_handle irq_context: 0 &mm->mmap_lock sb_writers#4 &journal->j_state_lock irq_context: 0 &mm->mmap_lock sb_writers#4 &journal->j_state_lock tk_core.seq.seqcount irq_context: 0 &mm->mmap_lock sb_writers#4 &journal->j_state_lock &obj_hash[i].lock irq_context: 0 &mm->mmap_lock sb_writers#4 &journal->j_state_lock &base->lock irq_context: 0 &mm->mmap_lock sb_writers#4 &journal->j_state_lock &base->lock &obj_hash[i].lock irq_context: 0 &mm->mmap_lock sb_writers#4 &rq->__lock irq_context: 0 &mm->mmap_lock sb_writers#4 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ei->i_es_lock &n->list_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ei->i_es_lock &n->list_lock &c->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &sbi->s_orphan_lock &ret->b_state_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &sbi->s_orphan_lock &ret->b_state_lock &journal->j_list_lock irq_context: 0 sb_writers#4 &meta->lock irq_context: 0 sb_writers#4 kfence_freelist_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET/1 rcu_read_lock rcu_node_0 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET/1 
rcu_read_lock &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &rq->__lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &journal->j_state_lock &journal->j_wait_transaction_locked irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle irq_context: 0 sb_writers#4 remove_cache_srcu irq_context: 0 sb_writers#4 remove_cache_srcu quarantine_lock irq_context: 0 sb_writers#4 remove_cache_srcu &c->lock irq_context: 0 sb_writers#4 remove_cache_srcu &n->list_lock irq_context: 0 sb_writers#4 remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#4 remove_cache_srcu &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg0#10 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 (wq_completion)wg-crypt-wg0#10 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rlock-AF_PACKET irq_context: 0 remove_cache_srcu rcu_read_lock rcu_read_lock &cfs_rq->removed.lock irq_context: 0 &xt[i].mutex rcu_read_lock &pool->lock irq_context: 0 &xt[i].mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &anon_vma->rwsem batched_entropy_u8.lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &anon_vma->rwsem kfence_freelist_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle rcu_read_lock &rcu_state.gp_wq irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock sb_writers#4 &journal->j_state_lock &journal->j_wait_transaction_locked irq_context: 0 &mm->mmap_lock sb_writers#4 jbd2_handle irq_context: 0 &journal->j_state_lock &journal->j_wait_transaction_locked &p->pi_lock &cfs_rq->removed.lock irq_context: 0 &mm->mmap_lock sb_writers#4 jbd2_handle bit_wait_table + i irq_context: 0 &mm->mmap_lock sb_writers#4 jbd2_handle &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 remove_cache_srcu &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &mm->mmap_lock &c->lock irq_context: 0 &f->f_pos_lock sb_writers#4 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &ei->i_data_sem &ei->i_es_lock key#8 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &cfs_rq->removed.lock irq_context: 0 &p->lock remove_cache_srcu rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &lruvec->lru_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal &n->list_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal &n->list_lock &c->lock irq_context: 0 
sb_writers#4 &type->i_mutex_dir_key#3/1 stock_lock rcu_read_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: softirq (&peer->hb_timer) slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: softirq (&peer->hb_timer) slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &n->lock irq_context: softirq (&peer->hb_timer) slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: softirq (&peer->hb_timer) slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: softirq (&peer->hb_timer) slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh pool_lock#2 irq_context: softirq (&peer->hb_timer) slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock rcu_read_lock &r->producer_lock irq_context: softirq (&peer->hb_timer) slock-AF_INET6 batched_entropy_u32.lock crngs.lock irq_context: softirq (&peer->hb_timer) slock-AF_INET6 batched_entropy_u8.lock irq_context: 0 (wq_completion)wg-crypt-wg1#4 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 (wq_completion)wg-crypt-wg1#4 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: softirq (&peer->hb_timer) slock-AF_INET6 kfence_freelist_lock irq_context: 0 sb_writers#4 rcu_read_lock rcu_read_lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem rcu_node_0 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 tomoyo_ss rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 tomoyo_ss rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 tomoyo_ss rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_CAN &mm->mmap_lock &rq->__lock irq_context: 0 sk_lock-AF_CAN &mm->mmap_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cgroup_threadgroup_rwsem freezer_mutex freezer_mutex.wait_lock irq_context: 0 cgroup_threadgroup_rwsem freezer_mutex.wait_lock irq_context: 0 &sb->s_type->i_mutex_key#8 fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#8 fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_read_lock &n->list_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_read_lock &n->list_lock &c->lock irq_context: 0 &sig->cred_guard_mutex &tsk->futex_exit_mutex &rq->__lock irq_context: 0 &sig->cred_guard_mutex &tsk->futex_exit_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 jbd2_handle &cfs_rq->removed.lock irq_context: 0 sb_writers#4 sb_internal &rq->__lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 jbd2_handle &journal->j_wait_updates &p->pi_lock 
irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 jbd2_handle &journal->j_wait_updates &p->pi_lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 jbd2_handle &journal->j_wait_updates &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 jbd2_handle &journal->j_wait_updates &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &type->i_mutex_dir_key#3 sb_writers#4 jbd2_handle &____s->seqcount irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &obj_hash[i].lock pool_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &sbi->s_orphan_lock &mapping->private_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 remove_cache_srcu rcu_read_lock &rcu_state.gp_wq irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 remove_cache_srcu rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 remove_cache_srcu rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 remove_cache_srcu rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 sb_internal jbd2_handle &sbi->s_orphan_lock &ret->b_state_lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle &sbi->s_orphan_lock &lock->wait_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &lock->wait_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss &cfs_rq->removed.lock irq_context: 0 (wq_completion)wg-crypt-wg1#8 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &ul->lock irq_context: softirq (&peer->hb_timer) slock-AF_INET6 &meta->lock irq_context: 0 (wq_completion)krdsd (work_completion)(&(&cp->cp_conn_w)->work) rcu_read_lock rcu_read_lock &pool->lock/1 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)wg-crypt-wg2#7 (work_completion)(&peer->transmit_packet_work) &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg2#7 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock irq_context: 0 (wq_completion)wg-crypt-wg2#7 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)wg-crypt-wg2#4 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-crypt-wg2#4 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &mm->mmap_lock &____s->seqcount#2 irq_context: 0 sb_writers#4 tomoyo_ss rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#4 tomoyo_ss rcu_read_lock &rq->__lock irq_context: 0 sb_writers#4 tomoyo_ss rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 tomoyo_ss rcu_read_lock &rcu_state.gp_wq irq_context: 0 sb_writers#4 tomoyo_ss rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 sb_writers#4 tomoyo_ss rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 tomoyo_ss rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_es_lock key#6 irq_context: 0 sb_writers#4 
&sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &lg->lg_mutex rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &lg->lg_mutex rcu_read_lock &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &lg->lg_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 mem_id_lock rcu_read_lock &rq->__lock irq_context: 0 mem_id_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle rcu_node_0 irq_context: 0 sb_writers#4 batched_entropy_u8.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-sk_lock-AF_RXRPC &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &rcu_state.expedited_wq &p->pi_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 __ip_vs_mutex nf_hook_mutex irq_context: 0 &mm->mmap_lock rcu_read_lock &rq->__lock &obj_hash[i].lock irq_context: 0 &mm->mmap_lock rcu_read_lock &rq->__lock &base->lock irq_context: 0 &mm->mmap_lock rcu_read_lock &rq->__lock &base->lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 &rnp->exp_wq[0] irq_context: 0 __ip_vs_mutex nf_hook_mutex cpu_hotplug_lock irq_context: softirq (&peer->hb_timer) slock-AF_INET6 &____s->seqcount#2 irq_context: 0 &mm->mmap_lock &folio_wait_table[i] &p->pi_lock &cfs_rq->removed.lock irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem rcu_read_lock &rcu_state.expedited_wq irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 tomoyo_ss rcu_read_lock &rcu_state.gp_wq irq_context: 0 &mm->mmap_lock batched_entropy_u8.lock crngs.lock irq_context: 0 &xt[i].mutex rcu_read_lock &____s->seqcount irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 tomoyo_ss rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 tomoyo_ss rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 tomoyo_ss rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 remove_cache_srcu rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 remove_cache_srcu rcu_read_lock &rq->__lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 remove_cache_srcu rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 slock-AF_INET &sk->sk_lock.wq &p->pi_lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock batched_entropy_u8.lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock kfence_freelist_lock irq_context: 0 
sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 jbd2_handle &journal->j_wait_updates &p->pi_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 jbd2_handle &journal->j_wait_updates &p->pi_lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 jbd2_handle &journal->j_wait_updates &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 jbd2_handle &journal->j_wait_updates &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_LLC &rq->__lock irq_context: 0 sk_lock-AF_LLC &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_node_0 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_read_lock &rcu_state.expedited_wq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 remove_cache_srcu rcu_read_lock &rcu_state.expedited_wq irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 remove_cache_srcu rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 remove_cache_srcu rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 remove_cache_srcu rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_node_0 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &rcu_state.expedited_wq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &rcu_state.expedited_wq &p->pi_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_CAN batched_entropy_u8.lock irq_context: 0 sk_lock-AF_CAN kfence_freelist_lock irq_context: 0 sk_lock-AF_CAN rcu_read_lock_bh _xmit_NONE#2 &meta->lock irq_context: 0 sk_lock-AF_CAN rcu_read_lock_bh _xmit_NONE#2 kfence_freelist_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem rcu_read_lock &rcu_state.gp_wq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock ptlock_ptr(page)#2 rcu_read_lock &p->pi_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 fill_pool_map-wait-type-override &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem 
jbd2_handle &ei->i_data_sem &ei->i_es_lock key#7 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_read_lock batched_entropy_u8.lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_read_lock kfence_freelist_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &mm->mmap_lock rcu_read_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 quarantine_lock irq_context: 0 ebt_mutex &mm->mmap_lock &rq->__lock irq_context: 0 ebt_mutex &mm->mmap_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 nf_sockopt_mutex nf_sockopt_mutex.wait_lock irq_context: 0 nf_sockopt_mutex.wait_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rnp->exp_wq[0] irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem rcu_read_lock &cfs_rq->removed.lock irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem rcu_read_lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 jbd2_handle rcu_node_0 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 jbd2_handle &rcu_state.expedited_wq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 jbd2_handle &rcu_state.expedited_wq &p->pi_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 jbd2_handle &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 jbd2_handle &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock genl_mutex rcu_read_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: 0 cb_lock genl_mutex rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 cb_lock genl_mutex rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 rcu_read_lock &zone->lock &____s->seqcount irq_context: 0 rtnl_mutex devnet_rename_sem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rcu_read_lock rcu_read_lock_bh rcu_read_lock &n->list_lock irq_context: 0 rcu_read_lock rcu_read_lock_bh rcu_read_lock &n->list_lock &c->lock irq_context: 0 sb_writers#4 jbd2_handle bit_wait_table + i irq_context: 0 sb_writers#4 jbd2_handle &rq->__lock irq_context: 0 sb_writers#4 jbd2_handle &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 mapping.invalidate_lock jbd2_handle &ei->i_data_sem &ei->i_data_sem/1 &wb->list_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 mapping.invalidate_lock jbd2_handle &ei->i_data_sem &ei->i_data_sem/1 &wb->list_lock &sb->s_type->i_lock_key#22 irq_context: 0 rtnl_mutex &tn->lock &rq->__lock irq_context: 0 rtnl_mutex &tn->lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock sb_pagefaults jbd2_handle &ret->b_state_lock &journal->j_list_lock irq_context: 0 remove_cache_srcu rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 
tomoyo_ss &cfs_rq->removed.lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &n->list_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &n->list_lock &c->lock irq_context: 0 &u->iolock stock_lock irq_context: 0 &u->iolock rcu_node_0 irq_context: 0 cb_lock rcu_read_lock &____s->seqcount#2 irq_context: 0 cb_lock rcu_read_lock &____s->seqcount irq_context: 0 &u->iolock &u->peer_wait &p->pi_lock &cfs_rq->removed.lock irq_context: 0 &type->i_mutex_dir_key#7 pool_lock#2 irq_context: 0 &type->i_mutex_dir_key#7 &dentry->d_lock &wq irq_context: 0 &u->iolock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock genl_mutex &ht->lock irq_context: 0 &ei->i_data_sem rcu_read_lock rcu_node_0 irq_context: 0 &ei->i_data_sem rcu_read_lock &rq->__lock irq_context: 0 &ei->i_data_sem rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &ei->i_data_sem rcu_node_0 irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &folio_wait_table[i] irq_context: 0 &sb->s_type->i_mutex_key#8 rcu_read_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 &sb->s_type->i_mutex_key#8 rcu_read_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &n->list_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &journal->j_wait_reserved irq_context: 0 cb_lock genl_mutex remove_cache_srcu &rq->__lock irq_context: 0 cb_lock genl_mutex remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock genl_mutex remove_cache_srcu rcu_read_lock rcu_node_0 irq_context: 0 cb_lock genl_mutex remove_cache_srcu rcu_read_lock &rq->__lock irq_context: 0 cb_lock genl_mutex remove_cache_srcu rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex ipvs->sync_mutex fs_reclaim irq_context: 0 rtnl_mutex ipvs->sync_mutex fs_reclaim &rq->__lock irq_context: 0 rtnl_mutex ipvs->sync_mutex fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex ipvs->sync_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 rtnl_mutex ipvs->sync_mutex pool_lock#2 irq_context: 0 rtnl_mutex ipvs->sync_mutex stock_lock irq_context: 0 rtnl_mutex ipvs->sync_mutex mmu_notifier_invalidate_range_start irq_context: 0 rtnl_mutex ipvs->sync_mutex &sb->s_type->i_lock_key#8 irq_context: 0 rtnl_mutex ipvs->sync_mutex &dir->lock irq_context: 0 rtnl_mutex ipvs->sync_mutex &obj_hash[i].lock irq_context: 0 rtnl_mutex ipvs->sync_mutex k-sk_lock-AF_INET irq_context: 0 rtnl_mutex ipvs->sync_mutex k-sk_lock-AF_INET k-slock-AF_INET irq_context: 0 rtnl_mutex ipvs->sync_mutex k-sk_lock-AF_INET &table->hash[i].lock irq_context: 0 rtnl_mutex ipvs->sync_mutex k-sk_lock-AF_INET &table->hash[i].lock k-clock-AF_INET irq_context: 0 rtnl_mutex ipvs->sync_mutex k-sk_lock-AF_INET &table->hash[i].lock &table->hash2[i].lock irq_context: 0 rtnl_mutex ipvs->sync_mutex k-sk_lock-AF_INET &rq->__lock irq_context: 0 rtnl_mutex ipvs->sync_mutex k-sk_lock-AF_INET &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex ipvs->sync_mutex k-slock-AF_INET irq_context: 0 rtnl_mutex ipvs->sync_mutex k-sk_lock-AF_INET fs_reclaim irq_context: 0 rtnl_mutex ipvs->sync_mutex k-sk_lock-AF_INET fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 rtnl_mutex ipvs->sync_mutex k-sk_lock-AF_INET pool_lock#2 irq_context: 0 rtnl_mutex ipvs->sync_mutex k-sk_lock-AF_INET &obj_hash[i].lock 
irq_context: 0 rtnl_mutex ipvs->sync_mutex k-sk_lock-AF_INET &in_dev->mc_tomb_lock irq_context: 0 rtnl_mutex ipvs->sync_mutex k-sk_lock-AF_INET &im->lock irq_context: 0 rtnl_mutex ipvs->sync_mutex k-sk_lock-AF_INET _xmit_IPGRE irq_context: 0 rtnl_mutex ipvs->sync_mutex k-sk_lock-AF_INET _xmit_IPGRE pool_lock#2 irq_context: 0 rtnl_mutex ipvs->sync_mutex k-sk_lock-AF_INET &base->lock irq_context: 0 rtnl_mutex ipvs->sync_mutex k-sk_lock-AF_INET &base->lock &obj_hash[i].lock irq_context: 0 cb_lock genl_mutex rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 cb_lock genl_mutex rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 cb_lock genl_mutex rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex ipvs->sync_mutex kthread_create_lock irq_context: 0 rtnl_mutex ipvs->sync_mutex &p->pi_lock irq_context: 0 rtnl_mutex ipvs->sync_mutex &p->pi_lock &rq->__lock irq_context: 0 rtnl_mutex ipvs->sync_mutex &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex ipvs->sync_mutex &rq->__lock irq_context: 0 rtnl_mutex ipvs->sync_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex ipvs->sync_mutex &x->wait irq_context: 0 rtnl_mutex ipvs->sync_mutex &ipvs->sync_buff_lock irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock &c->lock irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock rcu_read_lock &____s->seqcount#7 irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock &nf_nat_locks[i] irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 rcu_read_lock &rcu_state.gp_wq irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 rcu_read_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &meta_group_info[i]->alloc_sem &bgl->locks[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET/1 rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &xt[i].mutex &meta->lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &ei->i_data_sem &ei->i_es_lock &n->list_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &ei->i_data_sem &ei->i_es_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&(&ssp->srcu_sup->work)->work) &ssp->srcu_sup->srcu_gp_mutex &ssp->srcu_sup->srcu_cb_mutex &rq->__lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&(&ssp->srcu_sup->work)->work) &ssp->srcu_sup->srcu_gp_mutex &ssp->srcu_sup->srcu_cb_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &mm->mmap_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &mm->mmap_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &mm->mmap_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex 
devnet_rename_sem &____s->seqcount#2 irq_context: 0 rtnl_mutex ipvs->sync_mutex k-sk_lock-AF_INET &c->lock irq_context: 0 cb_lock genl_mutex rcu_read_lock &____s->seqcount#2 irq_context: 0 cb_lock genl_mutex rcu_read_lock &____s->seqcount irq_context: 0 sk_lock-AF_INET sk_lock-AF_INET/1 rcu_read_lock rcu_node_0 irq_context: 0 sk_lock-AF_INET sk_lock-AF_INET/1 rcu_read_lock &rq->__lock irq_context: 0 sk_lock-AF_INET sk_lock-AF_INET/1 rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 prog_idr_lock &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)wg-crypt-wg2#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)wg-crypt-wg2#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal remove_cache_srcu &c->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal remove_cache_srcu &n->list_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal remove_cache_srcu &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal remove_cache_srcu &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_data_sem &ei->i_es_lock key#7 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem rcu_read_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &iint->mutex ima_extend_list_mutex &rq->__lock irq_context: 0 &iint->mutex ima_extend_list_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock &anon_vma->rwsem quarantine_lock irq_context: 0 &sb->s_type->i_mutex_key#8 remove_cache_srcu irq_context: 0 &sb->s_type->i_mutex_key#8 remove_cache_srcu &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#8 remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#8 remove_cache_srcu quarantine_lock irq_context: 0 cb_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 cb_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 cb_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock sb_pagefaults remove_cache_srcu irq_context: 0 &mm->mmap_lock sb_pagefaults remove_cache_srcu quarantine_lock irq_context: 0 &mm->mmap_lock sb_pagefaults remove_cache_srcu &c->lock irq_context: 0 &mm->mmap_lock sb_pagefaults 
remove_cache_srcu &n->list_lock irq_context: 0 &mm->mmap_lock sb_pagefaults remove_cache_srcu &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sem->wait_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 remove_cache_srcu pool_lock#2 irq_context: 0 &mm->mmap_lock sb_writers#4 &____s->seqcount#2 irq_context: 0 &mm->mmap_lock sb_writers#4 &____s->seqcount irq_context: 0 cb_lock rcu_read_lock &rcu_state.gp_wq irq_context: 0 cb_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 cb_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 cb_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 lock prog_idr_lock rcu_read_lock pool_lock#2 irq_context: 0 lock prog_idr_lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-crypt-wg0#9 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 &rq->__lock &cfs_rq->removed.lock irq_context: 0 cb_lock genl_mutex remove_cache_srcu pool_lock#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle rcu_node_0 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle batched_entropy_u8.lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle kfence_freelist_lock irq_context: 0 &mm->mmap_lock sb_pagefaults remove_cache_srcu pool_lock#2 irq_context: 0 &mm->mmap_lock sb_pagefaults remove_cache_srcu &rq->__lock irq_context: 0 &mm->mmap_lock sb_pagefaults remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &type->i_mutex_dir_key#3 remove_cache_srcu rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 &type->i_mutex_dir_key#3 remove_cache_srcu rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 &type->i_mutex_dir_key#3 remove_cache_srcu rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &type->i_mutex_dir_key#3 remove_cache_srcu rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#4 &fsnotify_mark_srcu &rq->__lock irq_context: 0 sb_writers#4 &fsnotify_mark_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&gc_work->dwork)->work) rcu_read_lock &nf_conntrack_locks[i] irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&gc_work->dwork)->work) rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&gc_work->dwork)->work) rcu_read_lock &nf_conntrack_locks[i]/1 irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&gc_work->dwork)->work) rcu_read_lock rcu_read_lock &nf_nat_locks[i] irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&gc_work->dwork)->work) rcu_read_lock &c->lock irq_context: softirq (&app->join_timer)#2 batched_entropy_u32.lock crngs.lock base_crng.lock irq_context: softirq (&app->join_timer) &app->lock batched_entropy_u32.lock 
crngs.lock base_crng.lock irq_context: 0 &net->packet.sklist_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#8 batched_entropy_u8.lock crngs.lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &n->list_lock &c->lock irq_context: 0 &u->iolock stock_lock rcu_read_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 &mm->mmap_lock sb_pagefaults mapping.invalidate_lock rcu_read_lock &xa->xa_lock#6 rcu_read_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 (wq_completion)events (work_completion)(&(&krcp->monitor_work)->work) rcu_callback per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 &u->iolock rcu_read_lock &rcu_state.gp_wq irq_context: 0 &u->iolock rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 &u->iolock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 &u->iolock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &anon_vma->rwsem rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &anon_vma->rwsem rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &anon_vma->rwsem rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock sb_pagefaults &n->list_lock irq_context: 0 &mm->mmap_lock sb_pagefaults &n->list_lock &c->lock irq_context: 0 &sb->s_type->i_mutex_key#8 ima_extend_list_mutex &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#8 ima_extend_list_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_freezable (work_completion)(&vb->update_balloon_stats_work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 &u->iolock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 &u->iolock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 &u->iolock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem &n->list_lock irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem &n->list_lock &c->lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle &journal->j_state_lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle &journal->j_state_lock &journal->j_wait_commit irq_context: 0 sb_writers#4 sb_internal jbd2_handle &journal->j_state_lock &journal->j_wait_commit &p->pi_lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle &journal->j_state_lock &journal->j_wait_commit &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle &journal->j_state_lock &journal->j_wait_commit &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pack_mutex &rq->__lock irq_context: 0 pack_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 tracepoints_mutex cpu_hotplug_lock jump_label_mutex text_mutex.wait_lock irq_context: 0 tracepoints_mutex cpu_hotplug_lock jump_label_mutex &p->pi_lock irq_context: 0 (wq_completion)events (work_completion)(&aux->work) &rq->__lock irq_context: 0 pcpu_alloc_mutex &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&(&kfence_timer)->work) cpu_hotplug_lock jump_label_mutex jump_label_mutex.wait_lock irq_context: 0 tracepoints_mutex cpu_hotplug_lock jump_label_mutex.wait_lock irq_context: 0 tracepoints_mutex cpu_hotplug_lock 
&p->pi_lock irq_context: 0 tracepoints_mutex cpu_hotplug_lock &p->pi_lock &rq->__lock irq_context: 0 tracepoints_mutex cpu_hotplug_lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 tracepoints_mutex cpu_hotplug_lock &rq->__lock irq_context: 0 tracepoints_mutex cpu_hotplug_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &ht->mutex &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &ht->mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 sb_internal jbd2_handle &rq->__lock &cfs_rq->removed.lock irq_context: softirq (&ifibss->timer) irq_context: softirq (&ifibss->timer) &rdev->wiphy_work_lock irq_context: softirq (&ifibss->timer) rcu_read_lock &pool->lock irq_context: softirq (&ifibss->timer) rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: softirq (&ifibss->timer) rcu_read_lock &pool->lock &p->pi_lock irq_context: softirq (&ifibss->timer) rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: softirq (&ifibss->timer) rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 stock_lock rcu_read_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 rcu_read_lock &____s->seqcount#10 irq_context: 0 rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 nfnl_subsys_ctnetlink nlk_cb_mutex-NETFILTER irq_context: 0 nfnl_subsys_ctnetlink nlk_cb_mutex-NETFILTER fs_reclaim irq_context: 0 nfnl_subsys_ctnetlink nlk_cb_mutex-NETFILTER fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 nfnl_subsys_ctnetlink nlk_cb_mutex-NETFILTER pool_lock#2 irq_context: 0 nfnl_subsys_ctnetlink nlk_cb_mutex-NETFILTER &nf_conntrack_locks[i] irq_context: 0 &p->lock &of->mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 nfnl_subsys_ctnetlink nlk_cb_mutex-NETFILTER &rq->__lock irq_context: 0 nfnl_subsys_ctnetlink nlk_cb_mutex-NETFILTER &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pfkey_mutex irq_context: 0 rlock-AF_KEY irq_context: 0 &sb->s_type->i_mutex_key#10 pfkey_mutex irq_context: 0 &sb->s_type->i_mutex_key#10 clock-AF_KEY irq_context: 0 &sb->s_type->i_mutex_key#10 wlock-AF_KEY irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_PPPOX irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_PPPOX slock-AF_PPPOX irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_PPPOX &pn->hash_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_PPPOX clock-AF_PPPOX irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_PPPOX rlock-AF_PPPOX irq_context: 0 &sb->s_type->i_mutex_key#10 slock-AF_PPPOX irq_context: 0 &xt[i].mutex rcu_read_lock &rcu_state.expedited_wq irq_context: 0 &xt[i].mutex rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 &xt[i].mutex rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 &xt[i].mutex rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 rlock-AF_KEY irq_context: 0 sb_writers#4 sb_internal jbd2_handle &cfs_rq->removed.lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&sdp->work) &base->lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&sdp->work) &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 
&cfs_rq->removed.lock irq_context: 0 sb_writers#4 sb_writers#4 mount_lock irq_context: 0 sb_writers#4 sb_writers#4 tk_core.seq.seqcount irq_context: 0 sb_writers#4 sb_writers#4 mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#4 sb_writers#4 &____s->seqcount irq_context: 0 sb_writers#4 sb_writers#4 pool_lock#2 irq_context: 0 sb_writers#4 sb_writers#4 rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#4 sb_writers#4 &obj_hash[i].lock irq_context: 0 sb_writers#4 sb_writers#4 &journal->j_state_lock irq_context: 0 sb_writers#4 sb_writers#4 jbd2_handle irq_context: 0 sb_writers#4 sb_writers#4 jbd2_handle &ei->i_raw_lock irq_context: 0 sb_writers#4 sb_writers#4 jbd2_handle &journal->j_wait_updates irq_context: 0 vlan_ioctl_mutex rtnl_mutex fs_reclaim irq_context: 0 vlan_ioctl_mutex rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 vlan_ioctl_mutex rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 vlan_ioctl_mutex rtnl_mutex stock_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex pool_lock#2 irq_context: 0 vlan_ioctl_mutex rtnl_mutex stack_depot_init_mutex irq_context: 0 vlan_ioctl_mutex rtnl_mutex &c->lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex _xmit_ETHER irq_context: 0 vlan_ioctl_mutex rtnl_mutex _xmit_ETHER pool_lock#2 irq_context: 0 vlan_ioctl_mutex rtnl_mutex _xmit_ETHER &local->filter_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex _xmit_ETHER &local->filter_lock pool_lock#2 irq_context: 0 vlan_ioctl_mutex rtnl_mutex _xmit_ETHER rcu_read_lock &pool->lock/1 irq_context: 0 vlan_ioctl_mutex rtnl_mutex _xmit_ETHER rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex _xmit_ETHER rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex _xmit_ETHER rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex _xmit_ETHER rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 vlan_ioctl_mutex rtnl_mutex &rq->__lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &obj_hash[i].lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex batched_entropy_u32.lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &base->lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &base->lock &obj_hash[i].lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex pcpu_alloc_mutex irq_context: 0 vlan_ioctl_mutex rtnl_mutex pcpu_alloc_mutex pcpu_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &dir->lock#2 irq_context: 0 vlan_ioctl_mutex rtnl_mutex net_rwsem irq_context: 0 vlan_ioctl_mutex rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 vlan_ioctl_mutex rtnl_mutex &tn->lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &x->wait#9 irq_context: 0 vlan_ioctl_mutex rtnl_mutex &k->list_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex gdp_mutex irq_context: 0 vlan_ioctl_mutex rtnl_mutex gdp_mutex &rq->__lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex gdp_mutex &k->list_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex lock kernfs_idr_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex lock kernfs_idr_lock pool_lock#2 irq_context: 0 vlan_ioctl_mutex rtnl_mutex &root->kernfs_rwsem irq_context: 0 vlan_ioctl_mutex rtnl_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 vlan_ioctl_mutex rtnl_mutex bus_type_sem irq_context: 0 vlan_ioctl_mutex rtnl_mutex 
sysfs_symlink_target_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &root->kernfs_rwsem irq_context: 0 vlan_ioctl_mutex rtnl_mutex &dev->power.lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex dpm_list_mtx irq_context: 0 vlan_ioctl_mutex rtnl_mutex uevent_sock_mutex irq_context: 0 vlan_ioctl_mutex rtnl_mutex uevent_sock_mutex fs_reclaim irq_context: 0 vlan_ioctl_mutex rtnl_mutex uevent_sock_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 vlan_ioctl_mutex rtnl_mutex uevent_sock_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex uevent_sock_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 vlan_ioctl_mutex rtnl_mutex uevent_sock_mutex &c->lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex uevent_sock_mutex &rq->__lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex uevent_sock_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 vlan_ioctl_mutex rtnl_mutex uevent_sock_mutex pool_lock#2 irq_context: 0 vlan_ioctl_mutex rtnl_mutex uevent_sock_mutex nl_table_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex uevent_sock_mutex &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &sbi->s_orphan_lock &mapping->private_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex uevent_sock_mutex nl_table_wait.lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex uevent_sock_mutex.wait_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &p->pi_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &p->pi_lock &rq->__lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 vlan_ioctl_mutex rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 vlan_ioctl_mutex rtnl_mutex subsys mutex#17 irq_context: 0 vlan_ioctl_mutex rtnl_mutex subsys mutex#17 &k->k_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex dev_hotplug_mutex irq_context: 0 vlan_ioctl_mutex rtnl_mutex dev_hotplug_mutex &dev->power.lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex dev_base_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex input_pool.lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_read_lock &pool->lock/1 irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 vlan_ioctl_mutex rtnl_mutex &tbl->lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex sysctl_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex nl_table_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex nl_table_wait.lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex failover_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &n->list_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &n->list_lock &c->lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex proc_subdir_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex proc_inum_ida.xa_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex proc_subdir_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &____s->seqcount irq_context: 0 vlan_ioctl_mutex rtnl_mutex &idev->mc_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 vlan_ioctl_mutex rtnl_mutex 
&idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 vlan_ioctl_mutex rtnl_mutex &idev->mc_lock pool_lock#2 irq_context: 0 vlan_ioctl_mutex rtnl_mutex &idev->mc_lock &obj_hash[i].lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &idev->mc_lock &rq->__lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &idev->mc_lock &vlan_netdev_addr_lock_key irq_context: 0 vlan_ioctl_mutex rtnl_mutex &idev->mc_lock &vlan_netdev_addr_lock_key pool_lock#2 irq_context: 0 vlan_ioctl_mutex rtnl_mutex &pnettable->lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex smc_ib_devices.mutex irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex net_rwsem &rq->__lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex net_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex lweventlist_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex lweventlist_lock pool_lock#2 irq_context: 0 vlan_ioctl_mutex rtnl_mutex lweventlist_lock &dir->lock#2 irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_read_lock &pool->lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 lock map_idr_lock &c->lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex.wait_lock irq_context: 0 vlan_ioctl_mutex &p->pi_lock irq_context: 0 vlan_ioctl_mutex &p->pi_lock &rq->__lock irq_context: 0 vlan_ioctl_mutex &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 vlan_ioctl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 nfnl_subsys_ctnetlink nlk_cb_mutex-NETFILTER rcu_read_lock pool_lock#2 irq_context: 0 nfnl_subsys_ctnetlink nlk_cb_mutex-NETFILTER rcu_read_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 nfnl_subsys_ctnetlink nlk_cb_mutex-NETFILTER rcu_read_lock rcu_read_lock_bh pool_lock#2 irq_context: 0 nfnl_subsys_ctnetlink nlk_cb_mutex-NETFILTER rlock-AF_NETLINK irq_context: 0 nfnl_subsys_ctnetlink nlk_cb_mutex-NETFILTER &obj_hash[i].lock irq_context: 0 rcu_read_lock &dtab->index_lock irq_context: 0 rcu_read_lock &dtab->index_lock stock_lock irq_context: 0 rcu_read_lock &dtab->index_lock pool_lock#2 irq_context: 0 &root->kernfs_rwsem kernfs_pr_cont_lock irq_context: 0 &mm->mmap_lock &vma->vm_lock->lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex stock_lock irq_context: 0 l2tp_ip_lock irq_context: 0 sk_lock-AF_CAN j1939_netdev_lock irq_context: 0 sk_lock-AF_CAN j1939_netdev_lock &rq->__lock irq_context: 0 sk_lock-AF_CAN j1939_netdev_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_CAN j1939_netdev_lock fs_reclaim irq_context: 0 sk_lock-AF_CAN j1939_netdev_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sk_lock-AF_CAN j1939_netdev_lock &____s->seqcount#2 irq_context: 0 sk_lock-AF_CAN j1939_netdev_lock &____s->seqcount irq_context: 0 sk_lock-AF_CAN j1939_netdev_lock pool_lock#2 irq_context: 0 sk_lock-AF_CAN j1939_netdev_lock &c->lock irq_context: 0 sk_lock-AF_CAN j1939_netdev_lock &net->can.rcvlists_lock irq_context: 0 sk_lock-AF_CAN &priv->lock irq_context: 0 
sk_lock-AF_CAN &priv->lock pool_lock#2 irq_context: 0 sk_lock-AF_CAN &priv->lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_CAN &priv->j1939_socks_lock irq_context: 0 &sb->s_type->i_mutex_key#10 l2tp_ip_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_CAN &priv->j1939_socks_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_CAN &priv->lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_CAN &priv->lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_CAN &priv->lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_CAN j1939_netdev_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_CAN j1939_netdev_lock &net->can.rcvlists_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_CAN j1939_netdev_lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_CAN j1939_netdev_lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_CAN j1939_netdev_lock &priv->lock irq_context: 0 rtnl_mutex uevent_sock_mutex nl_table_wait.lock &p->pi_lock irq_context: 0 rtnl_mutex uevent_sock_mutex nl_table_wait.lock &p->pi_lock &rq->__lock irq_context: 0 rtnl_mutex uevent_sock_mutex nl_table_wait.lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq rcu_callback rlock-AF_CAN irq_context: softirq rcu_callback elock-AF_CAN irq_context: 0 rtnl_mutex &bridge_netdev_addr_lock_key pool_lock#2 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock &c->lock irq_context: 0 rtnl_mutex &bridge_netdev_addr_lock_key &c->lock irq_context: 0 &mm->mmap_lock &anon_vma->rwsem rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 &mm->mmap_lock &anon_vma->rwsem rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 sb_writers#4 mapping.invalidate_lock irq_context: 0 sb_writers#4 mapping.invalidate_lock mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#4 mapping.invalidate_lock mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 sb_writers#4 mapping.invalidate_lock &____s->seqcount irq_context: 0 sb_writers#4 mapping.invalidate_lock pool_lock#2 irq_context: 0 sb_writers#4 mapping.invalidate_lock stock_lock irq_context: 0 sb_writers#4 mapping.invalidate_lock &xa->xa_lock#6 irq_context: 0 sb_writers#4 mapping.invalidate_lock lock#4 irq_context: 0 sb_writers#4 mapping.invalidate_lock &xa->xa_lock#6 stock_lock irq_context: 0 sb_writers#4 mapping.invalidate_lock &xa->xa_lock#6 pool_lock#2 irq_context: 0 sb_writers#4 mapping.invalidate_lock &rq->__lock irq_context: 0 sb_writers#4 mapping.invalidate_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 mapping.invalidate_lock &ei->i_es_lock irq_context: 0 sb_writers#4 mapping.invalidate_lock tk_core.seq.seqcount irq_context: 0 sb_writers#4 mapping.invalidate_lock &dd->lock irq_context: 0 sb_writers#4 mapping.invalidate_lock rcu_read_lock &dd->lock irq_context: 0 sb_writers#4 mapping.invalidate_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 sb_writers#4 mapping.invalidate_lock rcu_read_lock &c->lock irq_context: 0 sb_writers#4 mapping.invalidate_lock rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#4 mapping.invalidate_lock rcu_read_lock &rq->__lock irq_context: 0 sb_writers#4 mapping.invalidate_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &bridge_netdev_addr_lock_key irq_context: 0 (wq_completion)ipv6_addrconf 
(work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &bridge_netdev_addr_lock_key &c->lock irq_context: 0 sb_writers#4 mapping.invalidate_lock rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#4 mapping.invalidate_lock rcu_read_lock tk_core.seq.seqcount irq_context: 0 sb_writers#4 mapping.invalidate_lock rcu_read_lock &virtscsi_vq->vq_lock irq_context: 0 sb_writers#4 &folio_wait_table[i] irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock rcu_read_lock_bh &sch->q.lock irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock rcu_read_lock_bh &sch->q.lock tk_core.seq.seqcount irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &sch->q.lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &sch->q.lock tk_core.seq.seqcount irq_context: 0 sb_writers#4 mapping.invalidate_lock lock#4 &lruvec->lru_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_lock_key#22 &xa->xa_lock#6 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 lock#5 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &lruvec->lru_lock irq_context: 0 sb_writers#4 mapping.invalidate_lock lock#4 rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#4 mapping.invalidate_lock lock#4 &obj_hash[i].lock irq_context: 0 sb_writers#4 mapping.invalidate_lock &c->lock irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock stock_lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 irq_context: 0 sk_lock-AF_INET6 rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 batched_entropy_u8.lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock &nf_conntrack_locks[i]/1 irq_context: 0 sk_lock-AF_INET6 rcu_read_lock &n->lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock &n->lock &base->lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock &n->lock &base->lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock &n->lock &c->lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock &n->lock pool_lock#2 irq_context: 0 sk_lock-AF_INET6 rcu_read_lock stock_lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock &ul->lock#2 irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &obj_hash[i].lock pool_lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh pool_lock#2 irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock rcu_read_lock &r->producer_lock irq_context: 0 &mm->mmap_lock sb_writers#4 &n->list_lock irq_context: 0 &mm->mmap_lock sb_writers#4 &n->list_lock &c->lock irq_context: 0 sb_writers#4 sb_writers#4 jbd2_handle &ret->b_state_lock irq_context: 0 sb_writers#4 sb_writers#4 jbd2_handle &rq->__lock irq_context: 0 sb_writers#4 sb_writers#4 jbd2_handle &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 
&sb->s_type->i_mutex_key#8 &folio_wait_table[i] irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &folio_wait_table[i] &p->pi_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &folio_wait_table[i] &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &folio_wait_table[i] &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 sb_writers#4 &journal->j_state_lock irq_context: 0 sb_writers#4 sb_writers#4 &journal->j_state_lock tk_core.seq.seqcount irq_context: 0 sb_writers#4 sb_writers#4 &journal->j_state_lock &obj_hash[i].lock irq_context: 0 sb_writers#4 sb_writers#4 &journal->j_state_lock &base->lock irq_context: 0 sb_writers#4 sb_writers#4 &journal->j_state_lock &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#4 sb_writers#4 &rq->__lock irq_context: 0 sb_writers#4 sb_writers#4 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock &rdev->wiphy.mtx &local->mtx irq_context: 0 cb_lock &rdev->wiphy.mtx &local->mtx &____s->seqcount irq_context: 0 cb_lock &rdev->wiphy.mtx &local->mtx pool_lock#2 irq_context: 0 cb_lock &rdev->wiphy.mtx &local->mtx fs_reclaim irq_context: 0 cb_lock &rdev->wiphy.mtx &local->mtx fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 cb_lock &rdev->wiphy.mtx &local->mtx fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 cb_lock &rdev->wiphy.mtx &local->mtx &local->ack_status_lock irq_context: 0 cb_lock &rdev->wiphy.mtx &local->mtx &local->ack_status_lock pool_lock#2 irq_context: 0 cb_lock &rdev->wiphy.mtx &local->mtx rcu_read_lock &local->queue_stop_reason_lock irq_context: 0 cb_lock &rdev->wiphy.mtx &local->mtx rcu_read_lock tk_core.seq.seqcount irq_context: 0 cb_lock &rdev->wiphy.mtx &local->mtx rcu_read_lock hwsim_radio_lock irq_context: 0 cb_lock &rdev->wiphy.mtx &local->mtx rcu_read_lock hwsim_radio_lock &c->lock irq_context: 0 cb_lock &rdev->wiphy.mtx &local->mtx rcu_read_lock hwsim_radio_lock pool_lock#2 irq_context: 0 cb_lock &rdev->wiphy.mtx &local->mtx rcu_read_lock hwsim_radio_lock &list->lock#19 irq_context: 0 cb_lock &rdev->wiphy.mtx &local->mtx rcu_read_lock &list->lock#19 irq_context: softirq rcu_read_lock lock#6 &kcov->lock irq_context: softirq rcu_read_lock &local->ack_status_lock irq_context: softirq rcu_read_lock &local->ack_status_lock &obj_hash[i].lock irq_context: softirq rcu_read_lock &local->ack_status_lock pool_lock#2 irq_context: softirq rcu_read_lock rcu_read_lock nl_table_lock irq_context: softirq rcu_read_lock rcu_read_lock nl_table_wait.lock irq_context: 0 cb_lock &rdev->wiphy.mtx &local->mtx rcu_read_lock rcu_node_0 irq_context: 0 cb_lock &rdev->wiphy.mtx &local->mtx rcu_read_lock &rq->__lock irq_context: 0 cb_lock &rdev->wiphy.mtx rcu_read_lock pool_lock#2 irq_context: 0 cb_lock &rdev->wiphy.mtx rcu_read_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 cb_lock &rdev->wiphy.mtx rcu_read_lock rcu_read_lock_bh pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx lock#6 &kcov->lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &journal->j_wait_updates &p->pi_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &journal->j_wait_updates &p->pi_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &journal->j_wait_updates &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &p->pi_lock &rq->__lock &cfs_rq->removed.lock 
irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &sem->wait_lock irq_context: 0 sb_writers#4 mapping.invalidate_lock &sem->wait_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem batched_entropy_u8.lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem kfence_freelist_lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 &dentry->d_lock &dentry->d_lock &lru->node[i].lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 &n->list_lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 &n->list_lock &c->lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 &rq->__lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 &root->kernfs_rwsem inode_hash_lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 &root->kernfs_rwsem fs_reclaim irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 &root->kernfs_rwsem fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 &root->kernfs_rwsem stock_lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 &root->kernfs_rwsem pool_lock#2 irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 &root->kernfs_rwsem mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 &root->kernfs_rwsem inode_hash_lock &sb->s_type->i_lock_key#31 irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 &root->kernfs_rwsem inode_hash_lock &s->s_inode_list_lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 &root->kernfs_rwsem &rq->__lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 &root->kernfs_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle rcu_node_0 irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 &root->kernfs_rwsem tk_core.seq.seqcount irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 &root->kernfs_rwsem &sb->s_type->i_lock_key#31 irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 &sb->s_type->i_lock_key#31 irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 &sb->s_type->i_lock_key#31 &dentry->d_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->nc.work)->work) rcu_read_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock &obj_hash[i].lock pool_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 mapping.invalidate_lock &xa->xa_lock#6 &c->lock irq_context: 0 &sb->s_type->i_mutex_key#8 rcu_read_lock &rcu_state.gp_wq irq_context: 0 &sb->s_type->i_mutex_key#8 rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 rcu_node_0 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss rcu_read_lock rename_lock irq_context: 0 sb_writers#4 mapping.invalidate_lock &ei->i_es_lock key#2 irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock &____s->seqcount#7 irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &____s->seqcount#7 irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock &nf_nat_locks[i] 
irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock &nf_conntrack_locks[i] irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 batched_entropy_u8.lock irq_context: 0 &root->kernfs_rwsem &cfs_rq->removed.lock irq_context: 0 &root->kernfs_rwsem &obj_hash[i].lock irq_context: 0 &root->kernfs_rwsem pool_lock#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 &journal->j_state_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 &journal->j_state_lock tk_core.seq.seqcount irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 &journal->j_state_lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 &journal->j_state_lock &base->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 &journal->j_state_lock &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 jbd2_handle bit_wait_table + i irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh &sch->q.lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh &sch->q.lock tk_core.seq.seqcount irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock &tbl->lock &____s->seqcount irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock &tbl->lock pool_lock#2 irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock &tbl->lock rcu_read_lock pool_lock#2 irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock &tbl->lock &obj_hash[i].lock irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock &tbl->lock batched_entropy_u32.lock irq_context: 0 sb_writers#4 sb_writers#4 &c->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &journal->j_wait_updates &p->pi_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &journal->j_wait_updates &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &journal->j_wait_updates &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &type->i_mutex_dir_key#3 sb_writers#4 rcu_read_lock rcu_node_0 irq_context: 0 &type->i_mutex_dir_key#3 sb_writers#4 rcu_read_lock &rq->__lock irq_context: 0 &type->i_mutex_dir_key#3 sb_writers#4 rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_CAN &priv->lock &c->lock irq_context: 0 sb_writers#4 mapping.invalidate_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#4 mapping.invalidate_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 sb_writers#4 mapping.invalidate_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 mapping.invalidate_lock rcu_node_0 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 remove_cache_srcu &c->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 remove_cache_srcu &n->list_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 remove_cache_srcu 
&obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 remove_cache_srcu pool_lock#2 irq_context: 0 sb_writers#4 sb_internal &n->list_lock irq_context: 0 sb_writers#4 sb_internal &n->list_lock &c->lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle &sbi->s_orphan_lock &ret->b_state_lock &journal->j_list_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &cfs_rq->removed.lock irq_context: 0 sb_writers#4 sb_writers#4 &n->list_lock irq_context: 0 sb_writers#4 sb_writers#4 &n->list_lock &c->lock irq_context: 0 sb_writers#4 mapping.invalidate_lock mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &xt[i].mutex quarantine_lock irq_context: 0 sb_writers#8 fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 mapping.invalidate_lock &pcp->lock &zone->lock irq_context: 0 sb_writers#4 mapping.invalidate_lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 rtnl_mutex &root->kernfs_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &idev->mc_lock &bridge_netdev_addr_lock_key &c->lock irq_context: 0 sb_writers#4 mapping.invalidate_lock &____s->seqcount#2 irq_context: 0 (wq_completion)events_unbound &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock &rdev->wiphy.mtx &local->mtx &pcp->lock &zone->lock irq_context: 0 cb_lock &rdev->wiphy.mtx &local->mtx &c->lock irq_context: 0 cb_lock &rdev->wiphy.mtx rcu_read_lock &c->lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) &cfs_rq->removed.lock irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq (&peer->hb_timer) slock-AF_INET6 &____s->seqcount irq_context: 0 (wq_completion)wg-crypt-wg2#7 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh batched_entropy_u32.lock irq_context: 0 (wq_completion)wg-crypt-wg2#7 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#7 irq_context: softirq (&n->timer) &n->lock pool_lock#2 irq_context: softirq (&n->timer) pool_lock#2 irq_context: softirq (&n->timer) &c->lock irq_context: softirq (&n->timer) &dir->lock#2 irq_context: softirq (&n->timer) &n->list_lock irq_context: softirq (&n->timer) &n->list_lock &c->lock irq_context: softirq (&n->timer) &ul->lock#2 irq_context: softirq (&n->timer) rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: softirq (&n->timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: softirq (&n->timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh pool_lock#2 irq_context: softirq (&n->timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock rcu_read_lock &r->producer_lock irq_context: softirq (&n->timer) &obj_hash[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem remove_cache_srcu irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem remove_cache_srcu quarantine_lock irq_context: 0 cb_lock &rdev->wiphy.mtx &local->mtx rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &n->list_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &n->list_lock &c->lock 
irq_context: 0 &p->lock rcu_read_lock rcu_node_0 irq_context: 0 &p->lock rcu_read_lock &rq->__lock irq_context: 0 &p->lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 ppp_mutex rtnl_mutex &pn->all_ppp_mutex &c->lock irq_context: 0 &mm->mmap_lock stock_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 sb_writers#4 mapping.invalidate_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 ppp_mutex rtnl_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem &sem->wait_lock irq_context: 0 ppp_mutex rtnl_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem &rq->__lock irq_context: 0 ppp_mutex rtnl_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 ppp_mutex rtnl_mutex &sem->wait_lock irq_context: 0 ppp_mutex rtnl_mutex &root->kernfs_rwsem &sem->wait_lock irq_context: 0 ppp_mutex rtnl_mutex &root->kernfs_rwsem &rq->__lock irq_context: 0 ppp_mutex rtnl_mutex &root->kernfs_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 ppp_mutex rtnl_mutex uevent_sock_mutex rcu_read_lock rcu_node_0 irq_context: 0 ppp_mutex rtnl_mutex uevent_sock_mutex rcu_read_lock &rq->__lock irq_context: 0 ppp_mutex rtnl_mutex uevent_sock_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 nfnl_subsys_ipset nlk_cb_mutex-NETFILTER irq_context: 0 nfnl_subsys_ipset nlk_cb_mutex-NETFILTER fs_reclaim irq_context: 0 nfnl_subsys_ipset nlk_cb_mutex-NETFILTER fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 nfnl_subsys_ipset nlk_cb_mutex-NETFILTER pool_lock#2 irq_context: 0 nfnl_subsys_ipset nlk_cb_mutex-NETFILTER &c->lock irq_context: 0 nfnl_subsys_ipset nlk_cb_mutex-NETFILTER ip_set_ref_lock irq_context: 0 nfnl_subsys_ipset nlk_cb_mutex-NETFILTER &rq->__lock irq_context: 0 nfnl_subsys_ipset nlk_cb_mutex-NETFILTER &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 nfnl_subsys_ipset nlk_cb_mutex-NETFILTER rcu_read_lock rcu_node_0 irq_context: 0 nfnl_subsys_ipset nlk_cb_mutex-NETFILTER rcu_read_lock &rq->__lock irq_context: 0 nfnl_subsys_ipset nlk_cb_mutex-NETFILTER rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &ppp->wlock &list->lock#28 irq_context: 0 &ppp->wlock &obj_hash[i].lock irq_context: 0 &ppp->wlock pool_lock#2 irq_context: 0 nfnl_subsys_ipset nlk_cb_mutex-NETFILTER rcu_read_lock pool_lock#2 irq_context: 0 nfnl_subsys_ipset nlk_cb_mutex-NETFILTER rcu_read_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 nfnl_subsys_ipset nlk_cb_mutex-NETFILTER rcu_read_lock rcu_read_lock_bh pool_lock#2 irq_context: 0 nfnl_subsys_ipset nlk_cb_mutex-NETFILTER rcu_read_lock &rcu_state.expedited_wq irq_context: 0 nfnl_subsys_ipset nlk_cb_mutex-NETFILTER rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 nfnl_subsys_ipset nlk_cb_mutex-NETFILTER rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 nfnl_subsys_ipset nlk_cb_mutex-NETFILTER rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sem->wait_lock irq_context: 0 &type->i_mutex_dir_key#3 sb_writers#4 jbd2_handle &journal->j_wait_updates &p->pi_lock irq_context: 0 &type->i_mutex_dir_key#3 sb_writers#4 jbd2_handle &journal->j_wait_updates &p->pi_lock &rq->__lock irq_context: 0 &type->i_mutex_dir_key#3 sb_writers#4 jbd2_handle &journal->j_wait_updates &p->pi_lock &rq->__lock 
&per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 nfnl_subsys_ipset nlk_cb_mutex-NETFILTER rlock-AF_NETLINK irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 pool_lock#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_raw_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &journal->j_wait_updates irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_lock_key#22 &xa->xa_lock#6 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &lruvec->lru_lock irq_context: 0 sb_writers#4 mapping.invalidate_lock rcu_read_lock &____s->seqcount#2 irq_context: 0 sb_writers#4 mapping.invalidate_lock rcu_read_lock &____s->seqcount irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &journal->j_state_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &journal->j_state_lock &journal->j_wait_commit irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &journal->j_state_lock &journal->j_wait_commit &p->pi_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &journal->j_state_lock &journal->j_wait_commit &p->pi_lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &journal->j_state_lock &journal->j_wait_commit &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &journal->j_state_lock &journal->j_wait_commit &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &journal->j_wait_commit irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &journal->j_wait_done_commit irq_context: 0 sb_writers#4 sb_writers#4 jbd2_handle bit_wait_table + i irq_context: 0 sb_writers#4 mapping.invalidate_lock rcu_read_lock &rcu_state.expedited_wq irq_context: 0 sb_writers#4 mapping.invalidate_lock rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 sb_writers#4 mapping.invalidate_lock rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 mapping.invalidate_lock rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 sb_writers#4 &____s->seqcount#2 irq_context: 0 vlan_ioctl_mutex rtnl_mutex rtnl_mutex.wait_lock irq_context: 0 sb_writers#4 mapping.invalidate_lock fs_reclaim irq_context: 0 sb_writers#4 mapping.invalidate_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#4 mapping.invalidate_lock &obj_hash[i].lock irq_context: 0 sb_writers#4 mapping.invalidate_lock rcu_read_lock &pool->lock irq_context: 0 sb_writers#4 mapping.invalidate_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 sb_writers#4 mapping.invalidate_lock rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 sb_writers#4 mapping.invalidate_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 sb_writers#4 mapping.invalidate_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 mapping.invalidate_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#8 rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 &sb->s_type->i_mutex_key#8 rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#8 rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock &rdev->wiphy.mtx &____s->seqcount#2 
irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle rcu_read_lock &rq->__lock irq_context: 0 rcu_read_lock &n->lock pool_lock#2 irq_context: 0 rcu_read_lock &ul->lock#2 irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock rcu_read_lock &r->producer_lock irq_context: 0 sb_writers#4 mapping.invalidate_lock rcu_read_lock fill_pool_map-wait-type-override rcu_node_0 irq_context: 0 sb_writers#4 mapping.invalidate_lock rcu_read_lock fill_pool_map-wait-type-override &rq->__lock irq_context: 0 sb_writers#4 mapping.invalidate_lock rcu_read_lock fill_pool_map-wait-type-override &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock &____s->seqcount#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock &____s->seqcount irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &sch->q.lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &sch->q.lock tk_core.seq.seqcount irq_context: 0 rtnl_mutex rcu_read_lock rcu_read_lock_bh &____s->seqcount irq_context: 0 ppp_mutex rtnl_mutex lock kernfs_idr_lock &c->lock irq_context: 0 (wq_completion)wg-crypt-wg1#8 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-crypt-wg1#8 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: 0 (wq_completion)wg-crypt-wg1#8 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-crypt-wg0#8 irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &bgl->locks[i].lock irq_context: 0 cb_lock &rdev->wiphy.mtx &rq->__lock irq_context: 0 sb_writers#8 &of->mutex kn->active#5 rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 __ip_vs_mutex nf_hook_mutex fs_reclaim irq_context: 0 sb_writers#4 sb_writers#4 batched_entropy_u8.lock irq_context: 0 sb_writers#4 sb_writers#4 kfence_freelist_lock irq_context: 0 sb_writers#4 sb_writers#4 &meta->lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &journal->j_state_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &journal->j_state_lock tk_core.seq.seqcount irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &journal->j_state_lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &journal->j_state_lock &base->lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &journal->j_state_lock &base->lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem bit_wait_table + i irq_context: 0 (wq_completion)events_unbound connector_reaper_work rcu_read_lock &rq->__lock irq_context: 0 
(wq_completion)events_unbound connector_reaper_work rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock &n->list_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock &n->list_lock &c->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 remove_cache_srcu rcu_node_0 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 remove_cache_srcu rcu_read_lock rcu_node_0 irq_context: 0 &ret->b_state_lock &journal->j_list_lock &meta->lock irq_context: 0 &ret->b_state_lock &journal->j_list_lock kfence_freelist_lock irq_context: 0 sb_writers#4 mapping.invalidate_lock &cfs_rq->removed.lock irq_context: 0 ppp_mutex rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 ppp_mutex rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex rcu_read_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 rtnl_mutex rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&aux->work) &meta->lock irq_context: 0 (wq_completion)events (work_completion)(&aux->work) kfence_freelist_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &journal->j_wait_updates &p->pi_lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &sbi->s_orphan_lock bit_wait_table + i irq_context: 0 sb_writers#4 sb_writers#4 jbd2_handle &ret->b_state_lock &journal->j_list_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &sch->q.lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &sch->q.lock tk_core.seq.seqcount irq_context: 0 sb_writers#4 sb_writers#4 quarantine_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle irq_context: softirq (&n->timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock rcu_read_lock &c->lock irq_context: softirq (&n->timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock rcu_read_lock pool_lock#2 irq_context: softirq (&n->timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock rcu_read_lock tk_core.seq.seqcount irq_context: softirq (&n->timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock rcu_read_lock &obj_hash[i].lock irq_context: 0 sb_writers#4 sb_writers#4 jbd2_handle mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#4 sb_writers#4 jbd2_handle &____s->seqcount irq_context: 0 sb_writers#4 sb_writers#4 jbd2_handle pool_lock#2 irq_context: 0 sb_writers#4 sb_writers#4 jbd2_handle rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#4 sb_writers#4 jbd2_handle &obj_hash[i].lock irq_context: 0 sb_writers#4 mapping.invalidate_lock &folio_wait_table[i] irq_context: 0 sb_writers#4 sb_writers#4 &journal->j_state_lock &journal->j_wait_transaction_locked irq_context: 0 sb_writers#4 sb_writers#4 jbd2_handle irq_context: 0 cb_lock &rdev->wiphy.mtx rcu_read_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 cb_lock &rdev->wiphy.mtx rcu_read_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 
irq_context: 0 cb_lock &rdev->wiphy.mtx rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 cb_lock &rdev->wiphy.mtx rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &fq->mq_flush_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &fq->mq_flush_lock tk_core.seq.seqcount irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &fq->mq_flush_lock &q->requeue_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &fq->mq_flush_lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &fq->mq_flush_lock rcu_read_lock &pool->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &fq->mq_flush_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &fq->mq_flush_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &fq->mq_flush_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &fq->mq_flush_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 vlan_ioctl_mutex rtnl_mutex &vlan_netdev_addr_lock_key/1 irq_context: 0 vlan_ioctl_mutex rtnl_mutex &vlan_netdev_addr_lock_key/1 _xmit_ETHER irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &x->wait#26 irq_context: 0 vlan_ioctl_mutex vlan_ioctl_mutex.wait_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &root->kernfs_rwsem &rq->__lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &root->kernfs_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 sb_writers#4 fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_read_lock rcu_node_0 irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 vlan_ioctl_mutex rtnl_mutex &obj_hash[i].lock pool_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &journal->j_state_lock &journal->j_wait_transaction_locked irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle irq_context: 0 vlan_ioctl_mutex rtnl_mutex &____s->seqcount#2 irq_context: 0 (wq_completion)gid-cache-wq (work_completion)(&ndev_work->work) &rq->__lock irq_context: 0 vlan_ioctl_mutex &p->pi_lock &cfs_rq->removed.lock irq_context: 0 vlan_ioctl_mutex.wait_lock irq_context: 0 cb_lock rtnl_mutex &cfs_rq->removed.lock irq_context: 0 sb_writers#4 remove_cache_srcu pool_lock#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &sb->s_type->i_lock_key#22 &xa->xa_lock#6 &obj_hash[i].lock pool_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &rcu_state.expedited_wq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle &ei->i_data_sem &____s->seqcount#2 
irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &n->list_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &n->list_lock &c->lock irq_context: softirq (&n->timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock rcu_read_lock rlock-AF_PACKET irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &ei->i_es_lock &obj_hash[i].lock irq_context: softirq bit_wait_table + i &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &ei->i_es_lock key#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle rcu_read_lock &rcu_state.gp_wq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &journal->j_state_lock tk_core.seq.seqcount irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &journal->j_state_lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &journal->j_state_lock &base->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &journal->j_state_lock &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ret->b_state_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle bit_wait_table + i irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 remove_cache_srcu rcu_read_lock &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 remove_cache_srcu rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&data->fib_event_work) &data->fib_lock &cfs_rq->removed.lock irq_context: 0 cb_lock genl_mutex rcu_read_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock &____s->seqcount#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock &____s->seqcount irq_context: 0 sb_writers#4 sb_writers#4 rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#4 sb_writers#4 rcu_read_lock &rq->__lock irq_context: 0 sb_writers#4 sb_writers#4 rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#8 &of->mutex kn->active#5 uevent_sock_mutex remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_INET fs_reclaim 
mmu_notifier_invalidate_range_start &cfs_rq->removed.lock irq_context: 0 sk_lock-AF_INET fs_reclaim mmu_notifier_invalidate_range_start &obj_hash[i].lock irq_context: 0 sb_writers#8 &of->mutex kn->active#5 uevent_sock_mutex remove_cache_srcu pool_lock#2 irq_context: 0 sb_writers#4 mapping.invalidate_lock &xa->xa_lock#6 &____s->seqcount#2 irq_context: 0 sb_writers#4 mapping.invalidate_lock &xa->xa_lock#6 &____s->seqcount irq_context: 0 sb_writers#4 mapping.invalidate_lock &xa->xa_lock#6 &n->list_lock irq_context: 0 sb_writers#4 mapping.invalidate_lock &xa->xa_lock#6 &n->list_lock &c->lock irq_context: 0 sb_writers#4 sb_writers#4 jbd2_handle &mapping->private_lock irq_context: 0 &sb->s_type->i_mutex_key#10 &pnsocks.lock irq_context: 0 &sb->s_type->i_mutex_key#10 resource_mutex irq_context: 0 &sb->s_type->i_mutex_key#10 clock-AF_PHONET irq_context: 0 &sb->s_type->i_mutex_key#10 rlock-AF_PHONET irq_context: 0 &sb->s_type->i_mutex_key#10 &____s->seqcount irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &sbi->s_orphan_lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &sbi->s_orphan_lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &sbi->s_orphan_lock pool_lock#2 irq_context: 0 &journal->j_state_lock &journal->j_wait_done_commit &p->pi_lock irq_context: 0 &journal->j_state_lock &journal->j_wait_done_commit &p->pi_lock &rq->__lock irq_context: 0 &journal->j_state_lock &journal->j_wait_done_commit &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &ep->mtx rlock-AF_PACKET irq_context: 0 &ep->mtx wlock-AF_PACKET irq_context: 0 sb_writers#4 mapping.invalidate_lock &rcu_state.expedited_wq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 stock_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 pcpu_lock stock_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 pool_lock irq_context: 0 (wq_completion)events_unbound (reaper_work).work &cfs_rq->removed.lock irq_context: 0 (wq_completion)events_unbound (reaper_work).work rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)events_unbound (reaper_work).work rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 rcu_read_lock pgd_lock irq_context: 0 sb_writers#4 rcu_read_lock stock_lock irq_context: 0 sb_writers#4 rcu_read_lock rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#4 rcu_read_lock key irq_context: 0 sb_writers#4 rcu_read_lock pcpu_lock irq_context: 0 sb_writers#4 rcu_read_lock percpu_counters_lock irq_context: 0 sb_writers#4 rcu_read_lock pcpu_lock stock_lock irq_context: 0 sb_writers#4 rcu_read_lock quarantine_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 fs_reclaim &rq->__lock irq_context: 0 tracepoints_mutex reg_lock irq_context: 0 tracepoints_mutex reg_lock fs_reclaim irq_context: 0 tracepoints_mutex reg_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 tracepoints_mutex reg_lock pool_lock#2 irq_context: 0 &mm->mmap_lock lock#10 irq_context: 0 &mm->mmap_lock lock#10 rcu_read_lock kernfs_rename_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &mm->mmap_lock lock#10 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &mm->mmap_lock lock#10 rcu_read_lock kernfs_rename_lock irq_context: 0 rtnl_mutex &ndev->lock &ifa->lock batched_entropy_u32.lock crngs.lock irq_context: 0 &mm->mmap_lock lock#10 irq_context: 0 &mm->mmap_lock lock#10 rcu_read_lock kernfs_rename_lock irq_context: softirq 
(&n->timer) rcu_read_lock &ndev->lock irq_context: softirq (&n->timer) icmp_global.lock irq_context: softirq (&n->timer) icmp_global.lock batched_entropy_u8.lock irq_context: softirq (&n->timer) k-slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock pool_lock#2 irq_context: softirq (&n->timer) k-slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: softirq (&n->timer) k-slock-AF_INET6 rcu_read_lock pool_lock#2 irq_context: softirq (&n->timer) k-slock-AF_INET6 rcu_read_lock rcu_read_lock pool_lock#2 irq_context: softirq (&n->timer) k-slock-AF_INET6 rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: softirq (&n->timer) k-slock-AF_INET6 rcu_read_lock rcu_read_lock &obj_hash[i].lock pool_lock irq_context: softirq (&n->timer) k-slock-AF_INET6 rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: softirq (&n->timer) k-slock-AF_INET6 rcu_read_lock rcu_read_lock &tbl->lock irq_context: softirq (&n->timer) k-slock-AF_INET6 rcu_read_lock rcu_read_lock &n->lock irq_context: softirq (&n->timer) k-slock-AF_INET6 rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: softirq (&n->timer) k-slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: softirq (&n->timer) k-slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: softirq (&n->timer) k-slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: softirq (&n->timer) k-slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: softirq (&n->timer) k-slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: softirq (&n->timer) &dir->lock irq_context: softirq (&n->timer) stock_lock irq_context: softirq (&n->timer) rcu_read_lock id_table_lock irq_context: softirq (&n->timer) &n->lock irq_context: softirq (&n->timer) nl_table_lock irq_context: softirq (&n->timer) nl_table_wait.lock irq_context: 0 &xt[i].mutex rcu_read_lock &cfs_rq->removed.lock irq_context: 0 &xt[i].mutex rcu_read_lock &obj_hash[i].lock irq_context: 0 &pipe->mutex/1 &mm->mmap_lock lock#10 irq_context: 0 &pipe->mutex/1 &mm->mmap_lock lock#10 rcu_read_lock kernfs_rename_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock rcu_read_lock &obj_hash[i].lock irq_context: 0 dup_mmap_sem &mm->mmap_lock lock#10 irq_context: 0 dup_mmap_sem &mm->mmap_lock lock#10 rcu_read_lock kernfs_rename_lock irq_context: 0 dup_mmap_sem 
&mm->mmap_lock &mm->mmap_lock/1 lock#10 irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 lock#10 rcu_read_lock kernfs_rename_lock irq_context: 0 rtnl_mutex &br->lock batched_entropy_u8.lock irq_context: 0 rtnl_mutex &br->lock kfence_freelist_lock irq_context: 0 &tfile->napi_mutex irq_context: 0 &tfile->napi_mutex &____s->seqcount irq_context: 0 &tfile->napi_mutex pool_lock#2 irq_context: 0 &tfile->napi_mutex &mm->mmap_lock irq_context: 0 &tfile->napi_mutex pcpu_lock irq_context: 0 &tfile->napi_mutex &obj_hash[i].lock irq_context: 0 rtnl_mutex &br->lock &br->multicast_lock &base->lock irq_context: 0 rtnl_mutex &br->lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET6 stock_lock irq_context: 0 sk_lock-AF_INET6 mmu_notifier_invalidate_range_start irq_context: 0 sk_lock-AF_INET6 &sb->s_type->i_lock_key#8 irq_context: 0 sk_lock-AF_INET6 &dir->lock irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6/1 irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6/1 k-slock-AF_INET6 irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6/1 pool_lock#2 irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6/1 &dir->lock irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6/1 fs_reclaim irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6/1 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sk_lock-AF_INET6 k-slock-AF_INET6 irq_context: 0 sk_lock-AF_INET6 k-clock-AF_INET6 irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 &rq->__lock irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 k-slock-AF_INET6 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 k-sk_lock-AF_INET6/1 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 k-sk_lock-AF_INET6/1 k-slock-AF_INET6 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 k-slock-AF_INET6 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 &sb->s_type->i_lock_key#8 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 &xa->xa_lock#6 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 &fsnotify_mark_srcu irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 k-sk_lock-AF_INET6/1 k-clock-AF_INET6 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 k-sk_lock-AF_INET6/1 k-slock-AF_INET6 &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 k-sk_lock-AF_INET6/1 k-slock-AF_INET6 pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 k-sk_lock-AF_INET6/1 k-slock-AF_INET6 krc.lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 k-sk_lock-AF_INET6/1 k-slock-AF_INET6 elock-AF_INET6 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 k-sk_lock-AF_INET6/1 &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 k-sk_lock-AF_INET6/1 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 &dir->lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 stock_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 &msk->pm.lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 elock-AF_INET6 irq_context: 0 &sb->s_type->i_mutex_key#10 (work_completion)(&msk->work) irq_context: 0 &sig->cred_guard_mutex tomoyo_ss &mm->mmap_lock lock#10 irq_context: 0 &sig->cred_guard_mutex tomoyo_ss &mm->mmap_lock lock#10 rcu_read_lock 
kernfs_rename_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 lock#10 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 lock#10 rcu_read_lock kernfs_rename_lock irq_context: 0 tracepoints_mutex reg_lock rcu_state.exp_mutex rcu_node_0 irq_context: 0 tracepoints_mutex reg_lock rcu_state.exp_mutex &obj_hash[i].lock irq_context: 0 tracepoints_mutex reg_lock rcu_state.exp_mutex rcu_read_lock &pool->lock irq_context: 0 tracepoints_mutex reg_lock rcu_state.exp_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 tracepoints_mutex reg_lock rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 tracepoints_mutex reg_lock rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 tracepoints_mutex reg_lock rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 tracepoints_mutex reg_lock rcu_state.exp_mutex &rq->__lock irq_context: 0 tracepoints_mutex reg_lock rcu_state.exp_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 tracepoints_mutex reg_lock rcu_state.exp_mutex &rnp->exp_wq[1] irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem remove_cache_srcu &c->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem remove_cache_srcu &n->list_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem remove_cache_srcu &obj_hash[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem remove_cache_srcu pool_lock#2 irq_context: 0 tracepoints_mutex reg_lock &obj_hash[i].lock irq_context: 0 rtnl_mutex &im->lock pool_lock#2 irq_context: 0 rtnl_mutex &idev->mc_lock &bridge_netdev_addr_lock_key/1 &c->lock irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &table->lock#3 irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 sb_writers#3 oom_adj_mutex &rq->__lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock &n->list_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 rtnl_mutex _xmit_NETROM#2 irq_context: 0 rtnl_mutex &this->info_list_lock irq_context: softirq (&n->timer) &n->lock &c->lock 
irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 &n->list_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 &n->list_lock &c->lock irq_context: 0 sb_writers#4 mapping.invalidate_lock rcu_read_lock &rcu_state.gp_wq irq_context: 0 sb_writers#4 mapping.invalidate_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 sb_writers#4 mapping.invalidate_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 mapping.invalidate_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events deferred_process_work rtnl_mutex &meta->lock irq_context: 0 (wq_completion)events deferred_process_work rtnl_mutex kfence_freelist_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &idev->mc_lock &bridge_netdev_addr_lock_key/1 irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &idev->mc_lock &bridge_netdev_addr_lock_key/1 &c->lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &idev->mc_lock &bridge_netdev_addr_lock_key/1 pool_lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock krc.lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&tbl->gc_work)->work) &tbl->lock pool_lock#2 irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&tbl->gc_work)->work) &tbl->lock &c->lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&tbl->gc_work)->work) &tbl->lock &n->lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&tbl->gc_work)->work) &tbl->lock nl_table_lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&tbl->gc_work)->work) &tbl->lock nl_table_wait.lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&tbl->gc_work)->work) &tbl->lock rcu_read_lock id_table_lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&tbl->gc_work)->work) &tbl->lock &dir->lock#2 irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&tbl->gc_work)->work) &tbl->lock krc.lock irq_context: 0 &base->lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 &base->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &table->lock#3 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_TUNNEL6#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_TUNNEL6#2 rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_TUNNEL6#2 rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_TUNNEL6#2 rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_TUNNEL6#2 &obj_hash[i].lock irq_context: 0 (wq_completion)mld 
(work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_TUNNEL6#2 pool_lock#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &ndev->lock irq_context: 0 rtnl_mutex &tun->lock irq_context: 0 rtnl_mutex wlock-AF_UNSPEC irq_context: 0 rtnl_mutex elock-AF_UNSPEC irq_context: 0 rtnl_mutex lweventlist_lock &n->list_lock irq_context: 0 rtnl_mutex lweventlist_lock &n->list_lock &c->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem quarantine_lock irq_context: 0 &sb->s_type->i_mutex_key#10 &rq->__lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq (&peer->timer_persistent_keepalive) rcu_read_lock_bh rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &obj_hash[i].lock irq_context: softirq (&peer->timer_persistent_keepalive) rcu_read_lock_bh rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &base->lock irq_context: softirq (&peer->timer_persistent_keepalive) rcu_read_lock_bh rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &base->lock &obj_hash[i].lock irq_context: 0 &tun->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &cfs_rq->removed.lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &n->list_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &n->list_lock &c->lock irq_context: 0 tracepoints_mutex reg_lock rcu_state.exp_mutex &rnp->exp_wq[2] irq_context: 0 (wq_completion)events (work_completion)(&aux->work) pack_mutex text_mutex text_mutex.wait_lock irq_context: 0 (wq_completion)events (work_completion)(&aux->work) pack_mutex text_mutex &pool->lock irq_context: 0 (wq_completion)events (work_completion)(&aux->work) pack_mutex text_mutex &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&aux->work) pack_mutex text_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 tracepoints_mutex cpu_hotplug_lock jump_label_mutex &p->pi_lock &rq->__lock irq_context: 0 tracepoints_mutex cpu_hotplug_lock jump_label_mutex &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_unbound (work_completion)(&map->work) rcu_state.exp_mutex &rnp->exp_wq[3] irq_context: 0 tasklist_lock &sighand->siglock &(&sig->stats_lock)->lock &____s->seqcount#5 pidmap_lock &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock remove_cache_srcu pool_lock#2 irq_context: 0 &type->i_mutex_dir_key#3 sb_writers#4 remove_cache_srcu pool_lock#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &rcu_state.gp_wq irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &rcu_state.gp_wq &p->pi_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 
&rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock rcu_read_lock &c->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock rcu_read_lock &n->list_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock rcu_read_lock &n->list_lock &c->lock irq_context: 0 sb_writers#4 sb_writers#4 jbd2_handle &ret->b_state_lock bit_wait_table + i irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &table->lock#3 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_read_lock &rcu_state.expedited_wq irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &ndev->lock irq_context: 0 cb_lock (console_sem).lock irq_context: 0 cb_lock console_lock console_srcu console_owner_lock irq_context: 0 cb_lock console_lock console_srcu console_owner irq_context: 0 cb_lock console_lock console_srcu console_owner &port_lock_key irq_context: 0 cb_lock console_lock console_srcu console_owner console_owner_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &ei->xattr_sem &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &ei->xattr_sem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock once_lock irq_context: 0 cb_lock once_lock crngs.lock irq_context: 0 cb_lock rcu_read_lock &pool->lock irq_context: 0 cb_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 cb_lock rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 cb_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 cb_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 cb_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock nf_hook_mutex irq_context: 0 cb_lock nf_hook_mutex fs_reclaim irq_context: 0 cb_lock nf_hook_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 cb_lock nf_hook_mutex stock_lock irq_context: 0 cb_lock nf_hook_mutex pool_lock#2 irq_context: 0 cb_lock cpu_hotplug_lock irq_context: 0 cb_lock &ilan->xlat.locks irq_context: 0 cb_lock &ilan->xlat.locks rcu_read_lock rhashtable_bucket irq_context: 0 sb_writers#8 tomoyo_ss &pcp->lock &zone->lock &____s->seqcount irq_context: 0 cb_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock 
rcu_read_lock rcu_read_lock_bh rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &____s->seqcount irq_context: 0 sb_writers#8 kn->active#5 &kernfs_locks->open_file_mutex[count] rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &xt[i].mutex &obj_hash[i].lock pool_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &n->list_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &n->list_lock &c->lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &journal->j_state_lock &journal->j_wait_commit &p->pi_lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem bit_wait_table + i irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem &rq->__lock &cfs_rq->removed.lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &anon_vma->rwsem &cfs_rq->removed.lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &anon_vma->rwsem &obj_hash[i].lock irq_context: softirq (&n->timer) k-slock-AF_INET6 rcu_read_lock rcu_read_lock &c->lock irq_context: softirq (&n->timer) k-slock-AF_INET6 rcu_read_lock rcu_read_lock &n->list_lock irq_context: softirq (&n->timer) k-slock-AF_INET6 rcu_read_lock rcu_read_lock &n->list_lock &c->lock irq_context: softirq (&n->timer) k-slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rlock-AF_PACKET irq_context: 0 &kernfs_locks->open_file_mutex[count] rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 sb_internal remove_cache_srcu pool_lock#2 irq_context: 0 sb_writers#4 sb_writers#4 jbd2_handle rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#4 sb_writers#4 jbd2_handle rcu_read_lock &rq->__lock irq_context: 0 sb_writers#4 sb_writers#4 jbd2_handle rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 vlan_ioctl_mutex rtnl_mutex &root->kernfs_rwsem &obj_hash[i].lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &root->kernfs_rwsem pool_lock#2 irq_context: 0 vlan_ioctl_mutex rtnl_mutex &root->kernfs_rwsem kernfs_idr_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex krc.lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex netpoll_srcu irq_context: 0 vlan_ioctl_mutex rtnl_mutex &pn->hash_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &dev->tx_global_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &dev->tx_global_lock &vlan_netdev_xmit_lock_key irq_context: 0 vlan_ioctl_mutex rtnl_mutex &dev->tx_global_lock &obj_hash[i].lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_state.exp_mutex rcu_node_0 irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_state.exp_mutex &obj_hash[i].lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_state.exp_mutex &rq->__lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex 
rcu_state.exp_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-crypt-wg2#9 (work_completion)(&peer->transmit_packet_work) &rq->__lock irq_context: 0 rtnl_mutex &idev->mc_lock rcu_node_0 irq_context: 0 vlan_ioctl_mutex rtnl_mutex &sch->q.lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &vlan_netdev_addr_lock_key/1 _xmit_ETHER &obj_hash[i].lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &vlan_netdev_addr_lock_key/1 _xmit_ETHER pool_lock#2 irq_context: 0 vlan_ioctl_mutex rtnl_mutex &vlan_netdev_addr_lock_key/1 _xmit_ETHER krc.lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex _xmit_ETHER &obj_hash[i].lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex _xmit_ETHER krc.lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex __ip_vs_mutex irq_context: 0 vlan_ioctl_mutex rtnl_mutex __ip_vs_mutex &ipvs->dest_trash_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &vlan_netdev_addr_lock_key/1 &obj_hash[i].lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &vlan_netdev_addr_lock_key/1 pool_lock#2 irq_context: 0 vlan_ioctl_mutex rtnl_mutex &vlan_netdev_addr_lock_key/1 krc.lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &im->lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex fib_info_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex fib_info_lock &obj_hash[i].lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex fib_info_lock pool_lock#2 irq_context: 0 vlan_ioctl_mutex rtnl_mutex class irq_context: 0 vlan_ioctl_mutex rtnl_mutex (&tbl->proxy_timer) irq_context: 0 vlan_ioctl_mutex rtnl_mutex flowtable_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &dir->lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_read_lock &tb->tb6_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_read_lock &tb->tb6_lock &net->ipv6.fib6_walker_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_read_lock &tb->tb6_lock &net->ipv6.fib6_walker_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_read_lock &tb->tb6_lock rt6_exception_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_read_lock &tb->tb6_lock pool_lock#2 irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_read_lock &tb->tb6_lock nl_table_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_read_lock &tb->tb6_lock &obj_hash[i].lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_read_lock &tb->tb6_lock nl_table_wait.lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_read_lock &tb->tb6_lock rcu_read_lock pool_lock#2 irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_read_lock &tb->tb6_lock rcu_read_lock &data->fib_event_queue_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_read_lock &tb->tb6_lock rcu_read_lock rcu_read_lock &pool->lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_read_lock &tb->tb6_lock rcu_read_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_read_lock &tb->tb6_lock rcu_read_lock rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_read_lock &tb->tb6_lock &c->lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_read_lock &tb->tb6_lock &n->list_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_read_lock &tb->tb6_lock &n->list_lock &c->lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &tbl->lock &n->lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &tbl->lock pool_lock#2 irq_context: 0 vlan_ioctl_mutex rtnl_mutex &tbl->lock &n->lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &tbl->lock &n->lock &____s->seqcount#9 irq_context: 0 vlan_ioctl_mutex rtnl_mutex &tbl->lock nl_table_lock irq_context: 0 
vlan_ioctl_mutex rtnl_mutex &tbl->lock &obj_hash[i].lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &tbl->lock nl_table_wait.lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &tbl->lock rcu_read_lock lock#8 irq_context: 0 vlan_ioctl_mutex rtnl_mutex &tbl->lock rcu_read_lock id_table_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &tbl->lock &dir->lock#2 irq_context: 0 vlan_ioctl_mutex rtnl_mutex &tbl->lock krc.lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &net->ipv6.addrconf_hash_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &net->ipv6.addrconf_hash_lock &obj_hash[i].lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &ndev->lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &ndev->lock &obj_hash[i].lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &ndev->lock &base->lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &ndev->lock &base->lock &obj_hash[i].lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &rq->__lock &cfs_rq->removed.lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &ifa->lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &idev->mc_lock &vlan_netdev_addr_lock_key/1 irq_context: 0 vlan_ioctl_mutex rtnl_mutex &idev->mc_lock &vlan_netdev_addr_lock_key/1 &obj_hash[i].lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &idev->mc_lock &vlan_netdev_addr_lock_key/1 pool_lock#2 irq_context: 0 vlan_ioctl_mutex rtnl_mutex &idev->mc_lock &vlan_netdev_addr_lock_key/1 krc.lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &idev->mc_lock &base->lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &idev->mc_lock &base->lock &obj_hash[i].lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &tb->tb6_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_read_lock &c->lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_read_lock pool_lock#2 irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_read_lock &obj_hash[i].lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_read_lock rcu_read_lock &pool->lock/1 irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_read_lock rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_read_lock rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_read_lock rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_read_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_read_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_read_lock &n->list_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_read_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_TUNNEL6#2 rcu_read_lock &c->lock irq_context: 0 &nft_net->commit_mutex stock_lock irq_context: 0 &nft_net->commit_mutex batched_entropy_u32.lock irq_context: 0 &nft_net->commit_mutex batched_entropy_u32.lock crngs.lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_read_lock &dir->lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_read_lock rcu_read_lock &pool->lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_read_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_read_lock rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_read_lock &net->sctp.local_addr_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_read_lock 
&net->sctp.local_addr_lock &net->sctp.addr_wq_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_read_lock &net->sctp.local_addr_lock &net->sctp.addr_wq_lock pool_lock#2 irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_read_lock &net->sctp.local_addr_lock &net->sctp.addr_wq_lock &obj_hash[i].lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_read_lock &net->sctp.local_addr_lock &net->sctp.addr_wq_lock &base->lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_read_lock &net->sctp.local_addr_lock &net->sctp.addr_wq_lock &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#4 mapping.invalidate_lock batched_entropy_u8.lock irq_context: 0 sb_writers#4 mapping.invalidate_lock kfence_freelist_lock irq_context: 0 &nft_net->commit_mutex &____s->seqcount irq_context: 0 &nft_net->commit_mutex rcu_read_lock pool_lock#2 irq_context: 0 &nft_net->commit_mutex rcu_state.exp_mutex &rnp->exp_wq[1] irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_read_lock krc.lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_read_lock &rcu_state.gp_wq irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 vlan_ioctl_mutex rtnl_mutex &idev->mc_lock rcu_read_lock &pool->lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &idev->mc_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_read_lock rcu_read_lock &pool->lock/1 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 remove_cache_srcu pool_lock#2 irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_read_lock &net->sctp.local_addr_lock &net->sctp.addr_wq_lock &c->lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &rnp->exp_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_state.exp_mutex irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_state.exp_mutex rcu_state.exp_mutex.wait_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock &c->lock irq_context: 0 &nft_net->commit_mutex (work_completion)(&(&priv->gc_work)->work) irq_context: 0 &nft_net->commit_mutex rcu_state.barrier_mutex irq_context: 0 &nft_net->commit_mutex rcu_state.barrier_mutex rcu_state.barrier_lock irq_context: 0 &nft_net->commit_mutex rcu_state.barrier_mutex rcu_state.barrier_lock &obj_hash[i].lock irq_context: 0 &nft_net->commit_mutex rcu_state.barrier_mutex &x->wait#24 irq_context: 0 &nft_net->commit_mutex rcu_state.barrier_mutex &rq->__lock irq_context: 0 &nft_net->commit_mutex rcu_state.barrier_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_state.exp_mutex &rnp->exp_wq[2] irq_context: 0 vlan_ioctl_mutex rtnl_mutex &idev->mc_query_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &idev->mc_query_lock &obj_hash[i].lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex (work_completion)(&(&idev->mc_report_work)->work) irq_context: 0 vlan_ioctl_mutex rtnl_mutex &net->ipv6.fib6_gc_lock rcu_read_lock &tb->tb6_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &net->ipv6.fib6_gc_lock rcu_read_lock &tb->tb6_lock &net->ipv6.fib6_walker_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &net->ipv6.fib6_gc_lock &obj_hash[i].lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex 
cpu_hotplug_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex cpu_hotplug_lock &list->lock#5 irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_state.exp_mutex &rnp->exp_wq[3] irq_context: 0 vlan_ioctl_mutex rtnl_mutex bpf_devs_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &hwstats->hwsdev_list_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &in_dev->mc_tomb_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex (inetaddr_chain).rwsem irq_context: 0 vlan_ioctl_mutex rtnl_mutex (inetaddr_chain).rwsem &c->lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex (inetaddr_chain).rwsem &n->list_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex (inetaddr_chain).rwsem &n->list_lock &c->lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex (inetaddr_chain).rwsem &rq->__lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex (inetaddr_chain).rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &nft_net->commit_mutex (work_completion)(&ht->run_work) irq_context: 0 &nft_net->commit_mutex &ht->mutex irq_context: 0 &nft_net->commit_mutex &ht->mutex &obj_hash[i].lock irq_context: 0 &nft_net->commit_mutex &ht->mutex pool_lock#2 irq_context: 0 &nft_net->commit_mutex &obj_hash[i].lock pool_lock irq_context: 0 &nft_net->commit_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 vlan_ioctl_mutex rtnl_mutex (inetaddr_chain).rwsem pool_lock#2 irq_context: 0 vlan_ioctl_mutex rtnl_mutex (inetaddr_chain).rwsem &obj_hash[i].lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex (inetaddr_chain).rwsem rcu_read_lock &pool->lock/1 irq_context: 0 vlan_ioctl_mutex rtnl_mutex (inetaddr_chain).rwsem rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex (inetaddr_chain).rwsem rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 vlan_ioctl_mutex rtnl_mutex (inetaddr_chain).rwsem rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex (inetaddr_chain).rwsem rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex (inetaddr_chain).rwsem rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 vlan_ioctl_mutex rtnl_mutex (inetaddr_chain).rwsem rcu_read_lock pool_lock#2 irq_context: 0 vlan_ioctl_mutex rtnl_mutex (inetaddr_chain).rwsem rcu_read_lock &data->fib_event_queue_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex (inetaddr_chain).rwsem rcu_read_lock rcu_read_lock &pool->lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex (inetaddr_chain).rwsem rcu_read_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex (inetaddr_chain).rwsem rcu_read_lock rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 vlan_ioctl_mutex rtnl_mutex (inetaddr_chain).rwsem fs_reclaim irq_context: 0 vlan_ioctl_mutex rtnl_mutex (inetaddr_chain).rwsem fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 vlan_ioctl_mutex rtnl_mutex (inetaddr_chain).rwsem nl_table_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex (inetaddr_chain).rwsem &cfs_rq->removed.lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex (inetaddr_chain).rwsem nl_table_wait.lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex (inetaddr_chain).rwsem fib_info_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex (inetaddr_chain).rwsem &tbl->lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex (inetaddr_chain).rwsem class irq_context: 0 vlan_ioctl_mutex rtnl_mutex (inetaddr_chain).rwsem (&tbl->proxy_timer) irq_context: 0 vlan_ioctl_mutex rtnl_mutex (inetaddr_chain).rwsem &base->lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex 
(inetaddr_chain).rwsem &net->sctp.local_addr_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex (inetaddr_chain).rwsem &net->sctp.local_addr_lock &net->sctp.addr_wq_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex (inetaddr_chain).rwsem &net->sctp.local_addr_lock &net->sctp.addr_wq_lock pool_lock#2 irq_context: 0 vlan_ioctl_mutex rtnl_mutex (inetaddr_chain).rwsem krc.lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex sysctl_lock &obj_hash[i].lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex sysctl_lock pool_lock#2 irq_context: 0 vlan_ioctl_mutex rtnl_mutex sysctl_lock krc.lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &ul->lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &net->xdp.lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex mirred_list_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &nft_net->commit_mutex irq_context: 0 vlan_ioctl_mutex rtnl_mutex &ent->pde_unload_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_state.exp_mutex &rnp->exp_wq[0] irq_context: 0 vlan_ioctl_mutex rtnl_mutex &idev->mc_lock krc.lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &idev->mc_report_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &pnn->pndevs.lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &pnn->routes.lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex target_list_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex uevent_sock_mutex &____s->seqcount irq_context: 0 vlan_ioctl_mutex rtnl_mutex kernfs_idr_lock irq_context: 0 kn->active#5 &kernfs_locks->open_file_mutex[count] rcu_read_lock rcu_node_0 irq_context: 0 kn->active#5 &kernfs_locks->open_file_mutex[count] rcu_read_lock &rq->__lock irq_context: 0 kn->active#5 &kernfs_locks->open_file_mutex[count] rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 vlan_ioctl_mutex rtnl_mutex dev_pm_qos_sysfs_mtx irq_context: 0 vlan_ioctl_mutex rtnl_mutex dev_pm_qos_sysfs_mtx &root->kernfs_rwsem irq_context: 0 vlan_ioctl_mutex rtnl_mutex dev_pm_qos_sysfs_mtx &root->kernfs_rwsem irq_context: 0 vlan_ioctl_mutex rtnl_mutex dev_pm_qos_sysfs_mtx dev_pm_qos_mtx irq_context: 0 vlan_ioctl_mutex rtnl_mutex subsys mutex#17 &k->k_lock klist_remove_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex subsys mutex#17 &rq->__lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex subsys mutex#17 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 vlan_ioctl_mutex rtnl_mutex &root->kernfs_rwsem rcu_read_lock rcu_node_0 irq_context: 0 vlan_ioctl_mutex rtnl_mutex &root->kernfs_rwsem rcu_read_lock &rq->__lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &root->kernfs_rwsem rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &root->kernfs_rwsem rcu_read_lock &rq->__lock irq_context: 0 &root->kernfs_rwsem rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 vlan_ioctl_mutex rtnl_mutex &sem->wait_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &root->kernfs_rwsem &sem->wait_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex deferred_probe_mutex irq_context: 0 vlan_ioctl_mutex rtnl_mutex device_links_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex mmu_notifier_invalidate_range_start irq_context: 0 vlan_ioctl_mutex rtnl_mutex uevent_sock_mutex mmu_notifier_invalidate_range_start irq_context: 0 vlan_ioctl_mutex rtnl_mutex cpu_hotplug_lock xps_map_mutex irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_state.exp_mutex &rnp->exp_wq[1] irq_context: 0 vlan_ioctl_mutex rcu_state.barrier_mutex irq_context: 0 vlan_ioctl_mutex rcu_state.barrier_mutex rcu_state.barrier_lock irq_context: 0 vlan_ioctl_mutex rcu_state.barrier_mutex 
rcu_state.barrier_lock &obj_hash[i].lock irq_context: 0 vlan_ioctl_mutex rcu_state.barrier_mutex &x->wait#24 irq_context: 0 vlan_ioctl_mutex rcu_state.barrier_mutex &rq->__lock irq_context: 0 vlan_ioctl_mutex rcu_state.barrier_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &sb->s_type->i_lock_key rcu_read_lock &dentry->d_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &sb->s_type->i_lock_key &dentry->d_lock &dentry->d_lock/1 irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &sb->s_type->i_lock_key &dentry->d_lock/1 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock &n->list_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock &n->list_lock &c->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock &rcu_state.gp_wq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 mapping.invalidate_lock &n->list_lock irq_context: 0 sb_writers#4 mapping.invalidate_lock &n->list_lock &c->lock irq_context: 0 vlan_ioctl_mutex dev_base_lock irq_context: 0 vlan_ioctl_mutex lweventlist_lock irq_context: 0 vlan_ioctl_mutex lweventlist_lock pool_lock#2 irq_context: 0 vlan_ioctl_mutex lweventlist_lock &dir->lock#2 irq_context: 0 vlan_ioctl_mutex pcpu_lock irq_context: 0 vlan_ioctl_mutex pool_lock#2 irq_context: 0 vlan_ioctl_mutex &dir->lock#2 irq_context: 0 vlan_ioctl_mutex &obj_hash[i].lock irq_context: 0 vlan_ioctl_mutex krc.lock irq_context: 0 vlan_ioctl_mutex &dir->lock#2 &obj_hash[i].lock irq_context: 0 vlan_ioctl_mutex &dir->lock#2 pool_lock#2 irq_context: 0 vlan_ioctl_mutex netdev_unregistering_wq.lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 pool_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &table->lock#3 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_state.exp_mutex rcu_read_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_state.exp_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock &lruvec->lru_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 sb_writers#5 
&type->i_mutex_dir_key#5 remove_cache_srcu pool_lock#2 irq_context: 0 &tfile->napi_mutex &rq->__lock irq_context: 0 &tfile->napi_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 tracepoints_mutex reg_lock rcu_state.exp_mutex.wait_lock irq_context: 0 tracepoints_mutex reg_lock &p->pi_lock irq_context: 0 tracepoints_mutex reg_lock &p->pi_lock &rq->__lock irq_context: 0 tracepoints_mutex reg_lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 tracepoints_mutex reg_lock &rq->__lock irq_context: 0 text_mutex rcu_read_lock &rq->__lock irq_context: 0 text_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_read_lock &tb->tb6_lock &obj_hash[i].lock pool_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_read_lock &tb->tb6_lock rcu_read_lock batched_entropy_u8.lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_read_lock &tb->tb6_lock rcu_read_lock kfence_freelist_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &ul->lock#2 irq_context: 0 vlan_ioctl_mutex rtnl_mutex &idev->mc_lock &c->lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh &sch->q.lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh &sch->q.lock tk_core.seq.seqcount irq_context: softirq (&mp->timer) irq_context: softirq (&mp->timer) &br->multicast_lock irq_context: softirq (&mp->timer) &br->multicast_lock pool_lock#2 irq_context: softirq (&mp->timer) &br->multicast_lock &dir->lock#2 irq_context: softirq (&mp->timer) &br->multicast_lock deferred_lock irq_context: softirq (&mp->timer) &br->multicast_lock rcu_read_lock &pool->lock irq_context: softirq (&mp->timer) &br->multicast_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: softirq (&mp->timer) &br->multicast_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: softirq (&mp->timer) &br->multicast_lock nl_table_lock irq_context: softirq (&mp->timer) &br->multicast_lock &obj_hash[i].lock irq_context: softirq (&mp->timer) &br->multicast_lock nl_table_wait.lock irq_context: softirq (&mp->timer) &br->multicast_lock rcu_read_lock rhashtable_bucket irq_context: softirq (&mp->timer) &br->multicast_lock rcu_read_lock rcu_read_lock &pool->lock irq_context: softirq (&mp->timer) &br->multicast_lock rcu_read_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: softirq (&mp->timer) &br->multicast_lock rcu_read_lock rcu_read_lock &pool->lock pool_lock#2 irq_context: softirq (&mp->timer) &br->multicast_lock rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: softirq (&mp->timer) &br->multicast_lock rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 &nft_net->commit_mutex &rnp->exp_wq[2] irq_context: 0 (wq_completion)events_long (work_completion)(&br->mcast_gc_work) (&mp->timer) irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_state.exp_mutex.wait_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &tbl->lock &c->lock irq_context: 0 &nft_net->commit_mutex rcu_state.barrier_mutex rcu_state.barrier_mutex.wait_lock irq_context: 0 &nft_net->commit_mutex rcu_state.barrier_mutex.wait_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 jbd2_handle &ret->b_state_lock bit_wait_table + i irq_context: 0 vlan_ioctl_mutex rtnl_mutex (inetaddr_chain).rwsem rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex (inetaddr_chain).rwsem rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 vlan_ioctl_mutex 
rtnl_mutex (inetaddr_chain).rwsem rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 vlan_ioctl_mutex rtnl_mutex (inetaddr_chain).rwsem rcu_read_lock rcu_node_0 irq_context: 0 vlan_ioctl_mutex rtnl_mutex (inetaddr_chain).rwsem rcu_read_lock &rq->__lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex (inetaddr_chain).rwsem rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 vlan_ioctl_mutex rtnl_mutex (inetaddr_chain).rwsem fib_info_lock &obj_hash[i].lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex (inetaddr_chain).rwsem fib_info_lock pool_lock#2 irq_context: 0 vlan_ioctl_mutex rtnl_mutex (inetaddr_chain).rwsem &net->sctp.local_addr_lock &net->sctp.addr_wq_lock &c->lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &nft_net->commit_mutex &lock->wait_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &nft_net->commit_mutex &rq->__lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &nft_net->commit_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &nft_net->commit_mutex &ht->mutex &rq->__lock irq_context: 0 &nft_net->commit_mutex &ht->mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 sb_writers#4 jbd2_handle &journal->j_wait_updates &p->pi_lock irq_context: 0 sb_writers#4 sb_writers#4 jbd2_handle &journal->j_wait_updates &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 sb_writers#4 jbd2_handle &journal->j_wait_updates &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock &lock->wait_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex sysctl_lock &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->tt.work)->work) rcu_node_0 irq_context: 0 sk_lock-AF_RXRPC &rxnet->local_mutex k-sk_lock-AF_INET irq_context: 0 sk_lock-AF_RXRPC &rxnet->local_mutex k-sk_lock-AF_INET k-slock-AF_INET irq_context: 0 sk_lock-AF_RXRPC &rxnet->local_mutex k-sk_lock-AF_INET &____s->seqcount#8 irq_context: 0 sk_lock-AF_RXRPC &rxnet->local_mutex k-sk_lock-AF_INET batched_entropy_u32.lock irq_context: 0 sk_lock-AF_RXRPC &rxnet->local_mutex k-sk_lock-AF_INET &table->hash[i].lock irq_context: 0 sk_lock-AF_RXRPC &rxnet->local_mutex k-sk_lock-AF_INET &table->hash[i].lock k-clock-AF_INET irq_context: 0 sk_lock-AF_RXRPC &rxnet->local_mutex k-sk_lock-AF_INET &table->hash[i].lock &table->hash2[i].lock irq_context: 0 sk_lock-AF_RXRPC &rxnet->local_mutex k-slock-AF_INET irq_context: 0 (wq_completion)kblockd (work_completion)(&(&q->requeue_work)->work) rcu_read_lock rcu_node_0 irq_context: 0 k-clock-AF_INET irq_context: 0 vlan_ioctl_mutex rcu_state.barrier_mutex.wait_lock irq_context: 0 sb_writers#8 tomoyo_ss mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 sb_writers#8 tomoyo_ss mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#8 tomoyo_ss mmu_notifier_invalidate_range_start &cfs_rq->removed.lock irq_context: 0 sb_writers#8 tomoyo_ss mmu_notifier_invalidate_range_start &obj_hash[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ret->b_state_lock bit_wait_table + i irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &ret->b_state_lock bit_wait_table + i &p->pi_lock irq_context: 0 
loop_validate_mutex &rq->__lock irq_context: 0 loop_validate_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 bt_proto_lock &n->list_lock irq_context: 0 bt_proto_lock &n->list_lock &c->lock irq_context: 0 sk_lock-AF_BLUETOOTH-BTPROTO_L2CAP &mm->mmap_lock irq_context: 0 &sb->s_type->i_mutex_key#10 l2cap_sk_list.lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_BLUETOOTH-BTPROTO_L2CAP irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_BLUETOOTH-BTPROTO_L2CAP slock-AF_BLUETOOTH-BTPROTO_L2CAP irq_context: 0 &sb->s_type->i_mutex_key#10 slock-AF_BLUETOOTH-BTPROTO_L2CAP irq_context: 0 &sb->s_type->i_mutex_key#10 &chan->lock/1 irq_context: 0 &sb->s_type->i_mutex_key#10 &chan->lock/1 sk_lock-AF_BLUETOOTH-BTPROTO_L2CAP/1 irq_context: 0 &sb->s_type->i_mutex_key#10 &chan->lock/1 sk_lock-AF_BLUETOOTH-BTPROTO_L2CAP/1 &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 &chan->lock/1 sk_lock-AF_BLUETOOTH-BTPROTO_L2CAP/1 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 &chan->lock/1 sk_lock-AF_BLUETOOTH-BTPROTO_L2CAP/1 slock-AF_BLUETOOTH-BTPROTO_L2CAP irq_context: 0 &sb->s_type->i_mutex_key#10 &chan->lock/1 slock-AF_BLUETOOTH-BTPROTO_L2CAP irq_context: 0 &sb->s_type->i_mutex_key#10 &chan->lock/1 clock-AF_BLUETOOTH irq_context: 0 &sb->s_type->i_mutex_key#10 &chan->lock/1 rlock-AF_BLUETOOTH irq_context: 0 &sb->s_type->i_mutex_key#10 &chan->lock/1 wlock-AF_BLUETOOTH irq_context: 0 &sb->s_type->i_mutex_key#10 &chan->lock/1 &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 &chan->lock/1 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 &chan->lock/1 pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 &chan->lock/1 &dir->lock irq_context: 0 &sb->s_type->i_mutex_key#10 &chan->lock/1 &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 chan_list_lock irq_context: 0 &sb->s_type->i_lock_key#8 bit_wait_table + i irq_context: softirq &(&bat_priv->bla.work)->timer rcu_read_lock &pool->lock/1 &p->pi_lock &cfs_rq->removed.lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock rcu_read_lock pool_lock#2 irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock rcu_read_lock tk_core.seq.seqcount irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->bla.work)->work) crngs.lock irq_context: 0 lock#3 &rq->__lock &cfs_rq->removed.lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rlock-AF_PACKET irq_context: softirq rcu_read_lock rcu_read_lock rlock-AF_PACKET irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 &type->i_mutex_dir_key#3 sb_writers#4 &____s->seqcount#2 irq_context: 0 fill_pool_map-wait-type-override &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &table->lock#3 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock 
rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &pcp->lock &zone->lock irq_context: 0 sb_writers#4 &pcp->lock &zone->lock &____s->seqcount irq_context: 0 sb_writers#4 remove_cache_srcu &rq->__lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_read_lock &tb->tb6_lock rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &tbl->lock &obj_hash[i].lock pool_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &idev->mc_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 nfnl_subsys_acct fs_reclaim irq_context: 0 nfnl_subsys_acct fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 nfnl_subsys_acct fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 nfnl_subsys_acct fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 nfnl_subsys_acct pool_lock#2 irq_context: 0 nfnl_subsys_acct &obj_hash[i].lock irq_context: 0 nfnl_subsys_acct krc.lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ret->b_state_lock &journal->j_list_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &rcu_state.expedited_wq irq_context: 0 vlan_ioctl_mutex rtnl_mutex &rnp->exp_wq[3] irq_context: 0 sk_lock-AF_UNIX rcu_read_lock pool_lock#2 irq_context: 0 sk_lock-AF_UNIX rcu_read_lock clock-AF_UNIX irq_context: 0 sk_lock-AF_UNIX rcu_read_lock clock-AF_UNIX &c->lock irq_context: 0 sk_lock-AF_UNIX rcu_read_lock clock-AF_UNIX pool_lock#2 irq_context: 0 sk_lock-AF_UNIX rcu_read_lock clock-AF_UNIX &obj_hash[i].lock irq_context: 0 sk_lock-AF_UNIX rcu_read_lock unix_dgram_prot_lock irq_context: 0 sk_lock-AF_UNIX rcu_read_lock &stab->lock irq_context: 0 sk_lock-AF_UNIX rcu_read_lock &stab->lock &psock->link_lock irq_context: 0 sk_lock-AF_UNIX &rq->__lock irq_context: 0 sk_lock-AF_UNIX &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rcu_read_lock &stab->lock irq_context: 0 rcu_read_lock &stab->lock &psock->link_lock irq_context: 0 rcu_read_lock &stab->lock &psock->link_lock &obj_hash[i].lock irq_context: 0 rcu_read_lock &stab->lock &psock->link_lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_UNIX irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_UNIX &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_UNIX &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_UNIX slock-AF_UNIX irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_UNIX rcu_read_lock &psock->link_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_UNIX rcu_read_lock &stab->lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_UNIX rcu_read_lock &stab->lock &psock->link_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_UNIX rcu_read_lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_UNIX rcu_read_lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_UNIX 
&psock->ingress_lock irq_context: 0 &sb->s_type->i_mutex_key#10 slock-AF_UNIX irq_context: 0 &sb->s_type->i_mutex_key#10 (work_completion)(&(&psock->work)->work) irq_context: 0 &sb->s_type->i_mutex_key#10 clock-AF_UNIX irq_context: 0 &sb->s_type->i_mutex_key#10 &psock->ingress_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&map->work) &rnp->exp_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&map->work) rcu_state.exp_mutex irq_context: 0 (wq_completion)events_unbound (work_completion)(&map->work) rcu_state.exp_mutex rcu_state.exp_mutex.wait_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&map->work) &rnp->exp_wq[1] irq_context: 0 (wq_completion)events_unbound (work_completion)(&map->work) rcu_state.exp_mutex.wait_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&map->work) &p->pi_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&map->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&map->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &nft_net->commit_mutex rcu_state.barrier_mutex rcu_read_lock &rq->__lock irq_context: 0 &nft_net->commit_mutex rcu_state.barrier_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_lock_key#22 &xa->xa_lock#6 &obj_hash[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_lock_key#22 &xa->xa_lock#6 pool_lock#2 irq_context: 0 &nft_net->commit_mutex &p->pi_lock &cfs_rq->removed.lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex uevent_sock_mutex &cfs_rq->removed.lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &root->kernfs_rwsem &meta->lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &root->kernfs_rwsem kfence_freelist_lock irq_context: 0 vlan_ioctl_mutex rcu_state.barrier_mutex rcu_state.barrier_mutex.wait_lock irq_context: 0 (wq_completion)events (work_completion)(&(&psock->rwork)->work) irq_context: 0 (wq_completion)events (work_completion)(&(&psock->rwork)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&(&psock->rwork)->work) (work_completion)(&(&psock->work)->work) irq_context: 0 (wq_completion)events (work_completion)(&(&psock->rwork)->work) &list->lock#30 irq_context: 0 (wq_completion)events (work_completion)(&(&psock->rwork)->work) rlock-AF_UNIX irq_context: 0 (wq_completion)events (work_completion)(&(&psock->rwork)->work) pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&(&psock->rwork)->work) &dir->lock irq_context: 0 (wq_completion)events (work_completion)(&(&psock->rwork)->work) stock_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &rq->__lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 &journal->j_state_lock &journal->j_wait_transaction_locked irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 jbd2_handle irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 jbd2_handle mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 jbd2_handle &____s->seqcount irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 jbd2_handle pool_lock#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 jbd2_handle rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#4 
&type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 jbd2_handle rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 jbd2_handle &obj_hash[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_node_0 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle pool_lock#2 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#4 mapping.invalidate_lock rcu_read_lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 fill_pool_map-wait-type-override pool_lock irq_context: 0 sk_lock-AF_NETROM ax25_uid_lock irq_context: 0 sk_lock-AF_NETROM nr_list_lock irq_context: 0 nr_list_lock irq_context: 0 sk_lock-AF_NETROM pool_lock#2 irq_context: 0 sk_lock-AF_NETROM &list->lock#31 irq_context: 0 sk_lock-AF_NETROM &obj_hash[i].lock irq_context: 0 sk_lock-AF_NETROM &base->lock irq_context: 0 sk_lock-AF_NETROM &base->lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_NETROM &ei->socket.wq.wait irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->bla.work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cgroup_threadgroup_rwsem &cfs_rq->removed.lock irq_context: 0 cgroup_threadgroup_rwsem &obj_hash[i].lock irq_context: 0 sk_lock-AF_NETROM rlock-AF_NETROM irq_context: 0 pernet_ops_rwsem pcpu_alloc_mutex &rq->__lock &cfs_rq->removed.lock irq_context: softirq net/netrom/nr_loopback.c:18 irq_context: softirq net/netrom/nr_loopback.c:18 &list->lock#31 irq_context: softirq net/netrom/nr_loopback.c:18 nr_list_lock irq_context: softirq net/netrom/nr_loopback.c:18 &c->lock irq_context: softirq net/netrom/nr_loopback.c:18 pool_lock#2 irq_context: softirq net/netrom/nr_loopback.c:18 &dir->lock irq_context: softirq net/netrom/nr_loopback.c:18 &obj_hash[i].lock irq_context: softirq net/netrom/nr_loopback.c:18 slock-AF_NETROM irq_context: softirq net/netrom/nr_loopback.c:18 slock-AF_NETROM pool_lock#2 irq_context: softirq net/netrom/nr_loopback.c:18 slock-AF_NETROM &list->lock#31 irq_context: softirq net/netrom/nr_loopback.c:18 slock-AF_NETROM &obj_hash[i].lock irq_context: softirq net/netrom/nr_loopback.c:18 slock-AF_NETROM &base->lock irq_context: softirq net/netrom/nr_loopback.c:18 slock-AF_NETROM &base->lock &obj_hash[i].lock irq_context: softirq net/netrom/nr_loopback.c:18 slock-AF_NETROM rlock-AF_NETROM irq_context: softirq net/netrom/nr_loopback.c:18 slock-AF_NETROM rcu_read_lock &ei->socket.wq.wait irq_context: softirq net/netrom/nr_loopback.c:18 slock-AF_NETROM rcu_read_lock &ei->socket.wq.wait &p->pi_lock irq_context: softirq net/netrom/nr_loopback.c:18 slock-AF_NETROM rcu_read_lock &ei->socket.wq.wait &p->pi_lock &rq->__lock irq_context: softirq net/netrom/nr_loopback.c:18 slock-AF_NETROM rcu_read_lock &ei->socket.wq.wait &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq net/netrom/nr_loopback.c:18 &base->lock irq_context: softirq net/netrom/nr_loopback.c:18 &base->lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_NETROM clock-AF_NETROM irq_context: 0 pernet_ops_rwsem __ip_vs_app_mutex &n->list_lock irq_context: 0 pernet_ops_rwsem __ip_vs_app_mutex &n->list_lock &c->lock irq_context: 0 &sig->cred_guard_mutex tomoyo_ss remove_cache_srcu pool_lock#2 irq_context: 0 &sig->cred_guard_mutex tomoyo_ss remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, 
cpu)->seq irq_context: 0 &p->lock &of->mutex kn->active#5 fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 &p->lock &of->mutex kn->active#5 fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-crypt-wg0#8 (work_completion)(&peer->transmit_packet_work) batched_entropy_u8.lock crngs.lock irq_context: 0 sk_lock-AF_INET &rcu_state.expedited_wq &p->pi_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal remove_cache_srcu rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal remove_cache_srcu rcu_read_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal remove_cache_srcu rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex _xmit_PIMREG irq_context: 0 rtnl_mutex mrt_lock#2 irq_context: 0 rtnl_mutex mrt_lock#2 pool_lock#2 irq_context: 0 rtnl_mutex mrt_lock#2 &dir->lock#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 remove_cache_srcu &rcu_state.expedited_wq irq_context: 0 &type->i_mutex_dir_key#3 sb_writers#4 &rcu_state.expedited_wq irq_context: 0 &fsnotify_mark_srcu rcu_node_0 irq_context: 0 &fsnotify_mark_srcu &rcu_state.expedited_wq irq_context: 0 &fsnotify_mark_srcu &rcu_state.expedited_wq &p->pi_lock irq_context: 0 &fsnotify_mark_srcu &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 &fsnotify_mark_srcu &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &vma->vm_lock->lock rcu_read_lock pgd_lock irq_context: 0 &vma->vm_lock->lock rcu_read_lock stock_lock irq_context: 0 &vma->vm_lock->lock rcu_read_lock key irq_context: 0 &vma->vm_lock->lock rcu_read_lock pcpu_lock irq_context: 0 &vma->vm_lock->lock rcu_read_lock percpu_counters_lock irq_context: 0 &vma->vm_lock->lock rcu_read_lock pcpu_lock stock_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &rcu_state.expedited_wq irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &lg->lg_mutex mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_CAN rcu_state.exp_mutex rcu_read_lock &pool->lock fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_CAN rcu_state.exp_mutex rcu_read_lock &pool->lock fill_pool_map-wait-type-override rcu_read_lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_CAN rcu_state.exp_mutex rcu_read_lock &pool->lock fill_pool_map-wait-type-override &obj_hash[i].lock irq_context: softirq (&p->timer) irq_context: softirq (&p->timer) &br->multicast_lock irq_context: softirq (&p->timer) &br->multicast_lock pool_lock#2 irq_context: softirq (&p->timer) &br->multicast_lock &dir->lock#2 irq_context: softirq (&p->timer) &br->multicast_lock deferred_lock irq_context: softirq (&p->timer) &br->multicast_lock rcu_read_lock &pool->lock irq_context: softirq (&p->timer) &br->multicast_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: softirq (&p->timer) &br->multicast_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: softirq (&p->timer) &br->multicast_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: softirq (&p->timer) &br->multicast_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq (&p->timer) &br->multicast_lock nl_table_lock 
irq_context: softirq (&p->timer) &br->multicast_lock &obj_hash[i].lock irq_context: softirq (&p->timer) &br->multicast_lock nl_table_wait.lock irq_context: softirq (&p->timer) &br->multicast_lock rcu_read_lock &pool->lock pool_lock#2 irq_context: softirq (&p->timer) &br->multicast_lock &base->lock irq_context: softirq (&p->timer) &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: softirq (&p->timer) &br->multicast_lock &c->lock irq_context: softirq (&p->timer) &br->multicast_lock &n->list_lock irq_context: softirq (&p->timer) &br->multicast_lock &n->list_lock &c->lock irq_context: 0 sb_writers#4 &journal->j_barrier &journal->j_state_lock &journal->j_wait_commit &p->pi_lock &cfs_rq->removed.lock irq_context: 0 &fp->aux->used_maps_mutex &rq->__lock irq_context: 0 &fp->aux->used_maps_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &u->iolock rcu_read_lock &cfs_rq->removed.lock irq_context: 0 &u->iolock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg1#9 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex fill_pool_map-wait-type-override &n->list_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &folio_wait_table[i] &p->pi_lock &cfs_rq->removed.lock irq_context: 0 &type->i_mutex_dir_key#3 &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)wg-kex-wg2#16 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock rcu_read_lock_bh &peer->keypairs.keypair_update_lock &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg2#16 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock rcu_read_lock_bh &peer->keypairs.keypair_update_lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 rcu_node_0 irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lg->lg_mutex &c->lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lg->lg_mutex &____s->seqcount#2 irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lg->lg_mutex &____s->seqcount irq_context: 0 sb_writers#4 sb_internal jbd2_handle &sbi->s_orphan_lock rcu_read_lock &rcu_state.expedited_wq irq_context: 0 sb_writers#4 sb_internal jbd2_handle &sbi->s_orphan_lock rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle &sbi->s_orphan_lock rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle &sbi->s_orphan_lock rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 
&journal->j_state_lock &journal->j_wait_commit &p->pi_lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 mmu_notifier_invalidate_range_start pool_lock#2 irq_context: 0 br_ioctl_mutex rtnl_mutex fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 br_ioctl_mutex rtnl_mutex fill_pool_map-wait-type-override &c->lock irq_context: 0 br_ioctl_mutex rtnl_mutex fill_pool_map-wait-type-override &n->list_lock irq_context: 0 br_ioctl_mutex rtnl_mutex fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 br_ioctl_mutex br_ioctl_mutex.wait_lock irq_context: 0 br_ioctl_mutex rtnl_mutex fill_pool_map-wait-type-override pool_lock irq_context: 0 br_ioctl_mutex.wait_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx batched_entropy_u8.lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx kfence_freelist_lock irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem rcu_read_lock &rcu_state.gp_wq irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_CAIF rcu_state.exp_mutex.wait_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_CAIF &p->pi_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_CAIF &p->pi_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_CAIF &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq (&mp->timer) &br->multicast_lock rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &mm->mmap_lock &anon_vma->rwsem &mm->page_table_lock quarantine_lock irq_context: 0 rtnl_mutex uevent_sock_mutex rcu_read_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 rtnl_mutex uevent_sock_mutex remove_cache_srcu pool_lock#2 irq_context: 0 (wq_completion)wg-crypt-wg0#8 (work_completion)(&peer->transmit_packet_work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &ei->i_es_lock &pcp->lock &zone->lock irq_context: 0 cb_lock rcu_read_lock &cfs_rq->removed.lock irq_context: 0 cb_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 cgroup_threadgroup_rwsem rcu_read_lock &cfs_rq->removed.lock irq_context: 0 cgroup_threadgroup_rwsem rcu_read_lock &obj_hash[i].lock irq_context: 0 cgroup_threadgroup_rwsem rcu_read_lock pool_lock#2 irq_context: 0 rcu_read_lock &nf_conntrack_locks[i] batched_entropy_u8.lock irq_context: 0 pcpu_alloc_mutex stock_lock irq_context: 0 pcpu_alloc_mutex pcpu_lock stock_lock irq_context: 0 pcpu_alloc_mutex pool_lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh &hsr->seqnr_lock rcu_read_lock &n->list_lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh &hsr->seqnr_lock rcu_read_lock &n->list_lock &c->lock irq_context: 0 rtnl_mutex &dev->tx_global_lock _xmit_PIMREG#2 irq_context: 0 &p->lock &of->mutex kn->active#5 remove_cache_srcu pool_lock#2 irq_context: 0 &f->f_pos_lock rcu_node_0 irq_context: 0 &f->f_pos_lock &rcu_state.expedited_wq irq_context: 0 
&f->f_pos_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 &f->f_pos_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 &f->f_pos_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &iint->mutex ima_extend_list_mutex remove_cache_srcu &c->lock irq_context: 0 &iint->mutex ima_extend_list_mutex remove_cache_srcu &n->list_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&(&krcp->krw_arr[i].rcu_work)->work) rcu_callback rcu_read_lock &rq->__lock irq_context: softirq (&mp->timer) &br->multicast_lock init_task.mems_allowed_seq.seqcount irq_context: 0 &group->inotify_data.idr_lock &obj_hash[i].lock pool_lock irq_context: 0 br_ioctl_mutex rtnl_mutex krc.lock &obj_hash[i].lock irq_context: 0 br_ioctl_mutex rtnl_mutex krc.lock &base->lock irq_context: 0 br_ioctl_mutex rtnl_mutex krc.lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg0#9 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)wg-crypt-wg0#9 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &n->lock irq_context: 0 (wq_completion)wg-crypt-wg0#9 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)wg-crypt-wg1#9 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg2#16 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &n->list_lock irq_context: 0 (wq_completion)wg-kex-wg2#16 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &n->list_lock &c->lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#9 &nsim_trap_data->trap_lock batched_entropy_u8.lock crngs.lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock quarantine_lock irq_context: softirq (&hsr->announce_timer) rcu_read_lock &hsr->seqnr_lock rcu_read_lock init_task.mems_allowed_seq.seqcount irq_context: 0 (wq_completion)wg-crypt-wg0#9 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#2 irq_context: 0 (wq_completion)wg-crypt-wg0#9 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount irq_context: 0 (wq_completion)wg-crypt-wg0#9 (work_completion)(&peer->transmit_packet_work) 
&peer->endpoint_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: 0 &vma->vm_lock->lock fs_reclaim &cfs_rq->removed.lock irq_context: 0 &vma->vm_lock->lock fs_reclaim &obj_hash[i].lock irq_context: 0 rtnl_mutex rcu_read_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &journal->j_state_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &journal->j_state_lock &journal->j_wait_commit irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &journal->j_state_lock &journal->j_wait_commit &p->pi_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &journal->j_state_lock &journal->j_wait_commit &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &journal->j_state_lock &journal->j_wait_commit &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex rcu_read_lock rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override &n->list_lock irq_context: 0 rtnl_mutex rcu_read_lock rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem pcpu_alloc_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_power_efficient (check_lifetime_work).work rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem stock_lock rcu_read_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem pcpu_alloc_mutex pcpu_alloc_mutex.wait_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock nl_table_wait.lock &p->pi_lock irq_context: 0 (wq_completion)wg-crypt-wg0#9 (work_completion)(&peer->transmit_packet_work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &type->i_mutex_dir_key#3 &dentry->d_lock &wq#2 &p->pi_lock irq_context: 0 &type->i_mutex_dir_key#3 &dentry->d_lock &wq#2 &p->pi_lock &rq->__lock irq_context: 0 &type->i_mutex_dir_key#3 &dentry->d_lock &wq#2 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-crypt-wg0#9 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)events (work_completion)(&data->fib_event_work) &data->fib_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 tomoyo_ss &rcu_state.expedited_wq &p->pi_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_data_sem rcu_read_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex 
(inetaddr_chain).rwsem &meta->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 &rq->__lock &cfs_rq->removed.lock irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem &net->sctp.local_addr_lock &net->sctp.addr_wq_lock batched_entropy_u8.lock irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem &net->sctp.local_addr_lock &net->sctp.addr_wq_lock kfence_freelist_lock irq_context: 0 (wq_completion)events (work_completion)(&data->fib_event_work) &data->fib_lock remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock &stopper->lock irq_context: 0 sk_lock-AF_CAN free_vmap_area_lock irq_context: 0 sk_lock-AF_CAN vmap_area_lock irq_context: 0 sk_lock-AF_CAN stock_lock irq_context: 0 sk_lock-AF_CAN pcpu_alloc_mutex irq_context: 0 sk_lock-AF_CAN pcpu_alloc_mutex pcpu_lock irq_context: 0 sk_lock-AF_CAN pcpu_alloc_mutex &rq->__lock irq_context: 0 sk_lock-AF_CAN pcpu_alloc_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_CAN pcpu_alloc_mutex.wait_lock irq_context: 0 sk_lock-AF_CAN &p->pi_lock irq_context: 0 sk_lock-AF_CAN &p->pi_lock &rq->__lock irq_context: 0 sk_lock-AF_CAN &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_CAN rcu_read_lock &pool->lock irq_context: 0 sk_lock-AF_CAN rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_CAN rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 sk_lock-AF_CAN rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 sk_lock-AF_CAN rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 sk_lock-AF_CAN rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)gid-cache-wq (work_completion)(&work->work) quarantine_lock irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem remove_cache_srcu pool_lock#2 irq_context: softirq (&net->sctp.addr_wq_timer) &net->sctp.addr_wq_lock &meta->lock irq_context: softirq (&net->sctp.addr_wq_timer) &net->sctp.addr_wq_lock kfence_freelist_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 slock-AF_INET6 fill_pool_map-wait-type-override &n->list_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 slock-AF_INET6 fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 sk_lock-AF_INET6 fill_pool_map-wait-type-override rcu_read_lock &rcu_state.gp_wq irq_context: 0 sk_lock-AF_INET6 fill_pool_map-wait-type-override rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 sk_lock-AF_INET6 fill_pool_map-wait-type-override rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 sk_lock-AF_INET6 fill_pool_map-wait-type-override rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &iint->mutex ima_extend_list_mutex rcu_node_0 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock batched_entropy_u8.lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock kfence_freelist_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem &bgl->locks[i].lock &obj_hash[i].lock pool_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle remove_cache_srcu rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle 
remove_cache_srcu rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)events &rq->__lock irq_context: 0 (wq_completion)events &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle remove_cache_srcu rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond3 (work_completion)(&(&bond->mii_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_unbound (work_completion)(&map->work) rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)events (shepherd).work &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_PHONET rcu_state.exp_mutex rcu_read_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_PHONET rcu_state.exp_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#8 &cfs_rq->removed.lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#8 &obj_hash[i].lock irq_context: 0 (wq_completion)bond4 (work_completion)(&(&bond->mii_work)->work) &rq->__lock irq_context: 0 (wq_completion)mm_percpu_wq (work_completion)(&(({ do { const void *__vpp_verify = (typeof((&vmstat_work) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((&vmstat_work))) *)((&vmstat_work)))); (typeof((typeof(*((&vmstat_work))) *)((&vmstat_work)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); }))->work) &base->lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)mm_percpu_wq (work_completion)(&(({ do { const void *__vpp_verify = (typeof((&vmstat_work) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((&vmstat_work))) *)((&vmstat_work)))); (typeof((typeof(*((&vmstat_work))) *)((&vmstat_work)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); }))->work) &base->lock fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)mm_percpu_wq (work_completion)(&(({ do { const void *__vpp_verify = (typeof((&vmstat_work) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((&vmstat_work))) *)((&vmstat_work)))); (typeof((typeof(*((&vmstat_work))) *)((&vmstat_work)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); }))->work) &base->lock fill_pool_map-wait-type-override &n->list_lock irq_context: 0 (wq_completion)mm_percpu_wq (work_completion)(&(({ do { const void *__vpp_verify = (typeof((&vmstat_work) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((&vmstat_work))) *)((&vmstat_work)))); (typeof((typeof(*((&vmstat_work))) *)((&vmstat_work)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); }))->work) &base->lock fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 (wq_completion)mm_percpu_wq (work_completion)(&(({ do { const void *__vpp_verify = (typeof((&vmstat_work) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((&vmstat_work))) *)((&vmstat_work)))); (typeof((typeof(*((&vmstat_work))) *)((&vmstat_work)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); }))->work) &base->lock fill_pool_map-wait-type-override pool_lock 
irq_context: 0 (wq_completion)mm_percpu_wq (work_completion)(&(({ do { const void *__vpp_verify = (typeof((&vmstat_work) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((&vmstat_work))) *)((&vmstat_work)))); (typeof((typeof(*((&vmstat_work))) *)((&vmstat_work)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); }))->work) &rq->__lock irq_context: 0 (wq_completion)mm_percpu_wq (work_completion)(&(({ do { const void *__vpp_verify = (typeof((&vmstat_work) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((&vmstat_work))) *)((&vmstat_work)))); (typeof((typeof(*((&vmstat_work))) *)((&vmstat_work)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); }))->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &ei->i_es_lock key#8 irq_context: 0 &tsk->futex_exit_mutex &obj_hash[i].lock pool_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 jbd2_handle &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &ei->i_es_lock key#7 irq_context: softirq (&tw->tw_timer) &dccp_hashinfo.bhash[i].lock irq_context: softirq (&tw->tw_timer) &dccp_hashinfo.bhash[i].lock &dccp_hashinfo.bhash2[i].lock irq_context: softirq (&tw->tw_timer) &dccp_hashinfo.bhash[i].lock &dccp_hashinfo.bhash2[i].lock stock_lock irq_context: softirq (&tw->tw_timer) &dccp_hashinfo.bhash[i].lock &dccp_hashinfo.bhash2[i].lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: softirq (&tw->tw_timer) &dccp_hashinfo.bhash[i].lock &dccp_hashinfo.bhash2[i].lock &obj_hash[i].lock irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET rcu_read_lock &obj_hash[i].lock pool_lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle quarantine_lock irq_context: 0 (wq_completion)wg-crypt-wg1#9 (work_completion)(&peer->transmit_packet_work) &rq->__lock irq_context: 0 sb_writers#4 mapping.invalidate_lock rcu_read_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &sb->s_type->i_mutex_key#8 &ei->i_data_sem &ei->i_es_lock &sbi->s_es_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &ei->i_data_sem &c->lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &pool->lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &pool->lock fill_pool_map-wait-type-override &c->lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &pool->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &cfs_rq->removed.lock irq_context: 0 &tsk->futex_exit_mutex &mm->mmap_lock &rq->__lock irq_context: 0 &tsk->futex_exit_mutex &mm->mmap_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &tsk->futex_exit_mutex &mm->mmap_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6/1 &c->lock irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6/1 &rq->__lock irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6/1 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_INET6 &token_hash[i].lock irq_context: softirq &x->wait#26 &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)events (work_completion)(&(&hwstats->traffic_dw)->work) fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)events (work_completion)(&(&hwstats->traffic_dw)->work) fill_pool_map-wait-type-override 
pool_lock irq_context: 0 (wq_completion)events (work_completion)(&ht->run_work) &ht->mutex &ht->lock fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)events (work_completion)(&ht->run_work) &ht->mutex &ht->lock fill_pool_map-wait-type-override &n->list_lock irq_context: 0 (wq_completion)events (work_completion)(&ht->run_work) &ht->mutex &ht->lock fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 &token_hash[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 remove_cache_srcu &cfs_rq->removed.lock irq_context: 0 sb_writers#4 sb_writers#4 &base->lock irq_context: 0 sb_writers#4 sb_writers#4 &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &fq->mq_flush_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &lg->lg_mutex bit_wait_table + i irq_context: 0 &sb->s_type->i_mutex_key#19 &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#19 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_BLUETOOTH-BTPROTO_HCI &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_data_sem &ret->b_state_lock bit_wait_table + i irq_context: 0 (wq_completion)wg-crypt-wg0#8 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 (wq_completion)wg-crypt-wg0#8 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &n->list_lock irq_context: 0 (wq_completion)wg-crypt-wg0#8 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)wg-crypt-wg0#8 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 (wq_completion)wg-crypt-wg0#8 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-crypt-wg0#8 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg1#8 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-crypt-wg1#8 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_PACKET &f->f_lock irq_context: 0 sk_lock-AF_PACKET &f->f_lock fasync_lock irq_context: 0 sk_lock-AF_PACKET &f->f_lock fasync_lock &new->fa_lock irq_context: 0 sk_lock-AF_PACKET &f->f_lock fasync_lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_PPPOX &rnp->exp_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_PPPOX rcu_state.exp_mutex irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_PPPOX rcu_state.exp_mutex rcu_state.exp_mutex.wait_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_data_sem stock_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle 
&ei->i_data_sem pcpu_lock stock_lock irq_context: 0 (wq_completion)wg-crypt-wg0#9 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh noop_qdisc.q.lock irq_context: 0 (wq_completion)wg-crypt-wg0#9 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &data->lock irq_context: 0 (wq_completion)wg-crypt-wg0#9 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg1#9 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh noop_qdisc.q.lock irq_context: 0 (wq_completion)wg-crypt-wg1#9 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &data->lock irq_context: 0 (wq_completion)wg-crypt-wg1#9 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg1#9 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh pool_lock#2 irq_context: 0 sb_writers#4 &ei->xattr_sem &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &ei->xattr_sem &obj_hash[i].lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 remove_cache_srcu &rq->__lock &cfs_rq->removed.lock irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem &tbl->lock &n->lock irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem &tbl->lock &c->lock irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem &tbl->lock pool_lock#2 irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem &tbl->lock &n->lock irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem &tbl->lock &n->lock &____s->seqcount#9 irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem &tbl->lock nl_table_lock irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem &tbl->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem &tbl->lock nl_table_wait.lock irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem &tbl->lock rcu_read_lock lock#8 irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh &sch->q.lock hrtimer_bases.lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh &sch->q.lock hrtimer_bases.lock &obj_hash[i].lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh &sch->q.lock hrtimer_bases.lock tk_core.seq.seqcount irq_context: softirq rcu_read_lock &sch->q.lock irq_context: softirq rcu_read_lock &sch->q.lock tk_core.seq.seqcount irq_context: 0 pernet_ops_rwsem rtnl_mutex remove_cache_srcu rcu_read_lock &____s->seqcount irq_context: 0 pernet_ops_rwsem rtnl_mutex &idev->mc_lock fs_reclaim &rq->__lock irq_context: 0 stock_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem &tbl->lock rcu_read_lock id_table_lock irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem &tbl->lock &dir->lock#2 irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem &tbl->lock krc.lock irq_context: 0 (wq_completion)events_power_efficient (check_lifetime_work).work &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-crypt-wg0#9 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg0#9 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock 
rcu_read_lock_bh &data->lock irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem krc.lock &obj_hash[i].lock irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem krc.lock &base->lock irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem krc.lock &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#4 mapping.invalidate_lock &xa->xa_lock#6 batched_entropy_u8.lock irq_context: 0 sb_writers#4 mapping.invalidate_lock &xa->xa_lock#6 kfence_freelist_lock irq_context: 0 &mm->mmap_lock &mm->page_table_lock &obj_hash[i].lock irq_context: 0 &mm->mmap_lock &mm->page_table_lock pool_lock#2 irq_context: 0 &mm->mmap_lock &mm->page_table_lock stock_lock irq_context: 0 &mm->mmap_lock &mm->page_table_lock rcu_read_lock pool_lock#2 irq_context: 0 &mm->mmap_lock ptlock_ptr(page)#2 rcu_read_lock pool_lock#2 irq_context: 0 &mm->mmap_lock ptlock_ptr(page)#2 &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &____s->seqcount#2 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &____s->seqcount irq_context: 0 (wq_completion)events (work_completion)(&ht->run_work) &ht->mutex batched_entropy_u32.lock crngs.lock base_crng.lock irq_context: 0 (wq_completion)wg-crypt-wg0#9 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)wg-crypt-wg0#9 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &____s->seqcount#7 irq_context: 0 (wq_completion)wg-crypt-wg0#9 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &nf_nat_locks[i] irq_context: 0 (wq_completion)wg-crypt-wg0#9 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &nf_conntrack_locks[i] irq_context: 0 (wq_completion)wg-crypt-wg0#9 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 irq_context: 0 (wq_completion)wg-crypt-wg0#9 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 batched_entropy_u8.lock irq_context: 0 (wq_completion)wg-crypt-wg0#9 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &nf_conntrack_locks[i]/1 irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&gc_work->dwork)->work) rcu_read_lock &obj_hash[i].lock pool_lock irq_context: 0 rtnl_mutex pcpu_alloc_mutex &cfs_rq->removed.lock irq_context: 0 rtnl_mutex pcpu_alloc_mutex &obj_hash[i].lock irq_context: 0 rtnl_mutex pcpu_alloc_mutex pool_lock#2 irq_context: 0 (wq_completion)gid-cache-wq (work_completion)(&ndev_work->work) devices_rwsem rcu_read_lock &rcu_state.gp_wq irq_context: 0 (wq_completion)gid-cache-wq (work_completion)(&ndev_work->work) devices_rwsem rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 (wq_completion)gid-cache-wq (work_completion)(&ndev_work->work) devices_rwsem rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)gid-cache-wq (work_completion)(&ndev_work->work) devices_rwsem rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex 
&bridge_netdev_addr_lock_key &____s->seqcount irq_context: 0 rtnl_mutex &idev->mc_lock &bridge_netdev_addr_lock_key &____s->seqcount#2 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_NETROM pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_NETROM &list->lock#31 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_NETROM &base->lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_NETROM &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle rcu_read_lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg0#9 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh &c->lock irq_context: 0 (wq_completion)wg-crypt-wg0#9 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh &data->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg0#9 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh &data->lock &base->lock irq_context: 0 sb_writers#4 mapping.invalidate_lock rcu_read_lock &base->lock irq_context: 0 sb_writers#4 mapping.invalidate_lock rcu_read_lock &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 fill_pool_map-wait-type-override &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-crypt-wg0#9 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh &data->lock &base->lock &obj_hash[i].lock irq_context: softirq net/netrom/nr_loopback.c:18 slock-AF_NETROM wlock-AF_NETROM irq_context: softirq net/netrom/nr_loopback.c:18 slock-AF_NETROM &list->lock#22 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &p->pi_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &rq->__lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &rq->__lock &base->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &rq->__lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg1#9 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh &data->lock irq_context: 0 sb_writers#4 sb_writers#4 remove_cache_srcu irq_context: 0 sb_writers#4 sb_writers#4 remove_cache_srcu quarantine_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 fill_pool_map-wait-type-override &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 fill_pool_map-wait-type-override &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq net/netrom/nr_loopback.c:18 slock-AF_NETROM &c->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 fs_reclaim &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_lock_key#22 &xa->xa_lock#6 &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_long (work_completion)(&br->mcast_gc_work) krc.lock &obj_hash[i].lock irq_context: 0 (wq_completion)events_long (work_completion)(&br->mcast_gc_work) krc.lock &base->lock irq_context: 0 (wq_completion)events_long (work_completion)(&br->mcast_gc_work) krc.lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf 
(work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &bridge_netdev_addr_lock_key &____s->seqcount#2 irq_context: 0 &pipe->mutex/1 sk_lock-AF_NETLINK irq_context: 0 &pipe->mutex/1 sk_lock-AF_NETLINK slock-AF_NETLINK irq_context: 0 &pipe->mutex/1 sk_lock-AF_NETLINK rcu_read_lock rhashtable_bucket irq_context: 0 &pipe->mutex/1 slock-AF_NETLINK irq_context: 0 &pipe->mutex/1 free_vmap_area_lock irq_context: 0 &pipe->mutex/1 vmap_area_lock irq_context: 0 &pipe->mutex/1 init_mm.page_table_lock irq_context: 0 &pipe->mutex/1 &c->lock irq_context: 0 &pipe->mutex/1 rcu_read_lock &zone->lock irq_context: 0 &pipe->mutex/1 rcu_read_lock &____s->seqcount irq_context: 0 &pipe->mutex/1 rcu_read_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: 0 &pipe->mutex/1 rcu_read_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 &pipe->mutex/1 rcu_read_lock rcu_read_lock_bh &zone->lock irq_context: 0 &pipe->mutex/1 rcu_read_lock rcu_read_lock_bh &zone->lock &____s->seqcount irq_context: 0 &pipe->mutex/1 rcu_read_lock rcu_read_lock_bh pool_lock#2 irq_context: 0 &pipe->mutex/1 nfnl_subsys_ctnetlink irq_context: 0 &pipe->mutex/1 nfnl_subsys_ctnetlink rcu_read_lock &____s->seqcount#7 irq_context: 0 &pipe->mutex/1 nfnl_subsys_ctnetlink pool_lock#2 irq_context: 0 &pipe->mutex/1 nfnl_subsys_ctnetlink rcu_read_lock &c->lock irq_context: 0 &pipe->mutex/1 nfnl_subsys_ctnetlink rcu_read_lock pool_lock#2 irq_context: 0 &pipe->mutex/1 nfnl_subsys_ctnetlink rcu_read_lock &ct->lock irq_context: 0 &pipe->mutex/1 nfnl_subsys_ctnetlink rcu_read_lock &nf_conntrack_locks[i] irq_context: 0 &pipe->mutex/1 nfnl_subsys_ctnetlink rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 irq_context: 0 &pipe->mutex/1 nfnl_subsys_ctnetlink rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 batched_entropy_u8.lock irq_context: 0 &pipe->mutex/1 nfnl_subsys_ctnetlink rcu_read_lock &nf_conntrack_locks[i]/1 irq_context: 0 &pipe->mutex/1 nfnl_subsys_ctnetlink rcu_read_lock &obj_hash[i].lock irq_context: 0 &pipe->mutex/1 nfnl_subsys_ctnetlink rcu_read_lock &n->list_lock irq_context: 0 &pipe->mutex/1 nfnl_subsys_ctnetlink rcu_read_lock &n->list_lock &c->lock irq_context: 0 &pipe->mutex/1 nfnl_subsys_ctnetlink rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 batched_entropy_u8.lock crngs.lock irq_context: 0 &pipe->mutex/1 nfnl_subsys_ctnetlink rcu_node_0 irq_context: 0 &pipe->mutex/1 nfnl_subsys_ctnetlink &rq->__lock irq_context: 0 &pipe->mutex/1 nfnl_subsys_ctnetlink &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &pipe->mutex/1 nfnl_subsys_ctnetlink rcu_read_lock rcu_node_0 irq_context: 0 &pipe->mutex/1 nfnl_subsys_ctnetlink rcu_read_lock &rcu_state.gp_wq irq_context: 0 &pipe->mutex/1 nfnl_subsys_ctnetlink rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 &pipe->mutex/1 nfnl_subsys_ctnetlink rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 &pipe->mutex/1 nfnl_subsys_ctnetlink rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &pipe->mutex/1 nfnl_subsys_ctnetlink rcu_read_lock &rq->__lock irq_context: 0 &pipe->mutex/1 nfnl_subsys_ctnetlink rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &bridge_netdev_addr_lock_key &____s->seqcount irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock fill_pool_map-wait-type-override 
&n->list_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 &pipe->mutex/1 &mm->mmap_lock rcu_read_lock &rcu_state.expedited_wq irq_context: 0 &pipe->mutex/1 &mm->mmap_lock rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 &pipe->mutex/1 &mm->mmap_lock rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 &pipe->mutex/1 &mm->mmap_lock rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-crypt-wg2#9 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh &data->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &ei->xattr_sem &rq->__lock irq_context: 0 (wq_completion)wg-crypt-wg0#9 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh &n->list_lock irq_context: 0 (wq_completion)wg-crypt-wg0#9 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh &n->list_lock &c->lock irq_context: 0 __ip_vs_mutex.wait_lock irq_context: 0 &sb->s_type->i_mutex_key#10 rtnl_mutex rtnl_mutex.wait_lock irq_context: 0 __ip_vs_mutex console_owner_lock irq_context: 0 __ip_vs_mutex console_owner irq_context: 0 clock-AF_INET irq_context: 0 cb_lock genl_mutex &rcu_state.expedited_wq irq_context: 0 cb_lock genl_mutex &rcu_state.expedited_wq &p->pi_lock irq_context: 0 cb_lock genl_mutex &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 cb_lock genl_mutex &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-crypt-wg2#9 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)wg-crypt-wg2#9 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &____s->seqcount#7 irq_context: 0 (wq_completion)wg-crypt-wg2#9 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &nf_nat_locks[i] irq_context: 0 (wq_completion)wg-crypt-wg2#9 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &nf_conntrack_locks[i] irq_context: 0 (wq_completion)wg-crypt-wg2#9 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 irq_context: 0 (wq_completion)wg-crypt-wg2#9 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 batched_entropy_u8.lock irq_context: 0 (wq_completion)wg-crypt-wg2#9 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh &c->lock irq_context: 0 (wq_completion)wg-crypt-wg2#9 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh &n->list_lock irq_context: 0 (wq_completion)wg-crypt-wg2#9 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh &n->list_lock &c->lock irq_context: 0 &pipe->mutex/1 nfnl_subsys_ctnetlink rcu_read_lock &rcu_state.expedited_wq irq_context: 0 &pipe->mutex/1 nfnl_subsys_ctnetlink rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 &pipe->mutex/1 nfnl_subsys_ctnetlink rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 &pipe->mutex/1 nfnl_subsys_ctnetlink rcu_read_lock 
rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)kblockd (work_completion)(&(&q->requeue_work)->work) rcu_read_lock &dd->lock irq_context: 0 &pipe->mutex/1 nfnl_subsys_ctnetlink rcu_read_lock &____s->seqcount#2 irq_context: 0 &pipe->mutex/1 nfnl_subsys_ctnetlink rcu_read_lock &____s->seqcount irq_context: 0 &pipe->mutex/1 purge_vmap_area_lock irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock sb_pagefaults &journal->j_state_lock &journal->j_wait_transaction_locked irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock sb_pagefaults jbd2_handle irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock sb_pagefaults &journal->j_state_lock irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock sb_pagefaults &journal->j_state_lock tk_core.seq.seqcount irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock sb_pagefaults &journal->j_state_lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock sb_pagefaults &journal->j_state_lock &base->lock irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock sb_pagefaults &journal->j_state_lock &base->lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock sb_pagefaults jbd2_handle &ret->b_state_lock irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock sb_pagefaults jbd2_handle bit_wait_table + i irq_context: 0 (wq_completion)wg-crypt-wg2#9 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)wg-crypt-wg2#9 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &data->lock irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock rcu_read_lock &rcu_state.expedited_wq irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 mapping.invalidate_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 sb_writers#4 mapping.invalidate_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 mapping.invalidate_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ret->b_state_lock bit_wait_table + i irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock sb_pagefaults jbd2_handle &ret->b_state_lock &journal->j_list_lock irq_context: 0 &xa->xa_lock#6 &pcp->lock &zone->lock irq_context: 0 &xa->xa_lock#6 &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &xa->xa_lock#6 &____s->seqcount irq_context: 0 (wq_completion)wg-crypt-wg2#9 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &sb->s_type->i_lock_key#22 &xa->xa_lock#6 rcu_read_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 sb_writers#4 sb_writers#4 remove_cache_srcu &c->lock irq_context: 0 sb_writers#4 sb_writers#4 remove_cache_srcu &n->list_lock irq_context: 0 sb_writers#4 sb_writers#4 remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#4 sb_writers#4 remove_cache_srcu &obj_hash[i].lock irq_context: 0 sb_writers#4 sb_writers#4 remove_cache_srcu &rq->__lock irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem 
fill_pool_map-wait-type-override &rq->__lock irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem fill_pool_map-wait-type-override &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sig->cred_guard_mutex tomoyo_ss mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 &sig->cred_guard_mutex tomoyo_ss mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_power_efficient (check_lifetime_work).work rcu_read_lock &rcu_state.gp_wq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 remove_cache_srcu rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 remove_cache_srcu rcu_read_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 remove_cache_srcu rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &p->lock &of->mutex kn->active#5 fs_reclaim &rq->__lock irq_context: 0 &p->lock &of->mutex kn->active#5 fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_power_efficient (check_lifetime_work).work rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 (wq_completion)events_power_efficient (check_lifetime_work).work rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events_power_efficient (check_lifetime_work).work rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 __ip_vs_mutex &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 fill_pool_map-wait-type-override &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)events (work_completion)(&nlk->work) &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&nlk->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#2 irq_context: 0 sb_writers#4 mapping.invalidate_lock fs_reclaim &rq->__lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&(&conn->disc_work)->work) irq_context: 0 (wq_completion)hci2#2 (work_completion)(&(&conn->disc_work)->work) &c->lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&(&conn->disc_work)->work) &list->lock#9 irq_context: 0 (wq_completion)hci2#2 (work_completion)(&(&conn->disc_work)->work) rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)hci2#2 (work_completion)(&(&conn->disc_work)->work) rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&(&conn->disc_work)->work) rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&(&conn->disc_work)->work) rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&(&conn->disc_work)->work) rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 batched_entropy_u8.lock crngs.lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock_bh noop_qdisc.q.lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock_bh &data->lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock 
rcu_read_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock_bh pool_lock#2 irq_context: 0 sb_writers#4 &rcu_state.expedited_wq &p->pi_lock irq_context: 0 sb_writers#4 &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 fill_pool_map-wait-type-override rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)wg-crypt-wg0#9 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)wg-crypt-wg0#9 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-crypt-wg0#9 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rlock-AF_PACKET irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7 rename_lock.seqcount irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7 fs_reclaim irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7 stock_lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7 &rq->__lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7 &c->lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7 &dentry->d_lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7 rcu_read_lock rename_lock.seqcount irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7 &root->kernfs_iattr_rwsem irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7 &root->kernfs_rwsem irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7 &root->kernfs_rwsem &rq->__lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7 &root->kernfs_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7 &dentry->d_lock &wq#2 irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7 rcu_read_lock &dentry->d_lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7 &dentry->d_lock &lru->node[i].lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7 tomoyo_ss &c->lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &sb->s_type->i_mutex_key#12 &sb->s_type->i_mutex_key#12/4 rcu_read_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)wg-crypt-wg1#4 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &n->list_lock irq_context: 0 (wq_completion)wg-crypt-wg1#4 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &n->list_lock &c->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 remove_cache_srcu &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&(&hdev->cmd_timer)->work) &rq->__lock irq_context: 0 (wq_completion)bond4 (work_completion)(&(&bond->mii_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss mmu_notifier_invalidate_range_start &rq->__lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle 
&ei->i_data_sem &ret->b_state_lock bit_wait_table + i irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem &ret->b_state_lock bit_wait_table + i &p->pi_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem &ret->b_state_lock bit_wait_table + i &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem &ret->b_state_lock bit_wait_table + i &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem rtnl_mutex &rnp->exp_wq[0] irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem &ret->b_state_lock bit_wait_table + i &p->pi_lock &cfs_rq->removed.lock irq_context: 0 cb_lock genl_mutex &meta->lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &c->lock#2 irq_context: 0 sb_writers#4 sb_writers#4 jbd2_handle rcu_node_0 irq_context: 0 &sb->s_type->i_lock_key#14 bit_wait_table + i irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET &meta->lock irq_context: 0 &mm->mmap_lock &anon_vma->rwsem &rcu_state.expedited_wq &p->pi_lock irq_context: 0 loop_validate_mutex pool_lock#2 irq_context: 0 rtnl_mutex gdp_mutex &rq->__lock irq_context: 0 rtnl_mutex gdp_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sig->cred_guard_mutex tomoyo_ss &mm->mmap_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock nlk_cb_mutex-GENERIC genl_mutex rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 cb_lock nlk_cb_mutex-GENERIC genl_mutex rtnl_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 proto_tab_lock &n->list_lock irq_context: 0 proto_tab_lock &n->list_lock &c->lock irq_context: 0 cb_lock nlk_cb_mutex-GENERIC genl_mutex &p->pi_lock &cfs_rq->removed.lock irq_context: 0 cb_lock nlk_cb_mutex-GENERIC genl_mutex &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)wg-crypt-wg0#8 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &n->list_lock irq_context: 0 (wq_completion)wg-crypt-wg0#8 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &n->list_lock &c->lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem key#3 irq_context: 0 sb_writers#4 tomoyo_ss mmu_notifier_invalidate_range_start &rq->__lock &cfs_rq->removed.lock irq_context: 0 &xs->mutex &mm->mmap_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 &xs->mutex &mm->mmap_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &xs->mutex &mm->mmap_lock rcu_node_0 irq_context: softirq (&app->join_timer)#2 fill_pool_map-wait-type-override &n->list_lock irq_context: softirq (&app->join_timer)#2 fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 &xs->mutex &p->pi_lock &cfs_rq->removed.lock irq_context: 0 &xs->mutex free_vmap_area_lock &obj_hash[i].lock irq_context: 0 &xs->mutex free_vmap_area_lock pool_lock#2 irq_context: 0 &xs->mutex &cfs_rq->removed.lock irq_context: 0 (wq_completion)events_long (work_completion)(&(&br->gc_work)->work) rcu_read_lock &br->hash_lock irq_context: 0 (wq_completion)events_long (work_completion)(&(&br->gc_work)->work) rcu_read_lock &br->hash_lock rcu_read_lock rhashtable_bucket irq_context: 0 (wq_completion)events_long 
(work_completion)(&(&br->gc_work)->work) rcu_read_lock &br->hash_lock pool_lock#2 irq_context: 0 (wq_completion)events_long (work_completion)(&(&br->gc_work)->work) rcu_read_lock &br->hash_lock batched_entropy_u8.lock irq_context: 0 (wq_completion)events_long (work_completion)(&(&br->gc_work)->work) rcu_read_lock &br->hash_lock kfence_freelist_lock irq_context: 0 (wq_completion)events_long (work_completion)(&(&br->gc_work)->work) rcu_read_lock &br->hash_lock nl_table_lock irq_context: 0 (wq_completion)events_long (work_completion)(&(&br->gc_work)->work) rcu_read_lock &br->hash_lock &obj_hash[i].lock irq_context: 0 (wq_completion)events_long (work_completion)(&(&br->gc_work)->work) rcu_read_lock &br->hash_lock &meta->lock irq_context: 0 (wq_completion)events_long (work_completion)(&(&br->gc_work)->work) rcu_read_lock &br->hash_lock nl_table_wait.lock irq_context: 0 sb_writers#3 rcu_read_lock &rcu_state.gp_wq irq_context: 0 sb_writers#3 rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 sb_writers#3 rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#3 rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-crypt-wg1#9 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh &data->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg1#9 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh &data->lock &base->lock irq_context: 0 (wq_completion)wg-crypt-wg1#9 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh &data->lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg1#9 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)wg-crypt-wg1#9 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg1#9 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 &p->lock &of->mutex kn->active#5 pgd_lock irq_context: 0 &p->lock &of->mutex kn->active#5 rcu_read_lock pool_lock#2 irq_context: 0 &p->lock &of->mutex kn->active#5 key irq_context: 0 &p->lock &of->mutex kn->active#5 pcpu_lock irq_context: 0 &p->lock &of->mutex kn->active#5 percpu_counters_lock irq_context: 0 &p->lock &of->mutex kn->active#5 &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)wg-crypt-wg2#9 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)wg-crypt-wg2#9 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg2#9 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)wg-kex-wg1#17 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh &data->lock irq_context: 0 (wq_completion)wg-kex-wg1#17 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg1#17 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh pool_lock#2 irq_context: 0 (wq_completion)wg-kex-wg2#17 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 
0 (wq_completion)wg-kex-wg2#17 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh pool_lock#2 irq_context: 0 (wq_completion)wg-kex-wg2#17 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh &c->lock irq_context: 0 (wq_completion)wg-kex-wg2#17 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)wg-kex-wg2#17 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &data->lock irq_context: 0 (wq_completion)wg-kex-wg2#17 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg2#17 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh pool_lock#2 irq_context: 0 (wq_completion)wg-crypt-wg2#9 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)wg-crypt-wg2#9 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-kex-wg2#17 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh &data->lock irq_context: 0 rtnl_mutex (inet6addr_validator_chain).rwsem &rq->__lock irq_context: 0 cb_lock nlk_cb_mutex-GENERIC genl_mutex rtnl_mutex &cfs_rq->removed.lock irq_context: 0 cb_lock nlk_cb_mutex-GENERIC genl_mutex rtnl_mutex &obj_hash[i].lock irq_context: 0 cb_lock nlk_cb_mutex-GENERIC genl_mutex rtnl_mutex pool_lock#2 irq_context: 0 cb_lock nlk_cb_mutex-GENERIC &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock remove_cache_srcu rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock remove_cache_srcu rcu_read_lock &rq->__lock irq_context: 0 sb_writers#4 &iint->mutex pool_lock#2 irq_context: 0 sb_writers#4 &iint->mutex rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#4 &iint->mutex rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 sb_writers#4 &iint->mutex rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &iint->mutex rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#4 &iint->mutex &obj_hash[i].lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 rcu_read_lock pgd_lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 rcu_read_lock stock_lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 rcu_read_lock key irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 rcu_read_lock pcpu_lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 rcu_read_lock percpu_counters_lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 rcu_read_lock pcpu_lock stock_lock irq_context: 0 cb_lock nlk_cb_mutex-GENERIC genl_mutex rtnl_mutex &sdata->sec_mtx irq_context: 0 cb_lock nlk_cb_mutex-GENERIC genl_mutex rtnl_mutex &sdata->sec_mtx &sec->lock irq_context: 0 (wq_completion)wg-crypt-wg0#9 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 rtnl_mutex (inetaddr_validator_chain).rwsem 
&rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &rcu_state.gp_wq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &rcu_state.gp_wq &p->pi_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock rcu_read_lock rcu_read_lock &cfs_rq->removed.lock irq_context: 0 &mm->mmap_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_TIPC slock-AF_TIPC &data->lock irq_context: 0 sk_lock-AF_TIPC &cfs_rq->removed.lock irq_context: 0 sk_lock-AF_TIPC &mm->mmap_lock &cfs_rq->removed.lock irq_context: 0 sk_lock-AF_TIPC &mm->mmap_lock pool_lock#2 irq_context: 0 sk_lock-AF_TIPC &mm->mmap_lock &p->pi_lock irq_context: 0 &smc->clcsock_release_lock ebt_mutex irq_context: 0 &smc->clcsock_release_lock ebt_mutex &mm->mmap_lock irq_context: 0 sk_lock-AF_TIPC &mm->mmap_lock ptlock_ptr(page) irq_context: 0 sk_lock-AF_TIPC &mm->mmap_lock ptlock_ptr(page)#2 lock#4 &obj_hash[i].lock irq_context: 0 (wq_completion)vsock-loopback (work_completion)(&vsock->pkt_work) sk_lock-AF_VSOCK &data->lock irq_context: 0 rtnl_mutex &block->cb_lock &rq->__lock irq_context: 0 rtnl_mutex &block->cb_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_TIPC &mm->mmap_lock rcu_node_0 irq_context: 0 sk_lock-AF_TIPC &mm->mmap_lock &rcu_state.expedited_wq irq_context: 0 &pipe->mutex/1 &pipe->wr_wait &p->pi_lock irq_context: 0 &pipe->mutex/1 &pipe->wr_wait &p->pi_lock &rq->__lock irq_context: 0 &pipe->mutex/1 &pipe->wr_wait &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &pipe->mutex/1 rcu_read_lock &sighand->siglock irq_context: 0 &pipe->mutex/1 &sighand->siglock irq_context: 0 &pipe->mutex/1 &sighand->siglock stock_lock irq_context: 0 &pipe->mutex/1 &sighand->siglock pool_lock#2 irq_context: 0 &pipe->mutex/1 &sighand->siglock &p->pi_lock irq_context: 0 &pipe->mutex/1 &sighand->siglock &p->pi_lock &rq->__lock irq_context: 0 &pipe->mutex/1 &sighand->siglock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 tasklist_lock &sighand->siglock stock_lock irq_context: 0 tasklist_lock &sighand->siglock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem lock#4 rcu_read_lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem lock#4 &obj_hash[i].lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &obj_hash[i].lock pool_lock irq_context: 0 sk_lock-AF_TIPC &mm->mmap_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 sk_lock-AF_TIPC &mm->mmap_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 sk_lock-AF_TIPC &mm->mmap_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_TIPC slock-AF_TIPC &meta->lock irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock sb_pagefaults remove_cache_srcu &pcp->lock &zone->lock irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock sb_pagefaults remove_cache_srcu &pcp->lock &zone->lock &____s->seqcount irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 &pcp->lock &zone->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 &pcp->lock &zone->lock &____s->seqcount irq_context: 0 sb_writers#8 &of->mutex kn->active#5 
rcu_read_lock &cfs_rq->removed.lock irq_context: 0 sb_writers#8 &of->mutex kn->active#5 rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound connector_reaper_work pool_lock irq_context: 0 (wq_completion)events (work_completion)(&data->fib_event_work) &data->fib_lock remove_cache_srcu pool_lock#2 irq_context: softirq rcu_read_lock rcu_read_lock init_task.mems_allowed_seq.seqcount irq_context: 0 pernet_ops_rwsem rtnl_mutex &ht->mutex &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 jbd2_handle &meta_group_info[i]->alloc_sem &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 jbd2_handle &meta_group_info[i]->alloc_sem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &ei->i_data_sem &n->list_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &ei->i_data_sem &n->list_lock &c->lock irq_context: 0 sb_writers#4 tomoyo_ss rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#4 tomoyo_ss rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 sb_writers#4 tomoyo_ss rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_TIPC slock-AF_TIPC kfence_freelist_lock irq_context: 0 sk_lock-AF_TIPC slock-AF_TIPC rcu_read_lock &ei->socket.wq.wait &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 sk_lock-AF_TIPC &mm->mmap_lock &c->lock irq_context: softirq net/netrom/nr_loopback.c:18 &obj_hash[i].lock pool_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock fill_pool_map-wait-type-override rcu_node_0 irq_context: softirq net/netrom/nr_loopback.c:18 slock-AF_NETROM &n->list_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock fill_pool_map-wait-type-override &rq->__lock irq_context: softirq net/netrom/nr_loopback.c:18 slock-AF_NETROM &n->list_lock &c->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock fill_pool_map-wait-type-override &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &cfs_rq->removed.lock irq_context: 0 sk_lock-AF_TIPC &mm->mmap_lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 sk_lock-AF_TIPC &mm->mmap_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh rcu_read_lock rlock-AF_PACKET irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock rcu_read_lock &____s->seqcount irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock rcu_read_lock rcu_read_lock pool_lock#2 irq_context: softirq &(&hctx->run_work)->timer rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock rcu_read_lock rlock-AF_PACKET irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh &sch->q.lock &____s->seqcount#2 irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh &sch->q.lock &____s->seqcount irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &xa->xa_lock#6 &n->list_lock irq_context: 0 sk_lock-AF_TIPC &mm->mmap_lock 
&rq->__lock &cfs_rq->removed.lock irq_context: softirq (&peer->timer_retransmit_handshake) rcu_read_lock_bh rcu_read_lock &pool->lock/1 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock &obj_hash[i].lock pool_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-kex-wg2#17 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh &n->list_lock irq_context: 0 (wq_completion)wg-kex-wg2#17 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh &n->list_lock &c->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &rcu_state.expedited_wq irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) &c->lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle &obj_hash[i].lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle pool_lock#2 irq_context: 0 sb_writers#4 &____s->seqcount#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &sbi->s_orphan_lock mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &sbi->s_orphan_lock &____s->seqcount irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &sbi->s_orphan_lock rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &sbi->s_orphan_lock rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &sbi->s_orphan_lock rcu_read_lock &rq->__lock irq_context: softirq &fq->mq_flush_lock &x->wait#26 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &journal->j_checkpoint_mutex irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &journal->j_checkpoint_mutex &journal->j_state_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &journal->j_checkpoint_mutex &journal->j_state_lock &journal->j_list_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &journal->j_checkpoint_mutex &journal->j_state_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &journal->j_checkpoint_mutex &journal->j_state_lock &journal->j_list_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &journal->j_checkpoint_mutex tk_core.seq.seqcount irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &journal->j_checkpoint_mutex &fq->mq_flush_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &journal->j_checkpoint_mutex &fq->mq_flush_lock tk_core.seq.seqcount irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &journal->j_checkpoint_mutex &fq->mq_flush_lock &q->requeue_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &journal->j_checkpoint_mutex &fq->mq_flush_lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &journal->j_checkpoint_mutex &fq->mq_flush_lock rcu_read_lock &pool->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &journal->j_checkpoint_mutex &fq->mq_flush_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &journal->j_checkpoint_mutex &fq->mq_flush_lock rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 
sb_writers#4 &sb->s_type->i_mutex_key#8 &journal->j_checkpoint_mutex &fq->mq_flush_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &journal->j_checkpoint_mutex &fq->mq_flush_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &journal->j_checkpoint_mutex &fq->mq_flush_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &journal->j_checkpoint_mutex &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &journal->j_checkpoint_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &journal->j_checkpoint_mutex &lock->wait_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &journal->j_checkpoint_mutex &x->wait#26 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &journal->j_checkpoint_mutex mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &journal->j_checkpoint_mutex pool_lock#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &journal->j_checkpoint_mutex bit_wait_table + i irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &journal->j_state_lock &lock->wait_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &journal->j_state_lock &p->pi_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &journal->j_state_lock &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &journal->j_state_lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sig->cred_guard_mutex &sb->s_type->i_mutex_key#8 &rq->__lock irq_context: 0 &sig->cred_guard_mutex &sb->s_type->i_mutex_key#8 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 mapping.invalidate_lock &xa->xa_lock#6 &pcp->lock &zone->lock irq_context: 0 sb_writers#4 mapping.invalidate_lock &xa->xa_lock#6 &pcp->lock &zone->lock &____s->seqcount irq_context: 0 sb_writers#4 sb_writers#4 remove_cache_srcu pool_lock#2 irq_context: 0 sb_writers#4 sb_writers#4 remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &sbi->s_orphan_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &rq->__lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &rcu_state.expedited_wq &p->pi_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 fill_pool_map-wait-type-override rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock fill_pool_map-wait-type-override pool_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 fill_pool_map-wait-type-override rcu_read_lock &rq->__lock irq_context: 0 &type->i_mutex_dir_key#7 &n->list_lock irq_context: 0 &fq->mq_flush_lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 &type->i_mutex_dir_key#7 &n->list_lock &c->lock irq_context: 0 &fq->mq_flush_lock 
fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)wg-crypt-wg2#7 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#10 irq_context: 0 &type->i_mutex_dir_key#7 &dentry->d_lock &wq#2 irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &list->lock#17 irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex nf_hook_mutex rcu_read_lock &rq->__lock irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex nf_hook_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 fill_pool_map-wait-type-override &c->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_data_sem rcu_read_lock &rcu_state.gp_wq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_data_sem rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_data_sem rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_data_sem rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_NETROM &c->lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_NETROM &n->list_lock irq_context: 0 &type->i_mutex_dir_key#4 &cfs_rq->removed.lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ei->i_data_sem rcu_read_lock &rcu_state.expedited_wq irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 fs_reclaim mmu_notifier_invalidate_range_start &cfs_rq->removed.lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 fs_reclaim mmu_notifier_invalidate_range_start &obj_hash[i].lock irq_context: 0 sk_lock-AF_NETROM rcu_read_lock rcu_node_0 irq_context: 0 sk_lock-AF_NETROM rcu_read_lock &rq->__lock irq_context: 0 sk_lock-AF_NETROM rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq (&peer->hb_timer) slock-AF_INET6 rcu_read_lock rcu_read_lock &nf_conntrack_locks[i] batched_entropy_u8.lock crngs.lock irq_context: 0 &pipe->mutex/1 nfnl_subsys_ctnetlink rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 &pipe->mutex/1 nfnl_subsys_ctnetlink rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 &pipe->mutex/1 nfnl_subsys_ctnetlink rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem nf_hook_mutex &rq->__lock irq_context: 0 &pipe->mutex/1 rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &sbi->s_orphan_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &ei->i_es_lock &c->lock irq_context: 0 &pipe->mutex/1 purge_vmap_area_lock &obj_hash[i].lock irq_context: 0 &pipe->mutex/1 purge_vmap_area_lock pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &tn->lock &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &tn->lock &rq->__lock 
&per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &dd->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem rcu_read_lock &pool->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 remove_cache_srcu rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#4 remove_cache_srcu rcu_read_lock &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &journal->j_state_lock &journal->j_wait_transaction_locked irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &____s->seqcount irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle pool_lock#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) rcu_node_0 irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &rcu_state.expedited_wq irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem rtnl_mutex bus_type_sem &rq->__lock irq_context: 0 pernet_ops_rwsem rtnl_mutex bus_type_sem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_lock_key#22 &xa->xa_lock#6 &obj_hash[i].lock pool_lock irq_context: 0 &pipe->mutex/1 per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock &rcu_state.expedited_wq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock 
&per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 mapping.invalidate_lock rcu_read_lock &obj_hash[i].lock pool_lock irq_context: 0 &pipe->mutex/1 &sighand->siglock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: softirq (&sk->sk_timer)#2 irq_context: softirq (&sk->sk_timer)#2 slock-AF_NETROM irq_context: softirq (&sk->sk_timer)#2 nr_list_lock irq_context: softirq (&sk->sk_timer)#2 &obj_hash[i].lock irq_context: softirq (&sk->sk_timer)#2 wlock-AF_NETROM irq_context: softirq (&sk->sk_timer)#2 &list->lock#22 irq_context: softirq (&sk->sk_timer)#2 rlock-AF_NETROM irq_context: 0 &sb->s_type->i_lock_key &xa->xa_lock#6 irq_context: 0 &lruvec->lru_lock irq_context: 0 &info->lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_NETROM &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_NETROM &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &pipe->mutex/1 &pipe->wr_wait &p->pi_lock &cfs_rq->removed.lock irq_context: 0 &pipe->mutex/1 &sighand->siglock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 jbd2_handle &ret->b_state_lock bit_wait_table + i irq_context: 0 &pipe->mutex/1 rcu_read_lock &c->lock irq_context: 0 &pipe->mutex/1 rcu_read_lock &rcu_state.gp_wq irq_context: 0 &pipe->mutex/1 rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 &pipe->mutex/1 rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 &pipe->mutex/1 rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem rtnl_mutex remove_cache_srcu rcu_read_lock rcu_node_0 irq_context: 0 pernet_ops_rwsem rtnl_mutex remove_cache_srcu rcu_read_lock &rq->__lock irq_context: 0 pernet_ops_rwsem rtnl_mutex remove_cache_srcu rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex nf_hook_mutex rcu_read_lock rcu_node_0 irq_context: 0 &pipe->mutex/1 &sighand->siglock &p->pi_lock &cfs_rq->removed.lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &sb->s_type->i_mutex_key#12 rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &sb->s_type->i_mutex_key#12 rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)wg-crypt-wg0#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)wg-crypt-wg0#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &rq->__lock irq_context: 0 &pipe->mutex/1 sk_lock-AF_NETLINK rcu_read_lock rcu_read_lock &pool->lock irq_context: 0 &pipe->mutex/1 sk_lock-AF_NETLINK rcu_read_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 &pipe->mutex/1 remove_cache_srcu irq_context: 0 
&pipe->mutex/1 remove_cache_srcu quarantine_lock irq_context: 0 &pipe->mutex/1 remove_cache_srcu &c->lock irq_context: 0 &pipe->mutex/1 remove_cache_srcu &n->list_lock irq_context: 0 &pipe->mutex/1 remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 &pipe->mutex/1 remove_cache_srcu &obj_hash[i].lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->mcast.work)->work) rcu_node_0 irq_context: 0 &pipe->mutex/1 nfnl_subsys_ctnetlink batched_entropy_u8.lock irq_context: 0 &pipe->mutex/1 nfnl_subsys_ctnetlink kfence_freelist_lock irq_context: softirq net/netrom/nr_loopback.c:18 slock-AF_NETROM &____s->seqcount#2 irq_context: softirq net/netrom/nr_loopback.c:18 slock-AF_NETROM &____s->seqcount irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 tomoyo_ss remove_cache_srcu &pcp->lock &zone->lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 tomoyo_ss remove_cache_srcu &pcp->lock &zone->lock &____s->seqcount irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem stock_lock rcu_read_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem uevent_sock_mutex &____s->seqcount#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem uevent_sock_mutex &____s->seqcount irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &____s->seqcount#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh &r->producer_lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh rcu_read_lock &pool->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_read_lock &pool->lock/1 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&barr->work) &x->wait#10 &p->pi_lock &rq->__lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&barr->work) &x->wait#10 &p->pi_lock &rq->__lock &base->lock irq_context: 0 (wq_completion)events (work_completion)(&barr->work) &x->wait#10 &p->pi_lock &rq->__lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-crypt-wg0#9 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh pool_lock#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &list->lock#17 irq_context: 0 (wq_completion)mld 
(work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh &r->producer_lock#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh rcu_read_lock &pool->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg0#9 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) pool_lock#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 mmu_notifier_invalidate_range_start &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 mmu_notifier_invalidate_range_start &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle rcu_read_lock &rcu_state.expedited_wq irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &list->lock#17 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh &r->producer_lock#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh rcu_read_lock &pool->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 (wq_completion)events (work_completion)(&barr->work) &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&barr->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->nc.work)->work) rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &mm->mmap_lock &n->list_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &mm->mmap_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)rcu_gp 
(work_completion)(&rew->rew_work) rcu_state.exp_wake_mutex rcu_state.exp_wake_mutex.wait_lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&rew->rew_work) rcu_state.exp_wake_mutex &pool->lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&rew->rew_work) rcu_state.exp_wake_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&rew->rew_work) rcu_state.exp_wake_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)rcu_gp (work_completion)(&rew->rew_work) rcu_state.exp_wake_mutex.wait_lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&rew->rew_work) &p->pi_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&pwq->unbound_release_work) rcu_state.exp_mutex rcu_read_lock &rq->__lock irq_context: 0 bt_proto_lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&pwq->unbound_release_work) rcu_state.exp_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)rcu_gp (work_completion)(&rew->rew_work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&rew->rew_work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 bt_proto_lock fill_pool_map-wait-type-override pool_lock irq_context: 0 &net->packet.sklist_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)gid-cache-wq (work_completion)(&work->work) devices_rwsem &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: softirq rcu_read_lock hwsim_radio_lock init_task.mems_allowed_seq.seqcount irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#40 rcu_read_lock &xa->xa_lock#6 &pl->lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#40 rcu_read_lock &xa->xa_lock#6 &pl->lock key#12 irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#40 rcu_read_lock &xa->xa_lock#6 &wb->work_lock &obj_hash[i].lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#40 rcu_read_lock &xa->xa_lock#6 &wb->work_lock &base->lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#40 rcu_read_lock &xa->xa_lock#6 &wb->work_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#40 rcu_read_lock rcu_read_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 &mm->mmap_lock sb_pagefaults mapping.invalidate_lock rcu_read_lock &rcu_state.gp_wq irq_context: 0 &mm->mmap_lock sb_pagefaults mapping.invalidate_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 &mm->mmap_lock sb_pagefaults mapping.invalidate_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 &mm->mmap_lock sb_pagefaults 
mapping.invalidate_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock sb_pagefaults mapping.invalidate_lock rcu_read_lock &rcu_state.expedited_wq irq_context: 0 &mm->mmap_lock sb_pagefaults mapping.invalidate_lock rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 &mm->mmap_lock sb_pagefaults mapping.invalidate_lock rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 &mm->mmap_lock sb_pagefaults mapping.invalidate_lock rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx rcu_read_lock &rcu_state.expedited_wq irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &pipe->mutex/1 remove_cache_srcu pool_lock#2 irq_context: softirq (&rxnet->peer_keepalive_timer) rcu_read_lock &pool->lock/1 &p->pi_lock &cfs_rq->removed.lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)wg-crypt-wg2#7 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-crypt-wg2#7 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: 0 &pipe->mutex/1 nfnl_subsys_ctnetlink &lock->wait_lock irq_context: 0 &pipe->mutex/1 &p->pi_lock irq_context: 0 &pipe->mutex/1 &p->pi_lock &rq->__lock irq_context: 0 &pipe->mutex/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &pipe->mutex/1 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)wg-crypt-wg1#4 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh &data->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex dev_pm_qos_sysfs_mtx &root->kernfs_rwsem &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex dev_pm_qos_sysfs_mtx &root->kernfs_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq (&icsk->icsk_delack_timer) slock-AF_INET &____s->seqcount#2 irq_context: 0 &ei->i_data_sem &mapping->private_lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &anon_vma->rwsem &rcu_state.expedited_wq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &dev->mutex &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &dev->mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cgroup_threadgroup_rwsem freezer_mutex rcu_node_0 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->orig_work)->work) 
&rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock &stop_pi_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock &stop_pi_lock &rq->__lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->mcast.work)->work) &rcu_state.expedited_wq irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->mcast.work)->work) &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->mcast.work)->work) &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->mcast.work)->work) &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sig->cred_guard_mutex &ei->xattr_sem rcu_read_lock rcu_node_0 irq_context: 0 &sig->cred_guard_mutex &ei->xattr_sem rcu_read_lock &rq->__lock irq_context: 0 &sig->cred_guard_mutex &ei->xattr_sem rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock remove_cache_srcu &____s->seqcount irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock &nf_nat_locks[i] irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &____s->seqcount#2 irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &____s->seqcount irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &sb->s_type->i_lock_key &xa->xa_lock#6 rcu_read_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &type->i_mutex_dir_key#3 sb_writers#4 rcu_node_0 irq_context: 0 sb_writers#8 &of->mutex kn->active#5 fs_reclaim &rq->__lock irq_context: 0 sb_writers#8 &of->mutex kn->active#5 fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex kfence_freelist_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 tomoyo_ss batched_entropy_u8.lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 tomoyo_ss kfence_freelist_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 tomoyo_ss &meta->lock irq_context: 0 sk_lock-AF_INET6 &tcp_hashinfo.bhash[i].lock stock_lock irq_context: 0 sk_lock-AF_INET6 &tcp_hashinfo.bhash[i].lock pool_lock#2 irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock &ct->lock irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock (console_sem).lock irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock console_lock console_srcu console_owner_lock irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock console_lock console_srcu console_owner irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock console_lock console_srcu console_owner &port_lock_key irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock 
rcu_read_lock console_lock console_srcu console_owner console_owner_lock irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 slock-AF_INET6 &tcp_hashinfo.bhash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 slock-AF_INET6 &tcp_hashinfo.bhash[i].lock stock_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 slock-AF_INET6 &tcp_hashinfo.bhash[i].lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 slock-AF_INET6 &tcp_hashinfo.bhash[i].lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 slock-AF_INET6 &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 slock-AF_INET6 &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock stock_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 slock-AF_INET6 &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 slock-AF_INET6 &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock pool_lock#2 irq_context: 0 &pipe->mutex/1 rcu_node_0 irq_context: 0 &pipe->mutex/1 nfnl_subsys_ctnetlink rcu_read_lock quarantine_lock irq_context: 0 cgroup_threadgroup_rwsem freezer_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 remove_cache_srcu rcu_node_0 irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &n->list_lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &n->list_lock &c->lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock hwsim_radio_lock &n->list_lock irq_context: 0 &journal->j_checkpoint_mutex &fq->mq_flush_lock irq_context: 0 &journal->j_checkpoint_mutex &fq->mq_flush_lock &q->requeue_lock irq_context: 0 &journal->j_checkpoint_mutex &fq->mq_flush_lock &obj_hash[i].lock irq_context: 0 &journal->j_checkpoint_mutex &fq->mq_flush_lock rcu_read_lock &pool->lock irq_context: 0 &journal->j_checkpoint_mutex &fq->mq_flush_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 &journal->j_checkpoint_mutex &fq->mq_flush_lock rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 &journal->j_checkpoint_mutex &fq->mq_flush_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 &journal->j_checkpoint_mutex &fq->mq_flush_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 &journal->j_checkpoint_mutex &fq->mq_flush_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&tbl->gc_work)->work) &tbl->lock &n->lock &____s->seqcount#9 irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&tbl->gc_work)->work) &tbl->lock rcu_read_lock lock#8 irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&tbl->gc_work)->work) &tbl->lock &____s->seqcount#2 irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&tbl->gc_work)->work) &tbl->lock &____s->seqcount irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&tbl->gc_work)->work) &tbl->lock &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)events_power_efficient 
(work_completion)(&(&tbl->gc_work)->work) &tbl->lock &pcp->lock &zone->lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&tbl->gc_work)->work) &tbl->lock &n->list_lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&tbl->gc_work)->work) &tbl->lock &n->list_lock &c->lock irq_context: 0 (wq_completion)wg-crypt-wg0#4 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg0#4 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 (wq_completion)wg-crypt-wg0#4 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &n->list_lock irq_context: 0 (wq_completion)wg-crypt-wg0#4 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)wg-crypt-wg0#4 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)wg-crypt-wg0#4 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)wg-crypt-wg0#4 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &n->lock irq_context: 0 (wq_completion)wg-crypt-wg0#4 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#9 irq_context: softirq (&peer->T3_rtx_timer) slock-AF_INET6 &zone->lock irq_context: softirq (&peer->T3_rtx_timer) slock-AF_INET6 &____s->seqcount irq_context: softirq (&peer->hb_timer) slock-AF_INET6 rcu_read_lock rcu_read_lock &____s->seqcount#2 irq_context: softirq (&peer->hb_timer) slock-AF_INET6 rcu_read_lock rcu_read_lock &____s->seqcount irq_context: softirq (&peer->hb_timer) slock-AF_INET6 rcu_read_lock rcu_read_lock &tbl->lock &n->lock irq_context: softirq (&peer->hb_timer) slock-AF_INET6 rcu_read_lock rcu_read_lock &tbl->lock &c->lock irq_context: softirq (&peer->hb_timer) slock-AF_INET6 rcu_read_lock rcu_read_lock &tbl->lock &n->list_lock irq_context: softirq (&peer->hb_timer) slock-AF_INET6 rcu_read_lock rcu_read_lock &tbl->lock &n->list_lock &c->lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_TUNNEL6#2 &obj_hash[i].lock pool_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock &stop_pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-kex-wg1#17 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 (wq_completion)wg-kex-wg1#17 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &n->list_lock irq_context: 0 (wq_completion)wg-kex-wg1#17 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)wg-kex-wg1#17 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 (wq_completion)wg-crypt-wg2#7 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh tk_core.seq.seqcount 
irq_context: 0 (wq_completion)wg-crypt-wg2#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &c->lock irq_context: 0 &xs->mutex &mm->mmap_lock &cfs_rq->removed.lock irq_context: 0 &xs->mutex &mm->mmap_lock &obj_hash[i].lock irq_context: softirq &fq->mq_flush_lock bit_wait_table + i &p->pi_lock &rq->__lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#10 irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &anon_vma->rwsem &rcu_state.expedited_wq &p->pi_lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &anon_vma->rwsem &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &anon_vma->rwsem &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &sbi->s_orphan_lock &____s->seqcount irq_context: 0 tasklist_lock stock_lock rcu_read_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 tomoyo_ss mmu_notifier_invalidate_range_start pgd_lock irq_context: 0 tomoyo_ss mmu_notifier_invalidate_range_start stock_lock irq_context: 0 tomoyo_ss mmu_notifier_invalidate_range_start &obj_hash[i].lock irq_context: 0 tomoyo_ss mmu_notifier_invalidate_range_start key irq_context: 0 tomoyo_ss mmu_notifier_invalidate_range_start pcpu_lock irq_context: 0 tomoyo_ss mmu_notifier_invalidate_range_start percpu_counters_lock irq_context: 0 tomoyo_ss mmu_notifier_invalidate_range_start pcpu_lock stock_lock irq_context: 0 &type->s_umount_key#22/1 &xa->xa_lock#12 &n->list_lock irq_context: 0 &type->s_umount_key#22/1 &xa->xa_lock#12 &n->list_lock &c->lock irq_context: 0 pid_caches_mutex slab_mutex &____s->seqcount#2 irq_context: 0 pid_caches_mutex slab_mutex &____s->seqcount irq_context: 0 pernet_ops_rwsem rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &ep->mtx fs_reclaim &rq->__lock irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 tomoyo_ss &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock &xa->xa_lock#12 irq_context: 0 &mm->mmap_lock &xa->xa_lock#12 pool_lock#2 irq_context: 0 &mm->mmap_lock &sb->s_type->i_lock_key irq_context: 0 &mm->mmap_lock &s->s_inode_list_lock irq_context: 0 &mm->mmap_lock batched_entropy_u32.lock irq_context: 0 sb_writers#5 &sb->s_type->i_mutex_key#12 rcu_node_0 irq_context: softirq (&peer->hb_timer) slock-AF_INET6 rcu_read_lock rcu_read_lock &tbl->lock pool_lock#2 irq_context: softirq (&peer->hb_timer) slock-AF_INET6 rcu_read_lock rcu_read_lock &tbl->lock &n->lock irq_context: softirq (&peer->hb_timer) slock-AF_INET6 rcu_read_lock rcu_read_lock &tbl->lock &n->lock &____s->seqcount#9 irq_context: softirq (&peer->hb_timer) slock-AF_INET6 rcu_read_lock rcu_read_lock &tbl->lock nl_table_lock irq_context: softirq (&peer->hb_timer) slock-AF_INET6 rcu_read_lock rcu_read_lock &tbl->lock &obj_hash[i].lock irq_context: softirq (&peer->hb_timer) slock-AF_INET6 rcu_read_lock rcu_read_lock &tbl->lock nl_table_wait.lock irq_context: softirq (&peer->hb_timer) slock-AF_INET6 rcu_read_lock rcu_read_lock &tbl->lock rcu_read_lock lock#8 irq_context: 0 &mm->mmap_lock &sb->s_type->i_lock_key &dentry->d_lock irq_context: softirq 
(&peer->hb_timer) slock-AF_INET6 rcu_read_lock rcu_read_lock &tbl->lock rcu_read_lock id_table_lock irq_context: softirq (&peer->hb_timer) slock-AF_INET6 rcu_read_lock rcu_read_lock &tbl->lock &dir->lock#2 irq_context: softirq (&peer->hb_timer) slock-AF_INET6 rcu_read_lock rcu_read_lock &tbl->lock krc.lock irq_context: softirq (&peer->hb_timer) slock-AF_INET6 rcu_read_lock rcu_read_lock &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)wg-kex-wg2#15 (work_completion)(&peer->transmit_handshake_work) &rq->__lock irq_context: 0 (wq_completion)wg-kex-wg1#20 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock rcu_read_lock_bh &peer->keypairs.keypair_update_lock &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg1#20 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock rcu_read_lock_bh &peer->keypairs.keypair_update_lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 fill_pool_map-wait-type-override batched_entropy_u8.lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 fill_pool_map-wait-type-override kfence_freelist_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 sctp_assocs_id_lock fill_pool_map-wait-type-override &n->list_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 sctp_assocs_id_lock fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 &xs->mutex &mm->mmap_lock &sem->wait_lock irq_context: 0 &xs->mutex &mm->mmap_lock rcu_read_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 pernet_ops_rwsem remove_cache_srcu rcu_node_0 irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle &ei->i_data_sem &ei->i_es_lock &n->list_lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle &ei->i_data_sem &ei->i_es_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#11 rcu_node_0 irq_context: 0 &mm->mmap_lock sb_pagefaults mapping.invalidate_lock &type->s_umount_key#31 rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem jbd2_handle &n->list_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem jbd2_handle &n->list_lock &c->lock irq_context: 0 &mm->mmap_lock sb_pagefaults mapping.invalidate_lock &type->s_umount_key#31 rcu_read_lock &rcu_state.gp_wq irq_context: 0 &mm->mmap_lock sb_pagefaults mapping.invalidate_lock &type->s_umount_key#31 rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 &mm->mmap_lock sb_pagefaults mapping.invalidate_lock &type->s_umount_key#31 rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 &mm->mmap_lock sb_pagefaults mapping.invalidate_lock &type->s_umount_key#31 rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock 
&per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem rcu_read_lock &c->lock irq_context: 0 &mm->mmap_lock sb_pagefaults mapping.invalidate_lock &type->s_umount_key#31 rcu_read_lock &rcu_state.expedited_wq irq_context: 0 sk_lock-AF_INET rcu_read_lock &obj_hash[i].lock pool_lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &ct->lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &nf_conntrack_locks[i] irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &nf_nat_locks[i] irq_context: 0 sk_lock-AF_INET &tcp_hashinfo.bhash[i].lock &obj_hash[i].lock irq_context: 0 rtnl_mutex ifalias_mutex irq_context: 0 pernet_ops_rwsem ipvs->est_mutex &rq->__lock irq_context: 0 pernet_ops_rwsem ipvs->est_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &tbl->lock irq_context: 0 &mm->mmap_lock sb_pagefaults mapping.invalidate_lock &type->s_umount_key#31 rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 &mm->mmap_lock sb_pagefaults mapping.invalidate_lock &type->s_umount_key#31 rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 &mm->mmap_lock sb_pagefaults mapping.invalidate_lock &type->s_umount_key#31 rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-kex-wg1#18 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock rcu_read_lock_bh &peer->keypairs.keypair_update_lock &obj_hash[i].lock pool_lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex remove_cache_srcu &pcp->lock &zone->lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex remove_cache_srcu &pcp->lock &zone->lock &____s->seqcount irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock remove_cache_srcu &pcp->lock &zone->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock remove_cache_srcu &pcp->lock &zone->lock &____s->seqcount irq_context: 0 pernet_ops_rwsem &root->kernfs_rwsem stock_lock irq_context: 0 pernet_ops_rwsem &root->kernfs_rwsem pcpu_lock stock_lock irq_context: 0 pernet_ops_rwsem &ht->mutex quarantine_lock irq_context: 0 (wq_completion)wg-kex-wg0#20 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock remove_cache_srcu irq_context: 0 (wq_completion)wg-kex-wg0#20 (work_completion)(&({ do { const void 
*__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock remove_cache_srcu quarantine_lock irq_context: 0 (wq_completion)wg-kex-wg0#20 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock remove_cache_srcu &c->lock irq_context: 0 (wq_completion)wg-kex-wg0#20 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock remove_cache_srcu &n->list_lock irq_context: 0 (wq_completion)wg-kex-wg0#20 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock remove_cache_srcu &obj_hash[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &base->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle rcu_read_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 rtnl_mutex &vlan_netdev_addr_lock_key/1 _xmit_ETHER &local->filter_lock irq_context: 0 rtnl_mutex &vlan_netdev_addr_lock_key/1 _xmit_ETHER rcu_read_lock &pool->lock/1 irq_context: 0 rtnl_mutex &vlan_netdev_addr_lock_key/1 _xmit_ETHER rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 rtnl_mutex &vlan_netdev_addr_lock_key/1 _xmit_ETHER rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 rtnl_mutex &vlan_netdev_addr_lock_key/1 _xmit_ETHER rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 rtnl_mutex &vlan_netdev_addr_lock_key/1 _xmit_ETHER rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lg->lg_mutex mmu_notifier_invalidate_range_start irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lg->lg_mutex &rq->__lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &vlan_netdev_addr_lock_key/1 _xmit_ETHER &local->filter_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &vlan_netdev_addr_lock_key/1 _xmit_ETHER rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &vlan_netdev_addr_lock_key/1 _xmit_ETHER rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &vlan_netdev_addr_lock_key/1 _xmit_ETHER rcu_read_lock 
&pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &vlan_netdev_addr_lock_key/1 _xmit_ETHER rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &vlan_netdev_addr_lock_key/1 _xmit_ETHER rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock &c->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh _xmit_ETHER#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &fq->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->active_txq_lock[i] irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock &local->active_txq_lock[i] irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock &local->queue_stop_reason_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock &fq->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock &fq->lock tk_core.seq.seqcount irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock hwsim_radio_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock hwsim_radio_lock pool_lock#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock 
hwsim_radio_lock &c->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock hwsim_radio_lock &n->list_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock hwsim_radio_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock hwsim_radio_lock &list->lock#19 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock &list->lock#19 irq_context: 0 &xt[i].mutex fs_reclaim mmu_notifier_invalidate_range_start pgd_lock irq_context: 0 &xt[i].mutex fs_reclaim mmu_notifier_invalidate_range_start key irq_context: 0 &xt[i].mutex fs_reclaim mmu_notifier_invalidate_range_start pcpu_lock irq_context: 0 &xt[i].mutex fs_reclaim mmu_notifier_invalidate_range_start percpu_counters_lock irq_context: 0 rtnl_mutex &vlan_netdev_addr_lock_key/1 &n->list_lock irq_context: 0 rtnl_mutex &vlan_netdev_addr_lock_key/1 &n->list_lock &c->lock irq_context: 0 &f->f_pos_lock sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock &n->list_lock irq_context: 0 &f->f_pos_lock sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock &n->list_lock &c->lock irq_context: 0 &f->f_pos_lock sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem irq_context: 0 &f->f_pos_lock sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem cgroup_threadgroup_rwsem.rss.gp_wait.lock irq_context: 0 &f->f_pos_lock sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem rcu_state.exp_mutex rcu_node_0 irq_context: 0 &f->f_pos_lock sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem rcu_state.exp_mutex &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem rcu_state.exp_mutex &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem rcu_state.exp_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem rcu_state.exp_mutex rcu_read_lock &pool->lock irq_context: 0 &f->f_pos_lock sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem rcu_state.exp_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 &f->f_pos_lock sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#11 &of->mutex cgroup_mutex 
cpu_hotplug_lock cgroup_threadgroup_rwsem rcu_state.exp_mutex &rnp->exp_wq[3] irq_context: 0 &f->f_pos_lock sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem css_set_lock irq_context: 0 &f->f_pos_lock sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem fs_reclaim irq_context: 0 &f->f_pos_lock sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &f->f_pos_lock sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem &c->lock irq_context: 0 &f->f_pos_lock sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem css_set_lock cgroup_file_kn_lock irq_context: 0 &f->f_pos_lock sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem css_set_lock &p->pi_lock irq_context: 0 &mm->mmap_lock &xa->xa_lock#6 &n->list_lock &c->lock irq_context: 0 &f->f_pos_lock sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem css_set_lock &p->pi_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem css_set_lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem freezer_mutex irq_context: 0 &f->f_pos_lock sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem freezer_mutex freezer_lock irq_context: 0 &f->f_pos_lock sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem freezer_mutex freezer_lock rcu_read_lock &sighand->siglock irq_context: 0 &f->f_pos_lock sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem freezer_mutex freezer_lock &sighand->siglock irq_context: 0 &f->f_pos_lock sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem freezer_mutex freezer_lock &sighand->siglock &p->pi_lock irq_context: 0 &f->f_pos_lock sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem freezer_mutex freezer_lock &p->pi_lock irq_context: 0 &f->f_pos_lock sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem freezer_mutex freezer_lock &sighand->siglock &p->pi_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem freezer_mutex &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem freezer_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem &p->alloc_lock irq_context: 0 &f->f_pos_lock sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem &p->alloc_lock &newf->file_lock irq_context: 0 &f->f_pos_lock sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem css_set_lock &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem css_set_lock krc.lock irq_context: 0 &f->f_pos_lock sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock 
cgroup_threadgroup_rwsem.waiters.lock irq_context: 0 &f->f_pos_lock sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem.waiters.lock &p->pi_lock irq_context: 0 &f->f_pos_lock sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem.waiters.lock &p->pi_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem.waiters.lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem.waiters.lock &p->pi_lock &cfs_rq->removed.lock irq_context: 0 &f->f_pos_lock sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem.rss.gp_wait.lock irq_context: 0 &f->f_pos_lock sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem.rss.gp_wait.lock &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh _xmit_ETHER#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &fq->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->active_txq_lock[i] irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock &local->active_txq_lock[i] irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock &local->queue_stop_reason_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock &fq->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock &fq->lock tk_core.seq.seqcount irq_context: softirq rcu_callback &c->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock hwsim_radio_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) 
rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock hwsim_radio_lock pool_lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock hwsim_radio_lock &list->lock#19 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock &list->lock#19 irq_context: 0 &smc->clcsock_release_lock rtnl_mutex ipvs->sync_mutex &n->list_lock irq_context: 0 &smc->clcsock_release_lock rtnl_mutex ipvs->sync_mutex &n->list_lock &c->lock irq_context: 0 &p->lock &of->mutex kn->active#55 &cgrp->pidlist_mutex irq_context: 0 &p->lock &of->mutex kn->active#55 &cgrp->pidlist_mutex css_set_lock irq_context: 0 &p->lock &of->mutex kn->active#55 &cgrp->pidlist_mutex &rq->__lock irq_context: 0 &p->lock &of->mutex kn->active#55 &cgrp->pidlist_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &p->lock &of->mutex kn->active#55 &cgrp->pidlist_mutex fs_reclaim irq_context: 0 &p->lock &of->mutex kn->active#55 &cgrp->pidlist_mutex fs_reclaim &rq->__lock irq_context: 0 &p->lock &of->mutex kn->active#55 &cgrp->pidlist_mutex fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &p->lock &of->mutex kn->active#55 &cgrp->pidlist_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &p->lock &of->mutex kn->active#55 &cgrp->pidlist_mutex pool_lock#2 irq_context: 0 &p->lock &of->mutex kn->active#55 &cgrp->pidlist_mutex &c->lock irq_context: 0 &p->lock &of->mutex kn->active#55 &cgrp->pidlist_mutex &obj_hash[i].lock irq_context: 0 &p->lock &of->mutex kn->active#55 &cgrp->pidlist_mutex &base->lock irq_context: 0 &p->lock &of->mutex kn->active#55 &cgrp->pidlist_mutex &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#11 &sb->s_type->i_lock_key#31 irq_context: 0 sb_writers#11 &wb->list_lock irq_context: 0 sb_writers#11 &wb->list_lock &sb->s_type->i_lock_key#31 irq_context: 0 bpf_stats_enabled_mutex &n->list_lock irq_context: 0 bpf_stats_enabled_mutex &n->list_lock &c->lock irq_context: 0 bpf_stats_enabled_mutex key#5 irq_context: 0 rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock rcu_read_lock &data->lock irq_context: 0 (wq_completion)wg-kex-wg1#18 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &____s->seqcount#2 irq_context: 0 (wq_completion)wg-kex-wg1#18 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &____s->seqcount irq_context: 0 (wq_completion)wg-kex-wg1#18 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + 
(((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#2 irq_context: 0 (wq_completion)wg-kex-wg1#18 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh _xmit_ETHER#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &fq->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->active_txq_lock[i] irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock &local->active_txq_lock[i] irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock &local->queue_stop_reason_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock &fq->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock &fq->lock tk_core.seq.seqcount irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock hwsim_radio_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock hwsim_radio_lock pool_lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) 
rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock hwsim_radio_lock &list->lock#19 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock &list->lock#19 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock hwsim_radio_lock &c->lock irq_context: 0 crypto_default_null_skcipher_lock crypto_alg_sem irq_context: 0 crypto_default_null_skcipher_lock fs_reclaim irq_context: 0 crypto_default_null_skcipher_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 crypto_default_null_skcipher_lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 crypto_default_null_skcipher_lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 crypto_default_null_skcipher_lock pool_lock#2 irq_context: 0 &mm->mmap_lock pcpu_lock stock_lock irq_context: softirq &(&l->destroy_dwork)->timer irq_context: softirq &(&l->destroy_dwork)->timer rcu_read_lock &pool->lock irq_context: softirq &(&l->destroy_dwork)->timer rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: softirq &(&l->destroy_dwork)->timer rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 (wq_completion)cgroup_pidlist_destroy irq_context: 0 (wq_completion)cgroup_pidlist_destroy (work_completion)(&(&l->destroy_dwork)->work) irq_context: 0 (wq_completion)cgroup_pidlist_destroy (work_completion)(&(&l->destroy_dwork)->work) &cgrp->pidlist_mutex irq_context: 0 (wq_completion)cgroup_pidlist_destroy (work_completion)(&(&l->destroy_dwork)->work) &cgrp->pidlist_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)cgroup_pidlist_destroy (work_completion)(&(&l->destroy_dwork)->work) &cgrp->pidlist_mutex pool_lock#2 irq_context: 0 (wq_completion)cgroup_pidlist_destroy (work_completion)(&(&l->destroy_dwork)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)events free_ipc_work &ht->mutex &meta->lock irq_context: 0 (wq_completion)events free_ipc_work &ht->mutex kfence_freelist_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh _xmit_ETHER#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock tk_core.seq.seqcount irq_context: 0 &mm->mmap_lock &xa->xa_lock#6 batched_entropy_u8.lock irq_context: 0 &mm->mmap_lock &xa->xa_lock#6 kfence_freelist_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &fq->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->active_txq_lock[i] irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) 
&idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock &local->active_txq_lock[i] irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock &local->queue_stop_reason_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock &fq->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock &fq->lock tk_core.seq.seqcount irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock hwsim_radio_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock hwsim_radio_lock pool_lock#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock hwsim_radio_lock &c->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock hwsim_radio_lock &list->lock#19 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock &list->lock#19 irq_context: 0 &mm->mmap_lock remove_cache_srcu rcu_read_lock &rcu_state.expedited_wq irq_context: 0 &mm->mmap_lock remove_cache_srcu rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 &mm->mmap_lock remove_cache_srcu rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 &mm->mmap_lock remove_cache_srcu rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq (&peer->timer_send_keepalive) &____s->seqcount#2 irq_context: softirq (&peer->timer_send_keepalive) &____s->seqcount irq_context: 0 &f->f_pos_lock sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem rcu_state.exp_mutex &rnp->exp_wq[2] irq_context: 0 &f->f_pos_lock sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem rcu_state.exp_mutex.wait_lock irq_context: 0 &f->f_pos_lock sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem &p->pi_lock irq_context: 0 &f->f_pos_lock sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem &p->pi_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#11 &of->mutex cgroup_mutex 
cpu_hotplug_lock cgroup_threadgroup_rwsem &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem rcu_node_0 irq_context: 0 &f->f_pos_lock sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem &rcu_state.expedited_wq irq_context: 0 &f->f_pos_lock sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem &rcu_state.expedited_wq &p->pi_lock irq_context: 0 &f->f_pos_lock sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock remove_cache_srcu rcu_read_lock &cfs_rq->removed.lock irq_context: 0 &mm->mmap_lock remove_cache_srcu rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rdma_nets_rwsem rdma_nets_rwsem.wait_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rdma_nets_rwsem &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rdma_nets_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#11 &n->list_lock irq_context: 0 &f->f_pos_lock sb_writers#11 &n->list_lock &c->lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &____s->seqcount#2 irq_context: 0 pernet_ops_rwsem devices_rwsem &p->pi_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rdma_nets_rwsem.wait_lock irq_context: 0 (wq_completion)wg-crypt-wg0#11 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &lock->wait_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem &p->pi_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem rcu_read_lock &rq->__lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex defrag6_mutex nf_hook_mutex &rq->__lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->bla.work)->work) rcu_read_lock &rcu_state.expedited_wq irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#11 &rcu_state.expedited_wq irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#11 &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#11 &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->bla.work)->work) rcu_read_lock &rcu_state.expedited_wq 
&p->pi_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->bla.work)->work) rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->bla.work)->work) rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#11 &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock &tbl->lock quarantine_lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh _xmit_ETHER#2 irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock tk_core.seq.seqcount irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &fq->lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_TIPC &tn->nametbl_lock &obj_hash[i].lock pool_lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->active_txq_lock[i] irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock &local->active_txq_lock[i] irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock &local->queue_stop_reason_lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock &fq->lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock &fq->lock tk_core.seq.seqcount irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock tk_core.seq.seqcount irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock hwsim_radio_lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock hwsim_radio_lock &c->lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock hwsim_radio_lock pool_lock#2 irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock hwsim_radio_lock &list->lock#19 irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock &list->lock#19 irq_context: 0 &f->f_pos_lock sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cpuset_rwsem rcu_state.exp_mutex &rnp->exp_wq[3] irq_context: 0 
pernet_ops_rwsem rcu_read_lock &pool->lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 pernet_ops_rwsem rcu_read_lock &pool->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 link_idr_lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 link_idr_lock fill_pool_map-wait-type-override &c->lock irq_context: 0 link_idr_lock fill_pool_map-wait-type-override pool_lock irq_context: softirq (&in_dev->mr_ifc_timer) fill_pool_map-wait-type-override &n->list_lock irq_context: softirq (&in_dev->mr_ifc_timer) fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 pernet_ops_rwsem rcu_state.exp_mutex rcu_read_lock &pool->lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 pernet_ops_rwsem rcu_state.exp_mutex rcu_read_lock &pool->lock fill_pool_map-wait-type-override &c->lock irq_context: 0 pernet_ops_rwsem rcu_state.exp_mutex rcu_read_lock &pool->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 rtnl_mutex &vlan_netdev_addr_lock_key/2 &batadv_netdev_addr_lock_key/1 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &vlan_netdev_addr_lock_key/2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &vlan_netdev_addr_lock_key/2 &batadv_netdev_addr_lock_key/1 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh key#21 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh &entry->crc_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh &list->lock#5 irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex fs_reclaim mmu_notifier_invalidate_range_start pool_lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh &data->lock irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 rcu_read_lock &cfs_rq->removed.lock irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh rcu_read_lock batched_entropy_u8.lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh rcu_read_lock kfence_freelist_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh rcu_read_lock &meta->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh &data->lock irq_context: 0 &f->f_pos_lock sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cpuset_rwsem rcu_state.exp_mutex.wait_lock irq_context: 0 (wq_completion)wg-crypt-wg0#11 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh 
rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_PACKET &mm->mmap_lock &cfs_rq->removed.lock irq_context: 0 sk_lock-AF_PACKET &mm->mmap_lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_PACKET pcpu_lock irq_context: 0 sk_lock-AF_PACKET pcpu_lock stock_lock irq_context: 0 sk_lock-AF_PACKET purge_vmap_area_lock irq_context: 0 sk_lock-AF_PACKET purge_vmap_area_lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_PACKET purge_vmap_area_lock pool_lock#2 irq_context: 0 sk_lock-AF_PACKET rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)wg-kex-wg1#19 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg1#19 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh &data->lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem rcu_node_0 irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem &rcu_state.expedited_wq irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh &data->lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem &rcu_state.expedited_wq &p->pi_lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh &data->lock irq_context: 0 pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex nf_hook_mutex batched_entropy_u8.lock irq_context: 0 pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex nf_hook_mutex kfence_freelist_lock irq_context: 0 (wq_completion)wg-kex-wg2#19 (work_completion)(&peer->transmit_handshake_work) &____s->seqcount#2 irq_context: 0 (wq_completion)wg-kex-wg2#19 (work_completion)(&peer->transmit_handshake_work) &____s->seqcount irq_context: 0 rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_NETROM &data->lock &obj_hash[i].lock irq_context: 0 rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_NETROM &data->lock &base->lock irq_context: 0 rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_NETROM &data->lock &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 tomoyo_ss mount_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 tomoyo_ss mount_lock rcu_read_lock rename_lock.seqcount irq_context: softirq (&sk->sk_timer)#2 slock-AF_NETROM fill_pool_map-wait-type-override pool_lock#2 irq_context: softirq (&sk->sk_timer)#2 slock-AF_NETROM fill_pool_map-wait-type-override &c->lock irq_context: softirq (&sk->sk_timer)#2 slock-AF_NETROM fill_pool_map-wait-type-override pool_lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh &data->lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh 
_xmit_ETHER#2 rcu_read_lock &c->lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &obj_hash[i].lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock hwsim_radio_lock &n->list_lock irq_context: 0 kn->active#5 &kernfs_locks->open_file_mutex[count] pool_lock#2 irq_context: 0 sb_writers#4 sb_internal quarantine_lock irq_context: 0 sb_writers#4 sb_internal &cfs_rq->removed.lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock hwsim_radio_lock &n->list_lock &c->lock irq_context: 0 &xs->mutex rcu_read_lock &rcu_state.gp_wq irq_context: 0 &xs->mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 &xs->mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 &xs->mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&pool->work) rcu_read_lock &pool->lock irq_context: 0 (wq_completion)events (work_completion)(&pool->work) rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock &c->lock irq_context: 0 cb_lock genl_mutex rcu_read_lock &rcu_state.expedited_wq irq_context: 0 cb_lock genl_mutex rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 free_vmap_area_lock &____s->seqcount irq_context: 0 free_vmap_area_lock &pcp->lock &zone->lock irq_context: 0 free_vmap_area_lock rcu_read_lock pool_lock#2 irq_context: 0 &mm->mmap_lock &xa->xa_lock#6 &____s->seqcount#2 irq_context: 0 sk_lock-AF_INET rcu_read_lock clock-AF_INET irq_context: 0 sk_lock-AF_INET rcu_read_lock clock-AF_INET &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET rcu_read_lock &stab->lock irq_context: 0 sk_lock-AF_INET rcu_read_lock &stab->lock &psock->link_lock irq_context: 0 cb_lock genl_mutex rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 &smc->clcsock_release_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 &smc->clcsock_release_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 &f->f_owner.lock irq_context: 0 nfnl_subsys_cttimeout fs_reclaim irq_context: 0 nfnl_subsys_cttimeout fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 nfnl_subsys_cttimeout pool_lock#2 irq_context: 0 nfnl_subsys_cttimeout &obj_hash[i].lock irq_context: 0 nfnl_subsys_cttimeout nf_conntrack_mutex irq_context: 0 nfnl_subsys_cttimeout nf_conntrack_mutex &rq->__lock irq_context: 0 nfnl_subsys_cttimeout nf_conntrack_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 nfnl_subsys_cttimeout nf_conntrack_mutex nf_conntrack_mutex.wait_lock irq_context: 0 nfnl_subsys_cttimeout nf_conntrack_mutex &nf_conntrack_locks[i] irq_context: 0 nfnl_subsys_cttimeout krc.lock irq_context: 0 (wq_completion)wg-kex-wg1#19 (work_completion)(&peer->transmit_handshake_work) &c->lock irq_context: softirq (&icsk->icsk_retransmit_timer) slock-AF_INET &____s->seqcount#2 irq_context: softirq (&icsk->icsk_retransmit_timer) slock-AF_INET &____s->seqcount irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET rcu_read_lock &psock->link_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET rcu_read_lock 
&stab->lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET rcu_read_lock &stab->lock &psock->link_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET rcu_read_lock &obj_hash[i].lock irq_context: 0 &rnp->exp_wq[0] irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET &psock->ingress_lock irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex rcu_node_0 irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex &rcu_state.expedited_wq irq_context: 0 cb_lock genl_mutex rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex &rcu_state.expedited_wq &p->pi_lock irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex quarantine_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem &p->pi_lock &cfs_rq->removed.lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem &p->pi_lock &rq->__lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock fill_pool_map-wait-type-override &n->list_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_state.exp_mutex pgd_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_state.exp_mutex rcu_read_lock pool_lock#2 irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_state.exp_mutex key irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_state.exp_mutex pcpu_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_state.exp_mutex percpu_counters_lock irq_context: 0 pernet_ops_rwsem &dir->lock#2 &meta->lock irq_context: 0 pernet_ops_rwsem &dir->lock#2 kfence_freelist_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex dev_pm_qos_sysfs_mtx &root->kernfs_rwsem &sem->wait_lock irq_context: 0 pernet_ops_rwsem &root->kernfs_rwsem kernfs_idr_lock fill_pool_map-wait-type-override &c->lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &rnp->exp_wq[1] irq_context: 0 pernet_ops_rwsem &root->kernfs_rwsem rcu_read_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&map->work) &rnp->exp_wq[3] irq_context: 0 pernet_ops_rwsem &root->kernfs_rwsem fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)bond2 (work_completion)(&(&bond->ad_work)->work) fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 pernet_ops_rwsem &root->kernfs_rwsem fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond2 (work_completion)(&(&bond->ad_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 fill_pool_map-wait-type-override &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock &obj_hash[i].lock pool_lock irq_context: 0 rtnl_mutex &xs->mutex &lock->wait_lock irq_context: 0 (wq_completion)wg-crypt-wg1#11 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : 
"=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &rq->__lock irq_context: 0 rtnl_mutex &xs->mutex remove_cache_srcu irq_context: 0 rtnl_mutex &xs->mutex remove_cache_srcu quarantine_lock irq_context: 0 rtnl_mutex &xs->mutex remove_cache_srcu &c->lock irq_context: 0 rtnl_mutex &xs->mutex remove_cache_srcu &n->list_lock irq_context: 0 rtnl_mutex &xs->mutex remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 rtnl_mutex &xs->mutex remove_cache_srcu &obj_hash[i].lock irq_context: 0 rtnl_mutex &xs->mutex remove_cache_srcu pool_lock#2 irq_context: 0 (wq_completion)wg-kex-wg1#17 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock crngs.lock base_crng.lock irq_context: 0 (wq_completion)wg-kex-wg1#17 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock rcu_read_lock_bh batched_entropy_u32.lock crngs.lock irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex defrag4_mutex nf_hook_mutex rcu_read_lock rcu_node_0 irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex defrag4_mutex nf_hook_mutex rcu_read_lock &rq->__lock irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex defrag4_mutex nf_hook_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex defrag4_mutex rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex defrag4_mutex rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex defrag4_mutex rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex defrag4_mutex rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)wg-crypt-wg0#11 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 (wq_completion)wg-crypt-wg1#11 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh batched_entropy_u32.lock crngs.lock irq_context: 0 pernet_ops_rwsem rtnl_mutex remove_cache_srcu rcu_read_lock &pcp->lock &zone->lock irq_context: 0 rtnl_mutex &dev_addr_list_lock_key#3/2 irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_TIPC &tn->nametbl_lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_TIPC &tn->nametbl_lock fill_pool_map-wait-type-override pool_lock irq_context: 0 rtnl_mutex &dev_addr_list_lock_key#3/2 &dev_addr_list_lock_key/1 irq_context: 0 rtnl_mutex &dev_addr_list_lock_key#3/2 &dev_addr_list_lock_key/1 pool_lock#2 irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &root->kernfs_rwsem &p->pi_lock &rq->__lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &root->kernfs_rwsem &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &xs->mutex &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->nc.work)->work) rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &dev_addr_list_lock_key#3/2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &dev_addr_list_lock_key#3/2 &dev_addr_list_lock_key/1 irq_context: 0 (wq_completion)ipv6_addrconf 
(work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &dev_addr_list_lock_key#3/2 &dev_addr_list_lock_key/1 pool_lock#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock &n->list_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock &n->list_lock &c->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &list->lock#46 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &pool->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&port->wq) irq_context: 0 (wq_completion)events (work_completion)(&port->wq) &list->lock#46 irq_context: 0 (wq_completion)events (work_completion)(&port->wq) rcu_read_lock_bh noop_qdisc.q.lock irq_context: 0 (wq_completion)events (work_completion)(&port->wq) rcu_read_lock_bh &data->lock irq_context: 0 (wq_completion)events (work_completion)(&port->wq) rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&port->wq) rcu_read_lock_bh &____s->seqcount irq_context: 0 (wq_completion)events (work_completion)(&port->wq) rcu_read_lock_bh pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&port->wq) rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&port->wq) rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&port->wq) rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)events (work_completion)(&port->wq) rcu_read_lock &list->lock#5 irq_context: 0 rtnl_mutex &dev_addr_list_lock_key#3/2 &c->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh &list->lock#46 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &pool->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &pool->lock pool_lock#2 irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)wg-crypt-wg0#8 (work_completion)(&peer->transmit_packet_work) &rcu_state.expedited_wq irq_context: 0 (wq_completion)wg-crypt-wg0#8 (work_completion)(&peer->transmit_packet_work) &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)wg-crypt-wg0#8 (work_completion)(&peer->transmit_packet_work) &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)wg-crypt-wg0#8 (work_completion)(&peer->transmit_packet_work) &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, 
cpu)->seq irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &rq->__lock &cfs_rq->removed.lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &root->kernfs_rwsem rcu_read_lock &rq->__lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &root->kernfs_rwsem rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &list->lock#46 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &pool->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 pernet_ops_rwsem rtnl_mutex device_links_lock &rq->__lock irq_context: 0 pernet_ops_rwsem rtnl_mutex kernfs_idr_lock &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &list->lock#46 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &pool->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&port->wq) rcu_read_lock &c->lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock hwsim_radio_lock batched_entropy_u8.lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock hwsim_radio_lock kfence_freelist_lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh &list->lock#46 irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &pool->lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &pool->lock pool_lock#2 irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &pool->lock &p->pi_lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-crypt-wg1#11 (work_completion)(&peer->transmit_packet_work) batched_entropy_u8.lock crngs.lock irq_context: 0 pernet_ops_rwsem 
ovs_mutex nf_ct_proto_mutex defrag4_mutex nf_hook_mutex &rq->__lock irq_context: 0 &dir->lock quarantine_lock irq_context: 0 pernet_ops_rwsem &root->kernfs_rwsem &obj_hash[i].lock pool_lock irq_context: 0 cgroup_threadgroup_rwsem &obj_hash[i].lock pool_lock irq_context: 0 pernet_ops_rwsem kernfs_idr_lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 pernet_ops_rwsem kernfs_idr_lock fill_pool_map-wait-type-override &c->lock irq_context: 0 pernet_ops_rwsem kernfs_idr_lock fill_pool_map-wait-type-override pool_lock irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex rcu_read_lock rcu_node_0 irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex rcu_read_lock &rq->__lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock fill_pool_map-wait-type-override pool_lock#2 irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock fill_pool_map-wait-type-override pool_lock irq_context: 0 epnested_mutex &ep->mtx uevent_sock_mutex.wait_lock irq_context: 0 epnested_mutex &ep->mtx &p->pi_lock irq_context: 0 epnested_mutex &ep->mtx &p->pi_lock &rq->__lock irq_context: 0 epnested_mutex &ep->mtx &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 epnested_mutex &ep->mtx uevent_sock_mutex remove_cache_srcu irq_context: 0 epnested_mutex &ep->mtx uevent_sock_mutex remove_cache_srcu quarantine_lock irq_context: 0 epnested_mutex &ep->mtx uevent_sock_mutex nl_table_wait.lock &p->pi_lock irq_context: 0 epnested_mutex &ep->mtx uevent_sock_mutex nl_table_wait.lock &p->pi_lock &rq->__lock irq_context: 0 epnested_mutex &ep->mtx uevent_sock_mutex nl_table_wait.lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem pcpu_alloc_mutex &cfs_rq->removed.lock irq_context: 0 pernet_ops_rwsem pcpu_alloc_mutex &obj_hash[i].lock irq_context: 0 epnested_mutex &ep->mtx &ep->lock &ep->wq irq_context: 0 epnested_mutex &ep->mtx &ep->lock &ep->wq &p->pi_lock irq_context: 0 epnested_mutex &ep->mtx &ep->lock &ep->wq &p->pi_lock &rq->__lock irq_context: 0 epnested_mutex &ep->mtx &ep->lock &ep->wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 epnested_mutex &ep->mtx &ep->poll_wait &ep->lock irq_context: 0 epnested_mutex &lock->wait_lock irq_context: 0 epnested_mutex &p->pi_lock irq_context: 0 epnested_mutex &p->pi_lock &rq->__lock irq_context: 0 epnested_mutex &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 epnested_mutex &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ei->i_es_lock &c->lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem &base->lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem kernfs_idr_lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem kernfs_idr_lock fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem kernfs_idr_lock fill_pool_map-wait-type-override pool_lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex 
fill_pool_map-wait-type-override &c->lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex fill_pool_map-wait-type-override pool_lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex fill_pool_map-wait-type-override &n->list_lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 &data->lock irq_context: 0 rcu_read_lock &n->lock &data->lock irq_context: 0 epnested_mutex &ep->mtx lock kernfs_idr_lock &c->lock irq_context: 0 (wq_completion)wg-crypt-wg0#10 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)wg-crypt-wg0#10 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)wg-crypt-wg0#10 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &____s->seqcount#7 irq_context: 0 (wq_completion)wg-crypt-wg0#10 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &nf_nat_locks[i] irq_context: 0 (wq_completion)wg-crypt-wg0#10 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &nf_conntrack_locks[i] irq_context: 0 pernet_ops_rwsem rtnl_mutex &dev->mutex &rq->__lock irq_context: 0 (wq_completion)wg-crypt-wg0#10 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 irq_context: 0 (wq_completion)wg-crypt-wg0#10 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 batched_entropy_u8.lock irq_context: 0 (wq_completion)wg-crypt-wg0#10 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg0#10 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &data->lock irq_context: 0 (wq_completion)wg-crypt-wg0#10 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg1#7 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem rcu_node_0 irq_context: 0 rtnl_mutex &tbl->lock &n->lock &data->lock irq_context: 0 rtnl_mutex &tbl->lock &n->lock quarantine_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &ei->i_data_sem &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &ei->i_data_sem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 tracepoints_mutex cpu_hotplug_lock static_call_mutex text_mutex 
rcu_node_0 irq_context: 0 (wq_completion)rcu_gp (work_completion)(&rew->rew_work) rcu_read_lock &stopper->lock irq_context: 0 tracepoints_mutex cpu_hotplug_lock static_call_mutex text_mutex &rcu_state.expedited_wq irq_context: 0 (wq_completion)rcu_gp (work_completion)(&rew->rew_work) rcu_read_lock &stop_pi_lock irq_context: 0 (wq_completion)wg-crypt-wg0#4 (work_completion)(&peer->transmit_packet_work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem rtnl_mutex &caifn->caifdevs.lock rcu_state.exp_mutex &rnp->exp_wq[3] irq_context: 0 (wq_completion)events (work_completion)(&ht->run_work) &ht->mutex &____s->seqcount#2 irq_context: 0 (wq_completion)wg-crypt-wg1#7 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem &net->ipv6.ip6addrlbl_table.lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 pernet_ops_rwsem &net->ipv6.ip6addrlbl_table.lock fill_pool_map-wait-type-override pool_lock irq_context: 0 pernet_ops_rwsem fill_pool_map-wait-type-override rcu_read_lock pool_lock#2 irq_context: 0 pernet_ops_rwsem sysctl_lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 pernet_ops_rwsem sysctl_lock fill_pool_map-wait-type-override &c->lock irq_context: 0 pernet_ops_rwsem sysctl_lock fill_pool_map-wait-type-override &n->list_lock irq_context: 0 pernet_ops_rwsem sysctl_lock fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 pernet_ops_rwsem sysctl_lock fill_pool_map-wait-type-override pool_lock irq_context: 0 pernet_ops_rwsem uevent_sock_mutex &n->list_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex krc.lock &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem rtnl_mutex krc.lock &base->lock irq_context: 0 pernet_ops_rwsem rtnl_mutex krc.lock &base->lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_lock_key &xa->xa_lock#6 &obj_hash[i].lock irq_context: 0 &sb->s_type->i_lock_key &xa->xa_lock#6 pool_lock#2 irq_context: 0 &type->i_mutex_dir_key#3 sb_writers#4 jbd2_handle &journal->j_state_lock irq_context: 0 &type->i_mutex_dir_key#3 sb_writers#4 jbd2_handle &journal->j_state_lock &journal->j_wait_commit irq_context: 0 &type->i_mutex_dir_key#3 sb_writers#4 jbd2_handle &journal->j_state_lock &journal->j_wait_commit &p->pi_lock irq_context: 0 &type->i_mutex_dir_key#3 sb_writers#4 jbd2_handle &journal->j_state_lock &journal->j_wait_commit &p->pi_lock &rq->__lock irq_context: 0 &type->i_mutex_dir_key#3 sb_writers#4 jbd2_handle &journal->j_state_lock &journal->j_wait_commit &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 jbd2_handle &journal->j_state_lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex lock kernfs_idr_lock &n->list_lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&rew->rew_work) rcu_state.exp_wake_mutex &rnp->exp_wq[3] &p->pi_lock &cfs_rq->removed.lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex lock kernfs_idr_lock &n->list_lock &c->lock irq_context: 0 &f->f_pos_lock sb_writers#4 
&sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &____s->seqcount#2 irq_context: 0 &type->i_mutex_dir_key#3 remove_cache_srcu rcu_read_lock &cfs_rq->removed.lock irq_context: 0 &type->i_mutex_dir_key#3 remove_cache_srcu rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg1#17 (work_completion)(&peer->transmit_handshake_work) &c->lock irq_context: 0 rtnl_mutex &idev->mc_lock &bridge_netdev_addr_lock_key &____s->seqcount irq_context: softirq (&in_dev->mr_ifc_timer) fill_pool_map-wait-type-override pool_lock#2 irq_context: softirq (&in_dev->mr_ifc_timer) fill_pool_map-wait-type-override pool_lock irq_context: 0 rtnl_mutex rcu_state.barrier_mutex rcu_read_lock &rq->__lock irq_context: 0 sk_lock-AF_INET rcu_read_lock &rcu_state.gp_wq &p->pi_lock &cfs_rq->removed.lock irq_context: 0 pernet_ops_rwsem nf_log_mutex &rq->__lock irq_context: 0 pernet_ops_rwsem nf_log_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex rcu_state.barrier_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem nf_hook_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem __ip_vs_app_mutex &rq->__lock irq_context: 0 rtnl_mutex uevent_sock_mutex fs_reclaim &rq->__lock irq_context: 0 rtnl_mutex rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 tracepoints_mutex cpu_hotplug_lock static_call_mutex text_mutex &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&rew->rew_work) rcu_read_lock &stop_pi_lock &rq->__lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&rew->rew_work) rcu_read_lock &stop_pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem &____s->seqcount#2 irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem &____s->seqcount irq_context: 0 (wq_completion)events (work_completion)(&data->dm_alert_work) remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem rcu_read_lock fill_pool_map-wait-type-override &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)gid-cache-wq (work_completion)(&work->work) devices_rwsem &table->lock#4 irq_context: 0 (wq_completion)gid-cache-wq (work_completion)(&work->work) devices_rwsem &table->lock#4 &table->rwlock irq_context: 0 (wq_completion)gid-cache-wq (work_completion)(&work->work) devices_rwsem &table->lock#4 &obj_hash[i].lock irq_context: 0 (wq_completion)gid-cache-wq (work_completion)(&work->work) devices_rwsem &table->lock#4 pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock &xa->xa_lock#6 &pcp->lock &zone->lock &____s->seqcount irq_context: 0 (wq_completion)gid-cache-wq (work_completion)(&work->work) devices_rwsem &table->lock#4 &device->event_handler_rwsem irq_context: 0 (wq_completion)gid-cache-wq (work_completion)(&work->work) devices_rwsem &table->lock#4 &device->event_handler_rwsem rcu_read_lock &pool->lock irq_context: 0 (wq_completion)gid-cache-wq (work_completion)(&work->work) devices_rwsem &table->lock#4 &device->event_handler_rwsem rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 key#24 irq_context: 0 sb_writers#4 sb_internal jbd2_handle rcu_read_lock &rcu_state.gp_wq irq_context: 0 sb_writers#4 sb_internal jbd2_handle rcu_read_lock &rcu_state.gp_wq &p->pi_lock 
irq_context: 0 sb_writers#4 sb_internal jbd2_handle rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#8 tomoyo_ss remove_cache_srcu &rq->__lock irq_context: 0 sb_writers#8 tomoyo_ss remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem uevent_sock_mutex.wait_lock irq_context: 0 &mm->mmap_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)gid-cache-wq (work_completion)(&work->work) devices_rwsem &table->lock#4 &device->event_handler_rwsem rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&smcibdev->port_event_work) &rxe->usdev_lock rtnl_mutex &pool->lock &p->pi_lock irq_context: 0 (wq_completion)wg-crypt-wg0#10 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh &data->lock irq_context: 0 nfnl_subsys_cttimeout nf_conntrack_mutex rcu_node_0 irq_context: 0 fill_pool_map-wait-type-override rcu_node_0 irq_context: 0 rtnl_mutex sysctl_lock fill_pool_map-wait-type-override &c->lock irq_context: 0 rtnl_mutex sysctl_lock fill_pool_map-wait-type-override &n->list_lock irq_context: 0 rtnl_mutex sysctl_lock fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx rcu_read_lock &rcu_state.gp_wq irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem rcu_read_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem rcu_read_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 sb_writers#4 sb_writers#4 remove_cache_srcu rcu_read_lock &rcu_state.expedited_wq irq_context: 0 sb_writers#4 sb_writers#4 remove_cache_srcu rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem rcu_read_lock &base->lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx fill_pool_map-wait-type-override &c->lock irq_context: 0 pernet_ops_rwsem rtnl_mutex subsys mutex#17 &rq->__lock irq_context: 0 pernet_ops_rwsem rtnl_mutex subsys mutex#17 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) rcu_read_lock &pool->lock fill_pool_map-wait-type-override &n->list_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex sysctl_lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) rcu_read_lock &pool->lock fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 pernet_ops_rwsem rtnl_mutex sysctl_lock fill_pool_map-wait-type-override pool_lock irq_context: 0 pernet_ops_rwsem &ht->mutex &rq->__lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC 
&rxnet->local_mutex &cfs_rq->removed.lock irq_context: 0 nfnl_subsys_cttimeout &c->lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_INET6 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex rcu_read_lock rcu_node_0 irq_context: 0 pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex rcu_read_lock &rq->__lock irq_context: 0 pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex defrag6_mutex fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex defrag6_mutex fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)events free_ipc_work rcu_state.exp_mutex &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)events free_ipc_work &type->s_umount_key#47 &rcu_state.expedited_wq irq_context: 0 pernet_ops_rwsem rtnl_mutex &rcu_state.expedited_wq &p->pi_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rq->__lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC batched_entropy_u8.lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC kfence_freelist_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->nc.work)->work) fill_pool_map-wait-type-override &n->list_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->nc.work)->work) fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-sk_lock-AF_RXRPC &meta->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-sk_lock-AF_RXRPC kfence_freelist_lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&rew->rew_work) fill_pool_map-wait-type-override &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex nf_hook_mutex &n->list_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex nf_hook_mutex &n->list_lock &c->lock irq_context: 0 rcu_state.exp_mutex &cfs_rq->removed.lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &rnp->exp_wq[2] irq_context: 0 rcu_state.exp_mutex pool_lock#2 irq_context: 0 pernet_ops_rwsem rtnl_mutex sysctl_lock fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &rdev->bss_lock fill_pool_map-wait-type-override &c->lock irq_context: 0 pernet_ops_rwsem rtnl_mutex sysctl_lock fill_pool_map-wait-type-override &n->list_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex sysctl_lock fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: softirq rcu_callback &dir->lock &obj_hash[i].lock irq_context: softirq rcu_callback &dir->lock pool_lock#2 irq_context: softirq rcu_callback rcu_read_lock &pool->lock/1 irq_context: softirq rcu_callback rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: softirq rcu_callback rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 batched_entropy_u8.lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 kfence_freelist_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex uevent_sock_mutex rcu_read_lock &rcu_state.gp_wq irq_context: 0 pernet_ops_rwsem rtnl_mutex uevent_sock_mutex rcu_read_lock 
&rcu_state.gp_wq &p->pi_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex uevent_sock_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 pernet_ops_rwsem rtnl_mutex uevent_sock_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&ht->run_work) &ht->mutex remove_cache_srcu rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)events (work_completion)(&ht->run_work) &ht->mutex remove_cache_srcu rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&ht->run_work) &ht->mutex remove_cache_srcu rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem rcu_read_lock &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem &rcu_state.expedited_wq irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem &rcu_state.expedited_wq &p->pi_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex pcpu_alloc_mutex rcu_node_0 irq_context: 0 rtnl_mutex &wg->device_update_lock &list->lock#17 &data->lock irq_context: 0 rtnl_mutex &wg->device_update_lock &list->lock#17 &obj_hash[i].lock irq_context: 0 rtnl_mutex &wg->device_update_lock &list->lock#17 pool_lock#2 irq_context: 0 (wq_completion)infiniband (work_completion)(&work->work)#2 &rxe->usdev_lock &lock->wait_lock irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex fill_pool_map-wait-type-override rcu_read_lock rcu_node_0 irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex fill_pool_map-wait-type-override rcu_read_lock &rq->__lock irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex fill_pool_map-wait-type-override rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock fill_pool_map-wait-type-override rcu_read_lock pool_lock#2 irq_context: 0 &mm->mmap_lock fill_pool_map-wait-type-override &obj_hash[i].lock irq_context: 0 (wq_completion)infiniband (work_completion)(&work->work)#2 &rxe->usdev_lock &pool->lock irq_context: 0 (wq_completion)infiniband (work_completion)(&work->work)#2 &rxe->usdev_lock &pool->lock &p->pi_lock irq_context: 0 (wq_completion)infiniband (work_completion)(&work->work)#2 &rxe->usdev_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)infiniband (work_completion)(&work->work)#2 &rxe->usdev_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex team->team_lock_key#10 &____s->seqcount#2 irq_context: 0 rtnl_mutex team->team_lock_key#10 &____s->seqcount irq_context: softirq (&hsr->announce_timer) rcu_read_lock &hsr->seqnr_lock rcu_read_lock rcu_read_lock_bh noop_qdisc.q.lock irq_context: softirq (&hsr->announce_timer) rcu_read_lock &hsr->seqnr_lock rcu_read_lock rcu_read_lock_bh &data->lock irq_context: softirq (&hsr->announce_timer) rcu_read_lock &hsr->seqnr_lock rcu_read_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: softirq (&hsr->announce_timer) rcu_read_lock &hsr->seqnr_lock rcu_read_lock rcu_read_lock_bh pool_lock#2 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &pcp->lock &zone->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex sysctl_lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex sysctl_lock fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex sysctl_lock 
fill_pool_map-wait-type-override pool_lock irq_context: 0 rtnl_mutex &vlan_netdev_addr_lock_key/1 _xmit_ETHER krc.lock &obj_hash[i].lock irq_context: 0 rtnl_mutex &vlan_netdev_addr_lock_key/1 _xmit_ETHER krc.lock &base->lock irq_context: 0 rtnl_mutex &vlan_netdev_addr_lock_key/1 _xmit_ETHER krc.lock &base->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex &macvlan_netdev_addr_lock_key/1 _xmit_ETHER krc.lock &obj_hash[i].lock irq_context: 0 rtnl_mutex &macvlan_netdev_addr_lock_key/1 _xmit_ETHER krc.lock &base->lock irq_context: 0 rtnl_mutex &macvlan_netdev_addr_lock_key/1 _xmit_ETHER krc.lock &base->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex rcu_read_lock &tb->tb6_lock &pcp->lock &zone->lock irq_context: 0 rtnl_mutex &idev->mc_lock &macsec_netdev_addr_lock_key/1 &obj_hash[i].lock pool_lock irq_context: 0 rtnl_mutex rcu_read_lock &tb->tb6_lock batched_entropy_u8.lock irq_context: 0 rtnl_mutex rcu_read_lock &tb->tb6_lock kfence_freelist_lock irq_context: 0 rtnl_mutex rcu_read_lock &tb->tb6_lock &meta->lock irq_context: 0 fill_pool_map-wait-type-override &rcu_state.expedited_wq irq_context: softirq rcu_read_lock rcu_read_lock &zone->lock irq_context: softirq rcu_read_lock hwsim_radio_lock &zone->lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx remove_cache_srcu irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx remove_cache_srcu quarantine_lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx remove_cache_srcu &c->lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx remove_cache_srcu &n->list_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem quarantine_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx rcu_read_lock &local->queue_stop_reason_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx rcu_read_lock hwsim_radio_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx rcu_read_lock &list->lock#19 irq_context: softirq rcu_read_lock &sta->rate_ctrl_lock irq_context: softirq rcu_read_lock &sta->rate_ctrl_lock pool_lock#2 irq_context: softirq rcu_read_lock &sta->rate_ctrl_lock &obj_hash[i].lock irq_context: softirq rcu_read_lock &sta->rate_ctrl_lock krc.lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sta->ampdu_mlme.mtx irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sta->ampdu_mlme.mtx &sta->lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx (work_completion)(&sta->ampdu_mlme.work) irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx rcu_state.exp_mutex rcu_node_0 irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx rcu_state.exp_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx rcu_state.exp_mutex rcu_read_lock &pool->lock irq_context: 0 (wq_completion)events 
(work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx rcu_state.exp_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx rcu_state.exp_mutex &rnp->exp_wq[2] irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx rcu_state.exp_mutex &pool->lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx rcu_state.exp_mutex &pool->lock &p->pi_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx rcu_state.exp_mutex &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx rcu_state.exp_mutex &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx rcu_state.exp_mutex &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx rcu_state.exp_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &rnp->exp_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx rcu_state.exp_mutex irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx rcu_state.exp_mutex rcu_state.exp_mutex.wait_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx rcu_state.exp_mutex.wait_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &p->pi_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx rcu_state.exp_mutex &rnp->exp_wq[3] irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sta->lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx krc.lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &local->key_mtx irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &fq->lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &dentry->d_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 &dentry->d_lock &dentry->d_lock/1 irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 pin_fs_lock irq_context: 0 (wq_completion)events 
(work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 rcu_read_lock mount_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 rcu_read_lock mount_lock mount_lock.seqcount irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 mount_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 mount_lock mount_lock.seqcount irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 rcu_read_lock &dentry->d_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 &fsnotify_mark_srcu irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 &xa->xa_lock#6 irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx rcu_read_lock &dentry->d_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &fsnotify_mark_srcu irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_lock_key#7 irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &s->s_inode_list_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &xa->xa_lock#6 irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx rcu_read_lock mount_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx rcu_read_lock mount_lock mount_lock.seqcount irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx mount_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx mount_lock mount_lock.seqcount irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &local->active_txq_lock[i] irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx (work_completion)(&sta->drv_deliver_wk) irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx rcu_read_lock pool_lock#2 irq_context: 0 pernet_ops_rwsem rcu_state.exp_mutex &obj_hash[i].lock pool_lock irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 pernet_ops_rwsem uevent_sock_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&rew->rew_work) pool_lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx 
&wdev->mtx &local->sta_mtx remove_cache_srcu &obj_hash[i].lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx (wq_completion)phy25 irq_context: 0 rtnl_mutex &rdev->wiphy.mtx (wq_completion)phy26 irq_context: 0 (wq_completion)wg-crypt-wg0#11 (work_completion)(&peer->transmit_packet_work) rcu_node_0 irq_context: 0 (wq_completion)wg-crypt-wg0#11 (work_completion)(&peer->transmit_packet_work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &idev->mc_lock &bridge_netdev_addr_lock_key &obj_hash[i].lock pool_lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &rxe->usdev_lock rtnl_mutex &cfs_rq->removed.lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &rxe->usdev_lock rtnl_mutex &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 &rnp->exp_wq[2] irq_context: 0 pernet_ops_rwsem rcu_read_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &type->i_mutex_dir_key#3 sb_writers#4 jbd2_handle &journal->j_state_lock &journal->j_wait_commit &p->pi_lock &cfs_rq->removed.lock irq_context: 0 &mm->mmap_lock &xa->xa_lock#6 stock_lock rcu_read_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 pernet_ops_rwsem rtnl_mutex sysctl_lock krc.lock &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem rtnl_mutex sysctl_lock krc.lock &base->lock irq_context: 0 pernet_ops_rwsem rtnl_mutex sysctl_lock krc.lock &base->lock &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem &root->kernfs_rwsem &sem->wait_lock irq_context: 0 pernet_ops_rwsem &root->kernfs_rwsem &rq->__lock irq_context: 0 pernet_ops_rwsem &root->kernfs_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 __ip_vs_mutex nf_hook_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 pernet_ops_rwsem rtnl_mutex &caifn->caifdevs.lock rcu_state.exp_mutex &rnp->exp_wq[2] irq_context: 0 &mm->mmap_lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 &mm->mmap_lock fill_pool_map-wait-type-override &c->lock irq_context: 0 &mm->mmap_lock fill_pool_map-wait-type-override pool_lock irq_context: 0 &sb->s_type->i_lock_key &xa->xa_lock#6 &obj_hash[i].lock pool_lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &pool->lock fill_pool_map-wait-type-override pool_lock#2 irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &pool->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 &lruvec->lru_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &rxe->usdev_lock rtnl_mutex pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) pool_lock#2 irq_context: 0 krc.lock &obj_hash[i].lock irq_context: 0 krc.lock &base->lock irq_context: 0 krc.lock &base->lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_lock_key &xa->xa_lock#6 rcu_read_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 (wq_completion)events (work_completion)(&smcibdev->port_event_work) &lock->wait_lock irq_context: 0 (wq_completion)events (work_completion)(&smcibdev->port_event_work) &p->pi_lock irq_context: 0 (wq_completion)events (work_completion)(&smcibdev->port_event_work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&smcibdev->port_event_work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)infiniband (work_completion)(&work->work)#2 &rxe->usdev_lock 
rtnl_mutex.wait_lock irq_context: 0 (wq_completion)infiniband (work_completion)(&work->work)#2 &rxe->usdev_lock &p->pi_lock irq_context: 0 (wq_completion)infiniband (work_completion)(&work->work)#2 &device->event_handler_rwsem rcu_read_lock &pool->lock irq_context: 0 (wq_completion)infiniband (work_completion)(&work->work)#2 &device->event_handler_rwsem rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 tomoyo_ss mount_lock irq_context: 0 tomoyo_ss mount_lock rcu_read_lock rename_lock.seqcount irq_context: 0 (wq_completion)infiniband (work_completion)(&work->work)#2 &device->event_handler_rwsem rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&umem->work) rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 pernet_ops_rwsem key#25 irq_context: 0 (wq_completion)wg-crypt-wg2#4 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock batched_entropy_u8.lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &idev->mc_lock &____s->seqcount#2 irq_context: 0 (wq_completion)wg-crypt-wg2#4 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock kfence_freelist_lock irq_context: 0 &head->lock irq_context: 0 pernet_ops_rwsem rtnl_mutex stock_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 rcu_state.exp_mutex &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-sk_lock-AF_RXRPC rcu_node_0 irq_context: 0 (wq_completion)events_unbound (work_completion)(&map->work) &cfs_rq->removed.lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&map->work) rcu_state.exp_mutex &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&map->work) &rnp->exp_wq[0] irq_context: 0 (wq_completion)events (work_completion)(&data->fib_event_work) &data->fib_lock pool_lock irq_context: 0 ppp_mutex rtnl_mutex quarantine_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_state.barrier_mutex &rcu_state.expedited_wq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_state.barrier_mutex &rcu_state.expedited_wq &p->pi_lock irq_context: 0 __ip_vs_mutex nf_hook_mutex stock_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &root->kernfs_rwsem &root->kernfs_iattr_rwsem rcu_node_0 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-sk_lock-AF_TIPC &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &root->kernfs_rwsem &root->kernfs_iattr_rwsem &rcu_state.expedited_wq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex remove_cache_srcu rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex remove_cache_srcu rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex remove_cache_srcu rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex fill_pool_map-wait-type-override &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex fill_pool_map-wait-type-override &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-kex-wg2#13 (work_completion)(&peer->transmit_handshake_work) batched_entropy_u8.lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem 
&p->pi_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem &p->pi_lock &rq->__lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex rcu_read_lock &bat_priv->forw_bat_list_lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex rcu_read_lock &bat_priv->forw_bat_list_lock fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)wg-crypt-wg1#7 (work_completion)(&peer->transmit_packet_work) &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)events free_ipc_work sysctl_lock &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)events free_ipc_work &rq->__lock irq_context: 0 (wq_completion)events free_ipc_work &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem rcu_node_0 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem &rcu_state.expedited_wq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events free_ipc_work quarantine_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &root->kernfs_rwsem &root->kernfs_iattr_rwsem &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &dir->lock#2 quarantine_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem &cfs_rq->removed.lock irq_context: 0 (wq_completion)events (work_completion)(&s->destroy_work) &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&s->destroy_work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex cpu_hotplug_lock xps_map_mutex &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex cpu_hotplug_lock xps_map_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_INET slock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock &____s->seqcount#2 irq_context: 0 sk_lock-AF_INET slock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock &____s->seqcount irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem &rcu_state.expedited_wq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &pcp->lock &zone->lock irq_context: 0 pernet_ops_rwsem lock kernfs_idr_lock &c->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem &sem->wait_lock 
irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex dev_pm_qos_sysfs_mtx &root->kernfs_rwsem &sem->wait_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex dev_pm_qos_sysfs_mtx &sem->wait_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex dev_pm_qos_sysfs_mtx &p->pi_lock irq_context: 0 &root->kernfs_rwsem rcu_node_0 irq_context: 0 &root->kernfs_rwsem &rcu_state.expedited_wq irq_context: 0 &root->kernfs_rwsem &rcu_state.expedited_wq &p->pi_lock irq_context: 0 &root->kernfs_rwsem &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 &root->kernfs_rwsem &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 rcu_node_0 irq_context: 0 ppp_mutex rtnl_mutex smc_ib_devices.mutex &rq->__lock irq_context: 0 ppp_mutex rtnl_mutex smc_ib_devices.mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex remove_cache_srcu rcu_read_lock &rcu_state.gp_wq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex remove_cache_srcu rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex remove_cache_srcu rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex remove_cache_srcu rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)wg-crypt-wg2#7 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg2#14 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock &c->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &net->ipv6.ip6addrlbl_table.lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &net->ipv6.ip6addrlbl_table.lock fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &net->ipv6.ip6addrlbl_table.lock fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex fill_pool_map-wait-type-override &n->list_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)netns 
net_cleanup_work pernet_ops_rwsem fill_pool_map-wait-type-override &c->lock irq_context: 0 pernet_ops_rwsem remove_cache_srcu &pcp->lock &zone->lock irq_context: 0 pernet_ops_rwsem remove_cache_srcu &pcp->lock &zone->lock &____s->seqcount irq_context: 0 (wq_completion)netns net_cleanup_work &base->lock irq_context: 0 (wq_completion)netns net_cleanup_work &base->lock &obj_hash[i].lock irq_context: 0 &rdma_nl_types[idx].sem irq_context: 0 sb_writers#8 tomoyo_ss rcu_read_lock &rcu_state.gp_wq irq_context: 0 sb_writers#8 tomoyo_ss rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 &rdma_nl_types[idx].sem devices_rwsem irq_context: 0 sb_writers#8 tomoyo_ss rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#8 tomoyo_ss rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex sk_lock-AF_INET irq_context: 0 rtnl_mutex sk_lock-AF_INET slock-AF_INET irq_context: 0 rtnl_mutex slock-AF_INET irq_context: 0 rtnl_mutex &rnp->exp_wq[3] irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 remove_cache_srcu irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 remove_cache_srcu quarantine_lock irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 remove_cache_srcu &c->lock irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 remove_cache_srcu &n->list_lock irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 remove_cache_srcu &obj_hash[i].lock irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 remove_cache_srcu pool_lock#2 irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 remove_cache_srcu &rq->__lock irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 tomoyo_ss mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &pf->rwait irq_context: 0 &ppp->rlock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &root->kernfs_rwsem &root->kernfs_iattr_rwsem &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &root->kernfs_rwsem &root->kernfs_iattr_rwsem &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 lock#3 &base->lock irq_context: 0 lock#3 &base->lock &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &caifn->caifdevs.lock quarantine_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex &bat_priv->tt.commit_lock &bat_priv->softif_vlan_list_lock batched_entropy_u8.lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex &bat_priv->tt.commit_lock &bat_priv->softif_vlan_list_lock kfence_freelist_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex &bat_priv->tt.commit_lock &meta->lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex &bat_priv->tt.commit_lock kfence_freelist_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &idev->mc_lock &____s->seqcount irq_context: 0 sk_lock-AF_TIPC &srv->idr_lock irq_context: 0 sk_lock-AF_TIPC &srv->idr_lock pool_lock#2 irq_context: 0 sk_lock-AF_TIPC &tn->nametbl_lock irq_context: 0 sk_lock-AF_TIPC &tn->nametbl_lock 
&____s->seqcount irq_context: 0 sk_lock-AF_TIPC &tn->nametbl_lock pool_lock#2 irq_context: 0 sk_lock-AF_TIPC &tn->nametbl_lock rcu_read_lock pool_lock#2 irq_context: 0 sk_lock-AF_TIPC &tn->nametbl_lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_TIPC &tn->nametbl_lock &service->lock irq_context: 0 sk_lock-AF_TIPC &con->sub_lock irq_context: 0 sk_lock-AF_TIPC rcu_read_lock &service->lock irq_context: 0 sk_lock-AF_TIPC rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_TIPC &tn->nametbl_lock &service->lock pool_lock#2 irq_context: 0 sk_lock-AF_TIPC &tn->nametbl_lock &service->lock &sub->lock irq_context: 0 sk_lock-AF_TIPC &tn->nametbl_lock &service->lock &sub->lock &srv->idr_lock irq_context: 0 sk_lock-AF_TIPC &tn->nametbl_lock &service->lock &sub->lock pool_lock#2 irq_context: 0 sk_lock-AF_TIPC &tn->nametbl_lock &service->lock &sub->lock &con->outqueue_lock irq_context: 0 sk_lock-AF_TIPC &tn->nametbl_lock &service->lock &sub->lock rcu_read_lock &pool->lock/1 irq_context: 0 sk_lock-AF_TIPC &tn->nametbl_lock &service->lock &sub->lock rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 sk_lock-AF_TIPC &tn->nametbl_lock &service->lock &sub->lock rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 sk_lock-AF_TIPC &tn->nametbl_lock &service->lock &sub->lock rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 sk_lock-AF_TIPC &tn->nametbl_lock &service->lock &sub->lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 sk_lock-AF_TIPC &tn->nametbl_lock &service->lock &sub->lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_TIPC &tn->nametbl_lock &nt->cluster_scope_lock irq_context: 0 sk_lock-AF_TIPC &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)tipc_send irq_context: 0 (wq_completion)tipc_send (work_completion)(&con->swork) irq_context: 0 (wq_completion)tipc_send (work_completion)(&con->swork) &con->outqueue_lock irq_context: 0 (wq_completion)tipc_send (work_completion)(&con->swork) pool_lock#2 irq_context: 0 (wq_completion)tipc_send (work_completion)(&con->swork) &list->lock#32 irq_context: 0 (wq_completion)tipc_send (work_completion)(&con->swork) slock-AF_TIPC &list->lock#32 irq_context: 0 (wq_completion)tipc_send (work_completion)(&con->swork) &con->outqueue_lock &obj_hash[i].lock irq_context: 0 (wq_completion)tipc_send (work_completion)(&con->swork) &con->outqueue_lock pool_lock#2 irq_context: 0 sk_lock-AF_TIPC &tipc_net(net)->bclock irq_context: 0 sk_lock-AF_TIPC &tn->nametbl_lock &c->lock irq_context: 0 (wq_completion)tipc_send (work_completion)(&con->swork) &c->lock irq_context: 0 (wq_completion)tipc_send (work_completion)(&con->swork) slock-AF_TIPC pool_lock#2 irq_context: 0 (wq_completion)tipc_send (work_completion)(&con->swork) slock-AF_TIPC &c->lock irq_context: 0 (wq_completion)tipc_send (work_completion)(&con->swork) slock-AF_TIPC &obj_hash[i].lock irq_context: 0 (wq_completion)tipc_send (work_completion)(&con->swork) &list->lock#21 irq_context: 0 (wq_completion)tipc_send (work_completion)(&con->swork) slock-AF_TIPC &list->lock#21 irq_context: 0 sk_lock-AF_TIPC rcu_read_lock &service->lock &c->lock irq_context: 0 sk_lock-AF_TIPC rcu_read_lock &service->lock pool_lock#2 irq_context: 0 sk_lock-AF_TIPC rcu_read_lock &service->lock &n->list_lock irq_context: 0 sk_lock-AF_TIPC rcu_read_lock &service->lock &n->list_lock &c->lock irq_context: 0 (wq_completion)tipc_send (work_completion)(&con->swork) &n->list_lock irq_context: 0 
(wq_completion)tipc_send (work_completion)(&con->swork) &n->list_lock &c->lock irq_context: 0 sk_lock-AF_TIPC slock-AF_TIPC pool_lock#2 irq_context: 0 sk_lock-AF_TIPC slock-AF_TIPC &obj_hash[i].lock irq_context: 0 sk_lock-AF_TIPC rcu_node_0 irq_context: 0 sk_lock-AF_TIPC &obj_hash[i].lock pool_lock irq_context: 0 sk_lock-AF_INET6 &zone->lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 &zone->lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 &zone->lock &____s->seqcount irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &n->list_lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &n->list_lock &c->lock irq_context: 0 sk_lock-AF_TIPC rcu_read_lock &service->lock &____s->seqcount#2 irq_context: 0 sk_lock-AF_TIPC rcu_read_lock &service->lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_TIPC &tn->nametbl_lock &service->lock &c->lock irq_context: 0 sk_lock-AF_TIPC slock-AF_TIPC &c->lock irq_context: 0 sk_lock-AF_TIPC &____s->seqcount#2 irq_context: 0 sk_lock-AF_TIPC &____s->seqcount irq_context: 0 sk_lock-AF_TIPC slock-AF_TIPC &____s->seqcount irq_context: 0 sk_lock-AF_TIPC slock-AF_TIPC rcu_read_lock pool_lock#2 irq_context: 0 sk_lock-AF_TIPC &con->sub_lock &tn->nametbl_lock irq_context: 0 sk_lock-AF_TIPC &con->sub_lock &tn->nametbl_lock &service->lock irq_context: 0 sk_lock-AF_TIPC &con->sub_lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_TIPC &con->sub_lock pool_lock#2 irq_context: 0 sk_lock-AF_TIPC &con->outqueue_lock irq_context: 0 sk_lock-AF_TIPC &tn->nametbl_lock &service->lock &sub->lock &c->lock irq_context: 0 sk_lock-AF_TIPC &tn->nametbl_lock &service->lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_TIPC &tn->nametbl_lock krc.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem &rq->__lock &cfs_rq->removed.lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_TIPC slock-AF_TIPC pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_TIPC slock-AF_TIPC &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_TIPC &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_TIPC &srv->idr_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_TIPC &con->sub_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_TIPC &con->sub_lock &tn->nametbl_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_TIPC &con->sub_lock &tn->nametbl_lock &service->lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_TIPC &con->sub_lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_TIPC &con->sub_lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_TIPC &con->outqueue_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_TIPC &tn->nametbl_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_TIPC &tn->nametbl_lock &service->lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_TIPC &tn->nametbl_lock &service->lock &sub->lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_TIPC &tn->nametbl_lock &service->lock &sub->lock &srv->idr_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_TIPC &tn->nametbl_lock &service->lock &sub->lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_TIPC &tn->nametbl_lock &service->lock &sub->lock &con->outqueue_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_TIPC &tn->nametbl_lock &service->lock &sub->lock rcu_read_lock &pool->lock/1 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_TIPC 
&tn->nametbl_lock &service->lock &sub->lock rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_TIPC &tn->nametbl_lock &service->lock &sub->lock rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_TIPC &tn->nametbl_lock &service->lock &sub->lock rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_TIPC &tn->nametbl_lock &service->lock &sub->lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_TIPC &tn->nametbl_lock &service->lock &sub->lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_TIPC &tn->nametbl_lock &service->lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_TIPC &tn->nametbl_lock &service->lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_TIPC &tn->nametbl_lock &nt->cluster_scope_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_TIPC &tn->nametbl_lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_TIPC &tn->nametbl_lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_TIPC &tn->nametbl_lock krc.lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_TIPC &tipc_net(net)->bclock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_TIPC &srv->idr_lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_TIPC &srv->idr_lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_TIPC &tn->nametbl_lock &service->lock krc.lock irq_context: 0 ppp_mutex rtnl_mutex batched_entropy_u32.lock crngs.lock irq_context: 0 &sb->s_type->i_mutex_key#9 rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 &sb->s_type->i_mutex_key#9 rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)wg-crypt-wg0#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) irq_context: 0 (wq_completion)wg-crypt-wg0#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &r->consumer_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem &p->pi_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock genl_mutex rtnl_mutex &sdata->sec_mtx &sec->lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &sdata->sec_mtx &rq->__lock irq_context: 0 cb_lock genl_mutex rtnl_mutex rlock-AF_NETLINK irq_context: 0 (crypto_chain).rwsem &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem &base->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem &base->lock 
&obj_hash[i].lock irq_context: 0 &root->kernfs_iattr_rwsem rcu_node_0 irq_context: 0 &root->kernfs_iattr_rwsem &rcu_state.expedited_wq irq_context: 0 &root->kernfs_iattr_rwsem &rcu_state.expedited_wq &p->pi_lock irq_context: 0 &root->kernfs_iattr_rwsem &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 &root->kernfs_iattr_rwsem &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_unbound (work_completion)(&(&kfence_timer)->work) rcu_node_0 irq_context: 0 &pipe->mutex/1 &base->lock irq_context: 0 &pipe->mutex/1 &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 kn->active#58 &rq->__lock irq_context: 0 kn->active#58 fs_reclaim irq_context: 0 kn->active#58 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#58 stock_lock irq_context: 0 kn->active#58 &kernfs_locks->open_file_mutex[count] irq_context: 0 kn->active#58 &kernfs_locks->open_file_mutex[count] fs_reclaim irq_context: 0 kn->active#58 &kernfs_locks->open_file_mutex[count] fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &f->f_pos_lock sb_writers#11 irq_context: 0 &f->f_pos_lock sb_writers#11 fs_reclaim irq_context: 0 &f->f_pos_lock sb_writers#11 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &f->f_pos_lock sb_writers#11 &mm->mmap_lock irq_context: 0 &f->f_pos_lock sb_writers#11 &of->mutex irq_context: 0 &f->f_pos_lock sb_writers#11 &of->mutex &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#11 &of->mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (work_completion)(&data->gc_work) irq_context: 0 &f->f_pos_lock sb_writers#11 &of->mutex cgroup_mutex irq_context: 0 &f->f_pos_lock sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock irq_context: 0 &f->f_pos_lock sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock css_set_lock irq_context: 0 &f->f_pos_lock sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock fs_reclaim irq_context: 0 &f->f_pos_lock sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &f->f_pos_lock sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock &c->lock irq_context: 0 &f->f_pos_lock sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cpuset_rwsem irq_context: 0 &f->f_pos_lock sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cpuset_rwsem cpuset_rwsem.rss.gp_wait.lock irq_context: 0 &f->f_pos_lock sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cpuset_rwsem &rnp->exp_lock irq_context: 0 &f->f_pos_lock sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cpuset_rwsem rcu_state.exp_mutex irq_context: 0 &f->f_pos_lock sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cpuset_rwsem rcu_state.exp_mutex rcu_state.exp_mutex.wait_lock irq_context: 0 &f->f_pos_lock sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cpuset_rwsem rcu_state.exp_mutex &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cpuset_rwsem rcu_state.exp_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cpuset_rwsem 
rcu_state.exp_mutex rcu_node_0 irq_context: 0 &f->f_pos_lock sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cpuset_rwsem rcu_state.exp_mutex &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cpuset_rwsem rcu_state.exp_mutex rcu_read_lock &pool->lock irq_context: 0 &f->f_pos_lock sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cpuset_rwsem rcu_state.exp_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_TIPC &tn->nametbl_lock &____s->seqcount#2 irq_context: 0 &f->f_pos_lock sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cpuset_rwsem rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 &f->f_pos_lock sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cpuset_rwsem rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cpuset_rwsem rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cpuset_rwsem rcu_state.exp_mutex &rnp->exp_wq[2] irq_context: 0 sk_lock-AF_TIPC &n->list_lock irq_context: 0 sk_lock-AF_TIPC &n->list_lock &c->lock irq_context: 0 &f->f_pos_lock sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cpuset_rwsem &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cpuset_rwsem.waiters.lock irq_context: 0 &f->f_pos_lock sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cpuset_rwsem.rss.gp_wait.lock irq_context: 0 &f->f_pos_lock sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cpuset_rwsem.rss.gp_wait.lock &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock css_set_lock cgroup_file_kn_lock irq_context: 0 &f->f_pos_lock sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock css_set_lock &p->pi_lock irq_context: 0 &f->f_pos_lock sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock css_set_lock &p->pi_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock css_set_lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cpuset_rwsem &p->pi_lock irq_context: 0 &f->f_pos_lock sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cpuset_rwsem &p->pi_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cpuset_rwsem &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cpuset_rwsem &p->alloc_lock irq_context: 0 &f->f_pos_lock sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cpuset_rwsem &p->alloc_lock &____s->seqcount#2 irq_context: 0 &f->f_pos_lock sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cpuset_rwsem cpuset_attach_wq.lock irq_context: 0 &f->f_pos_lock sb_writers#11 &of->mutex cgroup_mutex (wq_completion)cpuset_migrate_mm irq_context: 0 &f->f_pos_lock sb_writers#11 &of->mutex cgroup_mutex &wq->mutex irq_context: 0 &f->f_pos_lock sb_writers#11 &of->mutex cgroup_mutex &wq->mutex &pool->lock/1 irq_context: 0 &f->f_pos_lock sb_writers#11 &of->mutex cgroup_mutex &wq->mutex &x->wait#10 irq_context: 0 &f->f_pos_lock sb_writers#11 &obj_hash[i].lock irq_context: 0 &type->i_mutex_dir_key#7 &xa->xa_lock#12 irq_context: 0 
&type->i_mutex_dir_key#7 &obj_hash[i].lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7 &dentry->d_lock &wq#2 irq_context: 0 sk_lock-AF_TIPC &tn->nametbl_lock &service->lock &sub->lock &n->list_lock irq_context: 0 sk_lock-AF_TIPC &tn->nametbl_lock &service->lock &sub->lock &n->list_lock &c->lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_TIPC &c->lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_TIPC &n->list_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_TIPC &n->list_lock &c->lock irq_context: 0 rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock rcu_read_lock &r->producer_lock irq_context: 0 rcu_read_lock rcu_read_lock_bh &obj_hash[i].lock pool_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_TIPC &tn->nametbl_lock &c->lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_TIPC quarantine_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &net->sctp.addr_wq_lock k-slock-AF_INET6/1 &obj_hash[i].lock pool_lock irq_context: 0 sk_lock-AF_INET6 &net->xfrm.xfrm_policy_lock irq_context: 0 &sb->s_type->i_mutex_key#10 &net->xfrm.xfrm_policy_lock irq_context: 0 &sb->s_type->i_mutex_key#10 &policy->lock irq_context: 0 &sb->s_type->i_mutex_key#10 &list->lock#33 irq_context: 0 &smc->clcsock_release_lock irq_context: 0 &smc->clcsock_release_lock k-sk_lock-AF_INET irq_context: 0 &smc->clcsock_release_lock k-sk_lock-AF_INET k-slock-AF_INET irq_context: 0 &smc->clcsock_release_lock k-slock-AF_INET irq_context: 0 &smc->clcsock_release_lock nf_sockopt_mutex irq_context: 0 &smc->clcsock_release_lock &mm->mmap_lock irq_context: 0 &smc->clcsock_release_lock &mm->mmap_lock &rq->__lock irq_context: 0 &smc->clcsock_release_lock &mm->mmap_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &smc->clcsock_release_lock rtnl_mutex irq_context: 0 sk_lock-AF_TIPC &srv->idr_lock &c->lock irq_context: 0 sk_lock-AF_TIPC slock-AF_TIPC &n->list_lock irq_context: 0 sk_lock-AF_TIPC slock-AF_TIPC &n->list_lock &c->lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_TIPC &tn->nametbl_lock &service->lock &sub->lock &c->lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_TIPC &tn->nametbl_lock &service->lock &sub->lock &n->list_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_TIPC &tn->nametbl_lock &service->lock &sub->lock &n->list_lock &c->lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_TIPC &tn->nametbl_lock &____s->seqcount#2 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_TIPC &tn->nametbl_lock &pcp->lock &zone->lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_TIPC &tn->nametbl_lock &____s->seqcount irq_context: 0 misc_mtx remove_cache_srcu irq_context: 0 misc_mtx remove_cache_srcu quarantine_lock irq_context: 0 misc_mtx remove_cache_srcu &c->lock irq_context: 0 misc_mtx remove_cache_srcu &n->list_lock irq_context: 0 misc_mtx remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 misc_mtx remove_cache_srcu &obj_hash[i].lock irq_context: 0 misc_mtx remove_cache_srcu &rq->__lock irq_context: 0 misc_mtx remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rcu_read_lock rcu_read_lock_bh &r->producer_lock#4 irq_context: 0 rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock &r->producer_lock#4 irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &cfs_rq->removed.lock irq_context: 0 rtnl_mutex &sch->q.lock tk_core.seq.seqcount irq_context: 0 rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: 
&qdisc_tx_busylock rcu_read_lock pcpu_lock irq_context: 0 rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock rcu_read_lock &obj_hash[i].lock irq_context: 0 rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock rcu_read_lock pool_lock#2 irq_context: 0 rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock rcu_read_lock quarantine_lock irq_context: 0 &sb->s_type->i_mutex_key#10 &smc->clcsock_release_lock k-sk_lock-AF_INET &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 &net->sctp.addr_wq_lock slock-AF_INET/1 &obj_hash[i].lock pool_lock irq_context: 0 css_set_lock cgroup_file_kn_lock irq_context: softirq rcu_callback css_set_lock irq_context: softirq rcu_callback css_set_lock &obj_hash[i].lock irq_context: softirq rcu_callback css_set_lock pool_lock#2 irq_context: softirq rcu_callback css_set_lock krc.lock irq_context: 0 &sb->s_type->i_mutex_key#9 per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 &mm->mmap_lock stock_lock rcu_read_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 (wq_completion)tipc_send (work_completion)(&con->swork) slock-AF_TIPC &n->list_lock irq_context: 0 (wq_completion)tipc_send (work_completion)(&con->swork) slock-AF_TIPC &n->list_lock &c->lock irq_context: 0 sk_lock-AF_TIPC &tn->nametbl_lock &service->lock &n->list_lock irq_context: 0 sk_lock-AF_TIPC &tn->nametbl_lock &service->lock &n->list_lock &c->lock irq_context: 0 (wq_completion)tipc_send (work_completion)(&con->swork) slock-AF_TIPC &____s->seqcount irq_context: 0 (wq_completion)tipc_send (work_completion)(&con->swork) slock-AF_TIPC rcu_read_lock pool_lock#2 irq_context: 0 sk_lock-AF_TIPC slock-AF_TIPC &____s->seqcount#2 irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex &root->kernfs_rwsem &rq->__lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex &root->kernfs_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex &sem->wait_lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex cpu_hotplug_lock cpuset_rwsem rcu_state.exp_mutex &rnp->exp_wq[0] irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex cgroup_mutex.wait_lock irq_context: 0 sk_lock-AF_INET6 &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock &c->lock irq_context: 0 kn->active#58 &c->lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7 &n->list_lock &c->lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7 &rq->__lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 kn->active#58 &kernfs_locks->open_file_mutex[count] &c->lock irq_context: 0 &f->f_pos_lock sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cpuset_rwsem rcu_state.exp_mutex &rnp->exp_wq[1] irq_context: 0 sb_writers#4 tomoyo_ss rcu_read_lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 tomoyo_ss rcu_read_lock &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &smc->clcsock_release_lock rtnl_mutex ipvs->sync_mutex fs_reclaim irq_context: 0 &smc->clcsock_release_lock rtnl_mutex ipvs->sync_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &smc->clcsock_release_lock rtnl_mutex 
ipvs->sync_mutex pool_lock#2 irq_context: 0 &smc->clcsock_release_lock rtnl_mutex ipvs->sync_mutex &zone->lock irq_context: 0 &smc->clcsock_release_lock rtnl_mutex ipvs->sync_mutex &____s->seqcount irq_context: 0 &smc->clcsock_release_lock rtnl_mutex ipvs->sync_mutex stock_lock irq_context: 0 &smc->clcsock_release_lock rtnl_mutex ipvs->sync_mutex &c->lock irq_context: 0 &smc->clcsock_release_lock rtnl_mutex ipvs->sync_mutex mmu_notifier_invalidate_range_start irq_context: 0 &smc->clcsock_release_lock rtnl_mutex ipvs->sync_mutex mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 &smc->clcsock_release_lock rtnl_mutex ipvs->sync_mutex mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &smc->clcsock_release_lock rtnl_mutex ipvs->sync_mutex &sb->s_type->i_lock_key#8 irq_context: 0 &smc->clcsock_release_lock rtnl_mutex ipvs->sync_mutex &dir->lock irq_context: 0 &smc->clcsock_release_lock rtnl_mutex ipvs->sync_mutex &obj_hash[i].lock irq_context: 0 &smc->clcsock_release_lock rtnl_mutex ipvs->sync_mutex &obj_hash[i].lock pool_lock irq_context: 0 &smc->clcsock_release_lock rtnl_mutex ipvs->sync_mutex &rq->__lock irq_context: 0 &smc->clcsock_release_lock rtnl_mutex ipvs->sync_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &smc->clcsock_release_lock rtnl_mutex ipvs->sync_mutex k-sk_lock-AF_INET irq_context: 0 &smc->clcsock_release_lock rtnl_mutex ipvs->sync_mutex k-sk_lock-AF_INET &rq->__lock irq_context: 0 &smc->clcsock_release_lock rtnl_mutex ipvs->sync_mutex k-sk_lock-AF_INET &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &smc->clcsock_release_lock rtnl_mutex ipvs->sync_mutex k-sk_lock-AF_INET k-slock-AF_INET irq_context: 0 &smc->clcsock_release_lock rtnl_mutex ipvs->sync_mutex k-sk_lock-AF_INET &table->hash[i].lock irq_context: 0 &smc->clcsock_release_lock rtnl_mutex ipvs->sync_mutex k-sk_lock-AF_INET &table->hash[i].lock k-clock-AF_INET irq_context: 0 &smc->clcsock_release_lock rtnl_mutex ipvs->sync_mutex k-sk_lock-AF_INET &table->hash[i].lock &table->hash2[i].lock irq_context: 0 &smc->clcsock_release_lock rtnl_mutex ipvs->sync_mutex k-slock-AF_INET irq_context: 0 &smc->clcsock_release_lock rtnl_mutex ipvs->sync_mutex k-sk_lock-AF_INET fs_reclaim irq_context: 0 &smc->clcsock_release_lock rtnl_mutex ipvs->sync_mutex k-sk_lock-AF_INET fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &smc->clcsock_release_lock rtnl_mutex ipvs->sync_mutex k-sk_lock-AF_INET fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 &smc->clcsock_release_lock rtnl_mutex ipvs->sync_mutex k-sk_lock-AF_INET &c->lock irq_context: 0 &smc->clcsock_release_lock rtnl_mutex ipvs->sync_mutex k-sk_lock-AF_INET pool_lock#2 irq_context: 0 &smc->clcsock_release_lock rtnl_mutex ipvs->sync_mutex k-sk_lock-AF_INET &obj_hash[i].lock irq_context: 0 &smc->clcsock_release_lock rtnl_mutex ipvs->sync_mutex k-sk_lock-AF_INET &in_dev->mc_tomb_lock irq_context: 0 &smc->clcsock_release_lock rtnl_mutex ipvs->sync_mutex k-sk_lock-AF_INET &im->lock irq_context: 0 &smc->clcsock_release_lock rtnl_mutex ipvs->sync_mutex k-sk_lock-AF_INET &base->lock irq_context: 0 &smc->clcsock_release_lock rtnl_mutex ipvs->sync_mutex k-sk_lock-AF_INET &base->lock &obj_hash[i].lock irq_context: 0 &smc->clcsock_release_lock rtnl_mutex ipvs->sync_mutex kthread_create_lock irq_context: 0 &smc->clcsock_release_lock rtnl_mutex ipvs->sync_mutex &p->pi_lock irq_context: 0 &smc->clcsock_release_lock rtnl_mutex ipvs->sync_mutex &p->pi_lock &rq->__lock 
irq_context: 0 &smc->clcsock_release_lock rtnl_mutex ipvs->sync_mutex &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock &im->lock &zone->lock irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: 0 &smc->clcsock_release_lock rtnl_mutex ipvs->sync_mutex &x->wait irq_context: 0 &smc->clcsock_release_lock rtnl_mutex ipvs->sync_mutex &ipvs->sync_buff_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &ei->i_data_sem &(ei->i_block_reservation_lock) key#3 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &mapping->private_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock stock_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock pool_lock#2 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &sb->s_type->i_lock_key#22 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &sb->s_type->i_lock_key#22 &xa->xa_lock#6 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock lock#4 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock lock#4 &lruvec->lru_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock lock#5 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &lruvec->lru_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock rcu_read_lock pool_lock#2 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock mmu_notifier_invalidate_range_start irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &journal->j_state_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &sbi->s_orphan_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_raw_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem &ei->i_prealloc_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock 
jbd2_handle &ei->i_data_sem &ei->i_raw_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem &ei->i_es_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem &ei->i_es_lock &sbi->s_es_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem &ei->i_es_lock &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem &ei->i_es_lock pool_lock#2 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem mmu_notifier_invalidate_range_start irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem pool_lock#2 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle tk_core.seq.seqcount irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 key#3 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 key#15 irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ei->i_data_sem batched_entropy_u8.lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ei->i_data_sem kfence_freelist_lock irq_context: 0 &xt[i].mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 &xt[i].mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 &xt[i].mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rcu_read_lock rcu_read_lock_bh fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 rcu_read_lock rcu_read_lock_bh fill_pool_map-wait-type-override &c->lock irq_context: 0 rcu_read_lock rcu_read_lock_bh fill_pool_map-wait-type-override &n->list_lock irq_context: 0 rcu_read_lock rcu_read_lock_bh fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 rcu_read_lock rcu_read_lock_bh fill_pool_map-wait-type-override pool_lock irq_context: 0 &sb->s_type->i_mutex_key#10 &smc->clcsock_release_lock k-sk_lock-AF_INET &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle &journal->j_wait_updates &p->pi_lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle &journal->j_wait_updates &p->pi_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle &journal->j_wait_updates &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle &journal->j_wait_updates &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rcu_read_lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET/1 &rq->__lock &cfs_rq->removed.lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET/1 &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem mmu_notifier_invalidate_range_start 
&rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&data->fib_event_work) &data->fib_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)rcu_gp (work_completion)(&(&ssp->srcu_sup->work)->work) &ssp->srcu_sup->srcu_gp_mutex &cfs_rq->removed.lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&(&ssp->srcu_sup->work)->work) &ssp->srcu_sup->srcu_gp_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) tasklist_lock &sighand->siglock &(&sig->stats_lock)->lock &____s->seqcount#5 pidmap_lock &obj_hash[i].lock pool_lock irq_context: 0 &type->i_mutex_dir_key#3 sb_writers#4 jbd2_handle &rq->__lock &cfs_rq->removed.lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ret->b_state_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ret->b_state_lock &journal->j_list_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem remove_cache_srcu irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem remove_cache_srcu quarantine_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem remove_cache_srcu &c->lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem remove_cache_srcu &n->list_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem remove_cache_srcu &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem remove_cache_srcu rcu_read_lock rcu_node_0 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem remove_cache_srcu rcu_read_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem remove_cache_srcu rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem remove_cache_srcu rcu_read_lock &rcu_state.gp_wq irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem remove_cache_srcu rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem remove_cache_srcu rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem remove_cache_srcu rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &journal->j_wait_updates irq_context: 0 kn->active#58 &n->list_lock irq_context: 0 kn->active#58 &n->list_lock &c->lock irq_context: 0 kn->active#58 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq 
irq_context: 0 &f->f_pos_lock sb_writers#11 &c->lock irq_context: 0 kn->active#58 fs_reclaim &rq->__lock irq_context: 0 kn->active#58 fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_INET rcu_read_lock key#22 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_es_lock key#6 irq_context: softirq rcu_read_lock dev->qdisc_tx_busylock ?: &qdisc_tx_busylock rcu_read_lock &r->producer_lock irq_context: 0 lock#4 rcu_read_lock pool_lock#2 irq_context: 0 lock#4 &obj_hash[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)wg-crypt-wg2#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 rds_ib_devices_lock irq_context: 0 remove_cache_srcu &____s->seqcount irq_context: 0 rcu_read_lock batched_entropy_u8.lock crngs.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_read_lock &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->tt.work)->work) pool_lock#2 irq_context: 0 (wq_completion)rcu_gp (work_completion)(&sdp->work) &cfs_rq->removed.lock irq_context: 0 sb_writers#4 mapping.invalidate_lock rcu_read_lock &n->list_lock irq_context: 0 sb_writers#4 mapping.invalidate_lock rcu_read_lock &n->list_lock &c->lock irq_context: 0 sb_writers#4 mapping.invalidate_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#4 mapping.invalidate_lock rcu_read_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 sb_writers#4 mapping.invalidate_lock rcu_read_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &fsnotify_mark_srcu remove_cache_srcu rcu_read_lock rcu_node_0 irq_context: 0 &fsnotify_mark_srcu remove_cache_srcu rcu_read_lock &rq->__lock irq_context: 0 &fsnotify_mark_srcu remove_cache_srcu rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &disk->open_mutex &lo->lo_mutex &rq->__lock irq_context: 0 &disk->open_mutex &lo->lo_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rcu_read_lock rcu_read_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 kn->active#58 &____s->seqcount#2 irq_context: 0 kn->active#58 &____s->seqcount irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock &pcp->lock &zone->lock irq_context: softirq (&timer) rcu_read_lock &____s->seqcount#2 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->bla.work)->work) rcu_read_lock &____s->seqcount#2 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->bla.work)->work) rcu_read_lock &____s->seqcount irq_context: 0 &sig->cred_guard_mutex tomoyo_ss &cfs_rq->removed.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &xt[i].mutex &rq->__lock irq_context: 0 
(wq_completion)netns net_cleanup_work pernet_ops_rwsem &xt[i].mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &journal->j_state_lock &journal->j_wait_commit &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &sbi->s_orphan_lock &ret->b_state_lock &journal->j_list_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &journal->j_wait_updates &p->pi_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &journal->j_wait_updates &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &journal->j_wait_updates &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &fq->mq_flush_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &fq->mq_flush_lock tk_core.seq.seqcount irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &fq->mq_flush_lock &q->requeue_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &fq->mq_flush_lock &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &fq->mq_flush_lock rcu_read_lock &pool->lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &fq->mq_flush_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &fq->mq_flush_lock rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &fq->mq_flush_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &fq->mq_flush_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &fq->mq_flush_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &x->wait#26 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem fill_pool_map-wait-type-override &n->list_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem fill_pool_map-wait-type-override &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem fill_pool_map-wait-type-override &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 jbd2_handle rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 jbd2_handle &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_data_sem rcu_read_lock &cfs_rq->removed.lock irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 &sig->cred_guard_mutex tomoyo_ss remove_cache_srcu rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sig->cred_guard_mutex remove_cache_srcu rcu_read_lock rcu_node_0 irq_context: 0 &sig->cred_guard_mutex remove_cache_srcu rcu_read_lock &rq->__lock irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 
&type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_data_sem rcu_read_lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 rcu_read_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &pnettable->lock &rq->__lock irq_context: softirq rcu_read_lock rcu_read_lock rlock-AF_INET irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock rlock-AF_INET irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ei->i_data_sem remove_cache_srcu irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ei->i_data_sem remove_cache_srcu quarantine_lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ei->i_data_sem remove_cache_srcu &c->lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ei->i_data_sem remove_cache_srcu &n->list_lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ei->i_data_sem remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ei->i_data_sem remove_cache_srcu &obj_hash[i].lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ei->i_data_sem remove_cache_srcu rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ei->i_data_sem remove_cache_srcu rcu_read_lock &rcu_state.expedited_wq irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ei->i_data_sem remove_cache_srcu rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ei->i_data_sem remove_cache_srcu rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ei->i_data_sem remove_cache_srcu rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ei->i_data_sem remove_cache_srcu rcu_node_0 irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ei->i_data_sem remove_cache_srcu &rcu_state.expedited_wq irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ei->i_data_sem remove_cache_srcu &rcu_state.expedited_wq &p->pi_lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ei->i_data_sem remove_cache_srcu &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ei->i_data_sem remove_cache_srcu &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ei->i_data_sem remove_cache_srcu &rq->__lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ei->i_data_sem remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#9 mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#9 mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 jbd2_handle rcu_read_lock &rcu_state.gp_wq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 jbd2_handle rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 jbd2_handle rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 jbd2_handle rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)kblockd (work_completion)(&(&hctx->run_work)->work) rcu_read_lock &____s->seqcount#2 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &lg->lg_mutex &rq->__lock 
&per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 bit_wait_table + i irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &mapping->private_lock irq_context: 0 (wq_completion)kblockd (work_completion)(&(&q->requeue_work)->work) rcu_read_lock &base->lock irq_context: 0 (wq_completion)kblockd (work_completion)(&(&q->requeue_work)->work) rcu_read_lock &base->lock &obj_hash[i].lock irq_context: 0 key#22 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock &pool->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &sbi->s_orphan_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_es_lock &sbi->s_es_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_es_lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_es_lock pool_lock#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &journal->j_state_lock &journal->j_wait_commit irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &journal->j_wait_commit irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &journal->j_wait_done_commit irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 key#3 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 key#15 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem &pcp->lock &zone->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem rcu_read_lock pool_lock#2 irq_context: 0 &type->s_umount_key#48 irq_context: 0 &type->s_umount_key#48 shrinker_rwsem irq_context: 0 &type->s_umount_key#48 rename_lock.seqcount irq_context: 0 &type->s_umount_key#48 &dentry->d_lock irq_context: 0 &type->s_umount_key#48 &dentry->d_lock &dentry->d_lock/1 irq_context: 0 &type->s_umount_key#48 &dentry->d_lock/1 irq_context: 0 &type->s_umount_key#48 rcu_read_lock &dentry->d_lock irq_context: 0 &type->s_umount_key#48 &sb->s_type->i_lock_key#32 irq_context: 0 &type->s_umount_key#48 &s->s_inode_list_lock irq_context: 0 &type->s_umount_key#48 &xa->xa_lock#6 irq_context: 0 &type->s_umount_key#48 &obj_hash[i].lock irq_context: 0 &type->s_umount_key#48 pool_lock#2 irq_context: 0 &type->s_umount_key#48 &fsnotify_mark_srcu irq_context: 0 &type->s_umount_key#48 binderfs_minors_mutex irq_context: 0 &type->s_umount_key#48 binderfs_minors_mutex binderfs_minors.xa_lock irq_context: 0 &type->s_umount_key#48 sb_lock irq_context: 0 (work_completion)(&data->suspend_work) irq_context: 0 &hdev->unregister_lock irq_context: 0 hci_dev_list_lock irq_context: 0 (work_completion)(&hdev->power_on) irq_context: 0 (work_completion)(&hdev->cmd_sync_work) irq_context: 0 (work_completion)(&hdev->reenable_adv_work) irq_context: 0 &hdev->cmd_sync_work_lock irq_context: 0 &hdev->req_lock (work_completion)(&(&hdev->interleave_scan)->work) irq_context: 0 &hdev->req_lock hci_dev_list_lock irq_context: 0 
&hdev->req_lock (work_completion)(&hdev->tx_work) irq_context: 0 &hdev->req_lock rcu_read_lock rcu_node_0 irq_context: 0 &hdev->req_lock rcu_read_lock &rq->__lock irq_context: 0 &hdev->req_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &hdev->req_lock (work_completion)(&hdev->rx_work) irq_context: 0 &hdev->req_lock &wq->mutex irq_context: 0 (wq_completion)wg-crypt-wg0#7 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 &hdev->req_lock &wq->mutex &pool->lock/1 irq_context: 0 &hdev->req_lock &wq->mutex &x->wait#10 irq_context: 0 &hdev->req_lock &hdev->lock irq_context: 0 &hdev->req_lock &hdev->lock hci_cb_list_lock irq_context: 0 &hdev->req_lock &hdev->lock hci_cb_list_lock &list->lock#13 irq_context: 0 &hdev->req_lock &hdev->lock hci_cb_list_lock &obj_hash[i].lock irq_context: 0 &hdev->req_lock &hdev->lock hci_cb_list_lock (work_completion)(&(&conn->id_addr_timer)->work) irq_context: 0 &hdev->req_lock &hdev->lock hci_cb_list_lock &conn->chan_lock irq_context: 0 &hdev->req_lock &hdev->lock hci_cb_list_lock &rnp->exp_lock irq_context: 0 &hdev->req_lock &hdev->lock hci_cb_list_lock rcu_state.exp_mutex irq_context: 0 &hdev->req_lock &hdev->lock hci_cb_list_lock rcu_state.exp_mutex rcu_state.exp_mutex.wait_lock irq_context: 0 &hdev->req_lock &hdev->lock hci_cb_list_lock rcu_state.exp_mutex &rq->__lock irq_context: 0 &hdev->req_lock &hdev->lock hci_cb_list_lock rcu_state.exp_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events free_ipc_work rcu_read_lock sysctl_lock irq_context: 0 (wq_completion)events free_ipc_work rcu_read_lock &sb->s_type->i_lock_key#23 irq_context: 0 (wq_completion)events free_ipc_work &sb->s_type->i_lock_key#23 irq_context: 0 (wq_completion)events free_ipc_work &sb->s_type->i_lock_key#23 &dentry->d_lock irq_context: 0 (wq_completion)events free_ipc_work &dentry->d_lock irq_context: 0 (wq_completion)events free_ipc_work rename_lock.seqcount irq_context: 0 (wq_completion)events free_ipc_work &dentry->d_lock &sb->s_type->i_lock_key#23 &dentry->d_lock &lru->node[i].lock irq_context: 0 (wq_completion)events free_ipc_work &s->s_inode_list_lock irq_context: 0 (wq_completion)events free_ipc_work &xa->xa_lock#6 irq_context: 0 &hdev->req_lock &hdev->lock hci_cb_list_lock rcu_state.exp_mutex rcu_node_0 irq_context: 0 &hdev->req_lock &hdev->lock hci_cb_list_lock rcu_state.exp_mutex &obj_hash[i].lock irq_context: 0 &hdev->req_lock &hdev->lock hci_cb_list_lock rcu_state.exp_mutex rcu_read_lock &pool->lock irq_context: 0 &hdev->req_lock &hdev->lock hci_cb_list_lock rcu_state.exp_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 &hdev->req_lock &hdev->lock hci_cb_list_lock rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 &hdev->req_lock &hdev->lock hci_cb_list_lock rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 &hdev->req_lock &hdev->lock hci_cb_list_lock rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &hdev->req_lock &hdev->lock hci_cb_list_lock rcu_state.exp_mutex &rnp->exp_wq[1] irq_context: 0 &hdev->req_lock &hdev->lock hci_cb_list_lock &list->lock#12 
irq_context: 0 &hdev->req_lock &hdev->lock hci_cb_list_lock pool_lock#2 irq_context: 0 &hdev->req_lock &hdev->lock hci_cb_list_lock (work_completion)(&(&conn->info_timer)->work) irq_context: 0 &hdev->req_lock &hdev->lock hci_cb_list_lock rcu_read_lock &pool->lock irq_context: 0 &hdev->req_lock &hdev->lock &obj_hash[i].lock irq_context: 0 &hdev->req_lock &hdev->lock &base->lock irq_context: 0 &hdev->req_lock &hdev->lock &base->lock &obj_hash[i].lock irq_context: 0 &hdev->req_lock &hdev->lock &rq->__lock irq_context: 0 &hdev->req_lock &hdev->lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &hdev->req_lock &hdev->lock (work_completion)(&(&conn->disc_work)->work) irq_context: 0 &hdev->req_lock &hdev->lock (work_completion)(&(&conn->auto_accept_work)->work) irq_context: 0 &hdev->req_lock &hdev->lock (work_completion)(&(&conn->idle_work)->work) irq_context: 0 &hdev->req_lock &hdev->lock &list->lock#12 irq_context: 0 &hdev->req_lock &hdev->lock rcu_state.exp_mutex rcu_node_0 irq_context: 0 &hdev->req_lock &hdev->lock rcu_state.exp_mutex &obj_hash[i].lock irq_context: 0 &hdev->req_lock &hdev->lock rcu_state.exp_mutex rcu_read_lock &pool->lock irq_context: 0 &hdev->req_lock &hdev->lock rcu_state.exp_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 &hdev->req_lock &hdev->lock rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 &hdev->req_lock &hdev->lock rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 &hdev->req_lock &hdev->lock rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &hdev->req_lock &hdev->lock rcu_state.exp_mutex &rq->__lock irq_context: 0 &hdev->req_lock &hdev->lock rcu_state.exp_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &hdev->req_lock &hdev->lock rcu_state.exp_mutex &rnp->exp_wq[2] irq_context: 0 &hdev->req_lock &hdev->lock &k->k_lock irq_context: 0 &hdev->req_lock &hdev->lock &root->kernfs_rwsem irq_context: 0 &hdev->req_lock &hdev->lock &root->kernfs_rwsem &rq->__lock irq_context: 0 &hdev->req_lock &hdev->lock &root->kernfs_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &hdev->req_lock &hdev->lock &root->kernfs_rwsem irq_context: 0 &hdev->req_lock &hdev->lock dev_pm_qos_sysfs_mtx irq_context: 0 &hdev->req_lock &hdev->lock dev_pm_qos_sysfs_mtx &root->kernfs_rwsem irq_context: 0 &hdev->req_lock &hdev->lock dev_pm_qos_sysfs_mtx &root->kernfs_rwsem irq_context: 0 &hdev->req_lock &hdev->lock dev_pm_qos_sysfs_mtx dev_pm_qos_mtx irq_context: 0 &hdev->req_lock &hdev->lock &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 &hdev->req_lock &hdev->lock &root->kernfs_rwsem kernfs_idr_lock irq_context: 0 &hdev->req_lock &hdev->lock &root->kernfs_rwsem &obj_hash[i].lock irq_context: 0 &hdev->req_lock &hdev->lock &root->kernfs_rwsem pool_lock#2 irq_context: 0 &hdev->req_lock &hdev->lock kernfs_idr_lock irq_context: 0 &hdev->req_lock &hdev->lock pool_lock#2 irq_context: 0 &hdev->req_lock &hdev->lock &k->k_lock klist_remove_lock irq_context: 0 &hdev->req_lock &hdev->lock &k->list_lock irq_context: 0 &hdev->req_lock &hdev->lock &root->kernfs_rwsem &rq->__lock irq_context: 0 &hdev->req_lock &hdev->lock &root->kernfs_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &hdev->req_lock &hdev->lock sysfs_symlink_target_lock irq_context: 0 &hdev->req_lock &hdev->lock subsys mutex#80 irq_context: 0 &hdev->req_lock &hdev->lock subsys mutex#80 &k->k_lock irq_context: 0 
&hdev->req_lock &hdev->lock subsys mutex#80 &k->k_lock klist_remove_lock irq_context: 0 &hdev->req_lock &hdev->lock &x->wait#9 irq_context: 0 &hdev->req_lock &hdev->lock dpm_list_mtx irq_context: 0 &hdev->req_lock &hdev->lock &dev->power.lock irq_context: 0 &hdev->req_lock &hdev->lock deferred_probe_mutex irq_context: 0 &hdev->req_lock &hdev->lock device_links_lock irq_context: 0 &hdev->req_lock &hdev->lock mmu_notifier_invalidate_range_start irq_context: 0 &hdev->req_lock &hdev->lock uevent_sock_mutex irq_context: 0 &hdev->req_lock &hdev->lock uevent_sock_mutex mmu_notifier_invalidate_range_start irq_context: 0 &hdev->req_lock &hdev->lock uevent_sock_mutex &c->lock irq_context: 0 &hdev->req_lock &hdev->lock uevent_sock_mutex pool_lock#2 irq_context: 0 &hdev->req_lock &hdev->lock uevent_sock_mutex nl_table_lock irq_context: 0 &hdev->req_lock &hdev->lock uevent_sock_mutex rlock-AF_NETLINK irq_context: 0 &hdev->req_lock &hdev->lock uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait irq_context: 0 &hdev->req_lock &hdev->lock uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock irq_context: 0 &hdev->req_lock &hdev->lock uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq irq_context: 0 &hdev->req_lock &hdev->lock uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock irq_context: 0 &hdev->req_lock &hdev->lock uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock &rq->__lock irq_context: 0 &hdev->req_lock &hdev->lock uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &hdev->req_lock &hdev->lock uevent_sock_mutex rcu_read_lock rcu_node_0 irq_context: 0 &hdev->req_lock &hdev->lock uevent_sock_mutex rcu_read_lock &rq->__lock irq_context: 0 &hdev->req_lock &hdev->lock uevent_sock_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &hdev->req_lock &hdev->lock uevent_sock_mutex nl_table_wait.lock irq_context: 0 &hdev->req_lock &hdev->lock uevent_sock_mutex &obj_hash[i].lock irq_context: 0 &hdev->req_lock &hdev->lock hci_cb_list_lock rcu_state.exp_mutex &rnp->exp_wq[3] irq_context: 0 &hdev->req_lock &hdev->lock hci_cb_list_lock &rq->__lock irq_context: 0 &hdev->req_lock &hdev->lock hci_cb_list_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &hdev->req_lock &hdev->lock rcu_state.exp_mutex &rnp->exp_wq[0] irq_context: 0 &hdev->req_lock &hdev->lock &c->lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#40 batched_entropy_u8.lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#40 kfence_freelist_lock irq_context: 0 &hdev->req_lock &hdev->lock uevent_sock_mutex rcu_read_lock &rcu_state.gp_wq irq_context: 0 &hdev->req_lock &hdev->lock uevent_sock_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#40 &n->list_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#40 &n->list_lock &c->lock irq_context: 0 &hdev->req_lock tk_core.seq.seqcount irq_context: 0 &hdev->req_lock hci_sk_list.lock irq_context: 0 &hdev->req_lock &list->lock#11 irq_context: 0 &hdev->req_lock (work_completion)(&hdev->cmd_work) irq_context: 0 &hdev->req_lock (work_completion)(&(&hdev->cmd_timer)->work) irq_context: 0 &hdev->lock irq_context: 0 &hdev->lock fs_reclaim 
irq_context: 0 &hdev->lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &hdev->lock &c->lock irq_context: 0 &hdev->lock &n->list_lock irq_context: 0 &hdev->lock &n->list_lock &c->lock irq_context: 0 &hdev->lock &rq->__lock irq_context: 0 &hdev->lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &hdev->lock pool_lock#2 irq_context: 0 &hdev->lock tk_core.seq.seqcount irq_context: 0 &hdev->lock hci_sk_list.lock irq_context: 0 &hdev->lock &obj_hash[i].lock irq_context: 0 hci_sk_list.lock irq_context: 0 (work_completion)(&rfkill->uevent_work) irq_context: 0 (work_completion)(&rfkill->sync_work) irq_context: 0 subsys mutex#40 irq_context: 0 subsys mutex#40 &rq->__lock irq_context: 0 subsys mutex#40 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 subsys mutex#40 &k->k_lock irq_context: 0 subsys mutex#40 &k->k_lock klist_remove_lock irq_context: 0 &rfkill->lock irq_context: 0 uevent_sock_mutex mmu_notifier_invalidate_range_start irq_context: 0 uevent_sock_mutex pool_lock#2 irq_context: 0 uevent_sock_mutex nl_table_lock irq_context: 0 uevent_sock_mutex rlock-AF_NETLINK irq_context: 0 uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait irq_context: 0 uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock irq_context: 0 uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq irq_context: 0 uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock irq_context: 0 uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock &rq->__lock irq_context: 0 uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 uevent_sock_mutex rcu_read_lock rcu_node_0 irq_context: 0 uevent_sock_mutex rcu_read_lock &rq->__lock irq_context: 0 uevent_sock_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &dentry->d_lock &sb->s_type->i_lock_key#24 &dentry->d_lock &lru->node[i].lock irq_context: 0 uevent_sock_mutex nl_table_wait.lock irq_context: 0 uevent_sock_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rfkill_global_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&rfkill_global_led_trigger_work) rfkill_global_mutex &pool->lock &p->pi_lock irq_context: 0 (wq_completion)events (work_completion)(&rfkill_global_led_trigger_work) rfkill_global_mutex &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&rfkill_global_led_trigger_work) rfkill_global_mutex &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 subsys mutex#80 irq_context: 0 subsys mutex#80 &k->k_lock irq_context: 0 subsys mutex#80 &k->k_lock klist_remove_lock irq_context: 0 &sb->s_type->i_mutex_key#3 &dentry->d_lock &dentry->d_lock/1 irq_context: 0 &sb->s_type->i_mutex_key#3 pin_fs_lock irq_context: 0 &sb->s_type->i_mutex_key#3 rcu_read_lock mount_lock irq_context: 0 &sb->s_type->i_mutex_key#3 rcu_read_lock mount_lock mount_lock.seqcount irq_context: 0 &sb->s_type->i_mutex_key#3 mount_lock irq_context: 0 &sb->s_type->i_mutex_key#3 mount_lock mount_lock.seqcount irq_context: 0 &sb->s_type->i_mutex_key#3 rcu_read_lock &dentry->d_lock irq_context: 0 &sb->s_type->i_mutex_key#3 &fsnotify_mark_srcu irq_context: 0 &sb->s_type->i_mutex_key#3 &xa->xa_lock#6 irq_context: 0 &sb->s_type->i_mutex_key#3 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 hci_index_ida.xa_lock 
irq_context: 0 rtnl_mutex &dev->tx_global_lock _xmit_NETROM irq_context: 0 (wq_completion)wg-crypt-wg0#7 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)wg-crypt-wg0#7 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock genl_mutex rfkill_global_mutex &n->list_lock &c->lock irq_context: 0 cb_lock genl_mutex rfkill_global_mutex uevent_sock_mutex rcu_read_lock &rcu_state.gp_wq irq_context: 0 cb_lock genl_mutex rfkill_global_mutex uevent_sock_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 cb_lock genl_mutex rfkill_global_mutex uevent_sock_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 cb_lock genl_mutex rfkill_global_mutex uevent_sock_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 &n->list_lock &c->lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 &cfs_rq->removed.lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 &rq->__lock &cfs_rq->removed.lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 &n->list_lock &c->lock irq_context: 0 (wq_completion)phy15 irq_context: 0 (wq_completion)phy15 (work_completion)(&local->reconfig_filter) irq_context: 0 (wq_completion)phy15 (work_completion)(&local->reconfig_filter) &local->filter_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->mtx &local->chanctx_mtx &c->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock _xmit_ETHER &local->filter_lock &c->lock irq_context: 0 (wq_completion)cfg80211 (work_completion)(&rdev->event_work) &rdev->wiphy.mtx &wdev->mtx &n->list_lock irq_context: 0 (wq_completion)cfg80211 (work_completion)(&rdev->event_work) &rdev->wiphy.mtx &wdev->mtx &n->list_lock &c->lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx fill_pool_map-wait-type-override rcu_node_0 irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx fill_pool_map-wait-type-override rcu_read_lock rcu_node_0 irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx gdp_mutex &c->lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &idev->mc_lock &n->list_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &idev->mc_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx lweventlist_lock &c->lock irq_context: 0 (wq_completion)phy16 irq_context: 0 (wq_completion)phy16 
(work_completion)(&local->reconfig_filter) irq_context: 0 (wq_completion)phy16 (work_completion)(&local->reconfig_filter) &local->filter_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex fill_pool_map-wait-type-override &rq->__lock irq_context: 0 &type->s_umount_key#46/1 &n->list_lock irq_context: 0 &type->s_umount_key#46/1 &n->list_lock &c->lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &sta->lock &c->lock irq_context: 0 sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem cpuset_rwsem rcu_state.exp_mutex rcu_node_0 irq_context: 0 sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem cpuset_rwsem rcu_state.exp_mutex &obj_hash[i].lock irq_context: 0 sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem cpuset_rwsem rcu_state.exp_mutex rcu_read_lock &pool->lock irq_context: 0 sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem cpuset_rwsem rcu_state.exp_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem cpuset_rwsem rcu_state.exp_mutex &rnp->exp_wq[1] irq_context: 0 sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem cpuset_rwsem rcu_state.exp_mutex &rq->__lock irq_context: 0 sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem cpuset_rwsem rcu_state.exp_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem cpuset_rwsem &obj_hash[i].lock irq_context: 0 sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem cpuset_rwsem.rss.gp_wait.lock &obj_hash[i].lock irq_context: 0 kn->active#56 &kernfs_locks->open_file_mutex[count] &c->lock irq_context: 0 kn->active#55 &n->list_lock irq_context: 0 kn->active#55 &n->list_lock &c->lock irq_context: 0 nf_hook_mutex &n->list_lock irq_context: 0 nf_hook_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->mcast.work)->work) fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->mcast.work)->work) fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->mcast.work)->work) fill_pool_map-wait-type-override &n->list_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->mcast.work)->work) fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->mcast.work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 &mm->mmap_lock rcu_read_lock rcu_read_lock stock_lock irq_context: 0 &mm->mmap_lock rcu_read_lock rcu_read_lock pcpu_lock stock_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex key#26 irq_context: softirq rcu_callback key#26 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh &c->lock irq_context: 0 link_idr_lock &obj_hash[i].lock pool_lock irq_context: softirq rcu_read_lock rcu_read_lock fill_pool_map-wait-type-override &n->list_lock irq_context: softirq rcu_read_lock rcu_read_lock fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 (wq_completion)mld 
(work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock hwsim_radio_lock &n->list_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock hwsim_radio_lock &n->list_lock &c->lock irq_context: softirq rcu_read_lock &local->rx_path_lock rcu_read_lock &pool->lock fill_pool_map-wait-type-override &c->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 jbd2_handle &sbi->s_orphan_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 jbd2_handle &sbi->s_orphan_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &n->list_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&(&ssp->srcu_sup->work)->work) rcu_read_lock &pool->lock fill_pool_map-wait-type-override &n->list_lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&(&ssp->srcu_sup->work)->work) rcu_read_lock &pool->lock fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 &nft_net->commit_mutex rcu_read_lock rhashtable_bucket irq_context: 0 &nft_net->commit_mutex rcu_state.exp_mutex &obj_hash[i].lock pool_lock irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem &nft_net->commit_mutex rcu_state.exp_mutex rcu_node_0 irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem &nft_net->commit_mutex rcu_state.exp_mutex &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem &nft_net->commit_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem &nft_net->commit_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem &nft_net->commit_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem &nft_net->commit_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem &nft_net->commit_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem &nft_net->commit_mutex rcu_state.exp_mutex &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem &nft_net->commit_mutex rcu_state.exp_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 tomoyo_ss remove_cache_srcu rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 tomoyo_ss remove_cache_srcu rcu_read_lock &rq->__lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 tomoyo_ss remove_cache_srcu rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events pcpu_balance_work pcpu_alloc_mutex.wait_lock irq_context: 0 (wq_completion)events pcpu_balance_work &p->pi_lock irq_context: 0 (wq_completion)events pcpu_balance_work &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events 
pcpu_balance_work &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem &nft_net->commit_mutex &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem &nft_net->commit_mutex rcu_read_lock rhashtable_bucket irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem &nft_net->commit_mutex rcu_read_lock rcu_read_lock &pool->lock irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem &nft_net->commit_mutex rcu_read_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem &nft_net->commit_mutex rcu_read_lock rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem &nft_net->commit_mutex rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem &nft_net->commit_mutex rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem &nft_net->commit_mutex rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem &nft_net->commit_mutex rcu_read_lock rcu_node_0 irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem &nft_net->commit_mutex rcu_read_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem &nft_net->commit_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem &nft_net->commit_mutex stock_lock irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem &nft_net->commit_mutex pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem &nft_net->commit_mutex (work_completion)(&ht->run_work) irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem &nft_net->commit_mutex rcu_read_lock &pool->lock irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem &nft_net->commit_mutex &ht->mutex irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem &nft_net->commit_mutex &ht->mutex &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem &nft_net->commit_mutex &ht->mutex pool_lock#2 irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 tomoyo_ss remove_cache_srcu rcu_read_lock &rcu_state.gp_wq irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 tomoyo_ss remove_cache_srcu rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 tomoyo_ss remove_cache_srcu rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 tomoyo_ss remove_cache_srcu rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#3 &mm->mmap_lock &rq->__lock irq_context: 0 sb_writers#3 &mm->mmap_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex &bat_priv->tt.commit_lock &bat_priv->tt.last_changeset_lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET6 &mm->mmap_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_data_sem remove_cache_srcu irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_data_sem remove_cache_srcu quarantine_lock irq_context: 
0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_data_sem remove_cache_srcu &c->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_data_sem remove_cache_srcu &n->list_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_data_sem remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_data_sem remove_cache_srcu &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh fill_pool_map-wait-type-override &n->list_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock &anon_vma->rwsem rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &xa->xa_lock#6 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem mmu_notifier_invalidate_range_start irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem pool_lock#2 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem lock#4 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem lock#4 &lruvec->lru_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem lock#5 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &journal->j_state_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle lock#4 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle lock#5 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle mmu_notifier_invalidate_range_start irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle pool_lock#2 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_es_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem mmu_notifier_invalidate_range_start irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem pool_lock#2 
irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lg->lg_mutex irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lg->lg_mutex &ei->i_prealloc_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lg->lg_mutex rcu_read_lock &pa->pa_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lg->lg_mutex &mapping->private_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lg->lg_mutex &ret->b_state_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lg->lg_mutex &ret->b_state_lock &journal->j_list_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lg->lg_mutex &pa->pa_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lg->lg_mutex &lg->lg_prealloc_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ei->i_raw_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock remove_cache_srcu pool_lock#2 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &(ei->i_block_reservation_lock) irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &sb->s_type->i_lock_key#22 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ei->i_es_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ei->i_es_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ei->i_es_lock &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ei->i_es_lock pool_lock#2 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &memcg->move_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &xa->xa_lock#6 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &xa->xa_lock#6 &s->s_inode_wblist_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_raw_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &journal->j_wait_updates 
irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem tk_core.seq.seqcount irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &dd->lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &base->lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &base->lock &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_read_lock &dd->lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_read_lock rcu_node_0 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_read_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_read_lock &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_read_lock &c->lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_read_lock pool_lock#2 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_read_lock tk_core.seq.seqcount irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_read_lock &virtscsi_vq->vq_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &folio_wait_table[i] irq_context: 0 (wq_completion)wg-crypt-wg0#7 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh &base->lock irq_context: 0 (wq_completion)wg-crypt-wg0#7 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh &base->lock &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 lock#5 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_lock_key#22 &xa->xa_lock#6 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_lock_key#22 &xa->xa_lock#6 &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_lock_key#22 &xa->xa_lock#6 pool_lock#2 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &lruvec->lru_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &ei->i_prealloc_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &sbi->s_md_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &bgl->locks[i].lock &sbi->s_md_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &bgl->locks[i].lock &ei->i_prealloc_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &pa->pa_lock#2 irq_context: 0 rtnl_mutex &dev->tx_global_lock _xmit_LOOPBACK#2 irq_context: 0 rtnl_mutex rcu_read_lock &tb->tb6_lock rcu_read_lock &n->list_lock irq_context: 0 rtnl_mutex rcu_read_lock &tb->tb6_lock rcu_read_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock 
rcu_read_lock &n->list_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock &n->list_lock &c->lock irq_context: 0 rcu_read_lock &pool->lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 rcu_read_lock &pool->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 rtnl_mutex rcu_state.exp_mutex &cfs_rq->removed.lock irq_context: 0 rtnl_mutex rcu_state.exp_mutex pool_lock#2 irq_context: 0 rtnl_mutex &dev->tx_global_lock _xmit_TUNNEL#2 irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &c->lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh pool_lock#2 irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock &ndev->lock irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 &obj_hash[i].lock irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 pool_lock#2 irq_context: 0 rtnl_mutex &dev->tx_global_lock _xmit_IPGRE#2 irq_context: 0 rtnl_mutex _xmit_IPGRE &obj_hash[i].lock irq_context: 0 rtnl_mutex _xmit_IPGRE pool_lock#2 irq_context: 0 rtnl_mutex _xmit_IPGRE krc.lock irq_context: 0 pernet_ops_rwsem pcpu_alloc_mutex fs_reclaim irq_context: 0 pernet_ops_rwsem pcpu_alloc_mutex fs_reclaim &rq->__lock irq_context: 0 pernet_ops_rwsem pcpu_alloc_mutex fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &c->lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#7 &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#7 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex nl_table_wait.lock &p->pi_lock &cfs_rq->removed.lock irq_context: 0 pernet_ops_rwsem pcpu_alloc_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 pernet_ops_rwsem pcpu_alloc_mutex &____s->seqcount irq_context: 0 pernet_ops_rwsem pcpu_alloc_mutex pool_lock#2 irq_context: 0 (wq_completion)wg-crypt-wg0#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_node_0 irq_context: 0 (wq_completion)wg-crypt-wg0#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq 
irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_es_lock key#2 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem rcu_read_lock rcu_node_0 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem rcu_read_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem nf_hook_mutex per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &base->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &base->lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 rds_ib_devices_lock &pool->flush_lock irq_context: 0 &sb->s_type->i_mutex_key#8 remove_cache_srcu &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#8 remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &u->iolock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 rcu_read_lock &n->lock &____s->seqcount irq_context: 0 rcu_read_lock &n->lock rcu_read_lock pool_lock#2 irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 quarantine_lock irq_context: 0 tracepoints_mutex &p->pi_lock &cfs_rq->removed.lock irq_context: 0 rtnl_mutex &tbl->lock &n->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex &tbl->lock &n->lock &base->lock irq_context: 0 rtnl_mutex &tbl->lock &n->lock &base->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex &tbl->lock &n->lock pool_lock#2 irq_context: 0 rtnl_mutex &dev->tx_global_lock _xmit_TUNNEL6#2 irq_context: 0 rtnl_mutex &dev->tx_global_lock _xmit_SIT#2 irq_context: 0 rtnl_mutex rcu_read_lock &tb->tb6_lock &____s->seqcount#2 irq_context: 0 rtnl_mutex rcu_read_lock &tb->tb6_lock &____s->seqcount irq_context: 0 rtnl_mutex &dev->tx_global_lock _xmit_NONE#2 irq_context: 0 (wq_completion)events (work_completion)(&w->work)#2 &cfs_rq->removed.lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 &cfs_rq->removed.lock irq_context: 0 (wq_completion)events (work_completion)(&w->work)#2 &obj_hash[i].lock pool_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 remove_cache_srcu rcu_read_lock &rcu_state.expedited_wq irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 remove_cache_srcu rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 remove_cache_srcu rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 remove_cache_srcu rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &dev->tx_global_lock &qdisc_xmit_lock_key#4 irq_context: 0 (wq_completion)events (work_completion)(&w->work)#2 nf_conntrack_mutex &rq->__lock &cfs_rq->removed.lock irq_context: 0 rtnl_mutex (work_completion)(&(&bond->mii_work)->work) irq_context: 0 rtnl_mutex (work_completion)(&(&bond->arp_work)->work) irq_context: 0 rtnl_mutex (work_completion)(&(&bond->alb_work)->work) irq_context: 0 rtnl_mutex (work_completion)(&(&bond->ad_work)->work) 
irq_context: 0 rtnl_mutex (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 rtnl_mutex (work_completion)(&(&bond->slave_arr_work)->work) irq_context: 0 rtnl_mutex &dev_addr_list_lock_key/1 _xmit_ETHER irq_context: 0 __ip_vs_mutex nf_hook_mutex pool_lock#2 irq_context: 0 rtnl_mutex &dev_addr_list_lock_key/1 _xmit_ETHER &obj_hash[i].lock irq_context: 0 rtnl_mutex &dev_addr_list_lock_key/1 _xmit_ETHER pool_lock#2 irq_context: 0 rtnl_mutex &dev_addr_list_lock_key/1 _xmit_ETHER krc.lock irq_context: 0 rtnl_mutex &dev_addr_list_lock_key/1 &obj_hash[i].lock irq_context: 0 rtnl_mutex &dev_addr_list_lock_key/1 krc.lock irq_context: 0 rtnl_mutex &idev->mc_lock &dev_addr_list_lock_key/1 irq_context: 0 rtnl_mutex &idev->mc_lock &dev_addr_list_lock_key/1 &obj_hash[i].lock irq_context: 0 rtnl_mutex &idev->mc_lock &dev_addr_list_lock_key/1 krc.lock irq_context: 0 (wq_completion)events (work_completion)(&data->fib_event_work) &data->fib_lock rcu_read_lock pool_lock#2 irq_context: 0 rtnl_mutex &dev->tx_global_lock &qdisc_xmit_lock_key#3 irq_context: 0 rtnl_mutex &dev_addr_list_lock_key#2/1 _xmit_ETHER irq_context: 0 rtnl_mutex &dev_addr_list_lock_key#2/1 _xmit_ETHER &obj_hash[i].lock irq_context: 0 rtnl_mutex &dev_addr_list_lock_key#2/1 _xmit_ETHER pool_lock#2 irq_context: 0 rtnl_mutex &dev_addr_list_lock_key#2/1 _xmit_ETHER krc.lock irq_context: 0 rtnl_mutex &dev_addr_list_lock_key#2/1 &obj_hash[i].lock irq_context: 0 rtnl_mutex &dev_addr_list_lock_key#2/1 krc.lock irq_context: 0 rtnl_mutex &idev->mc_lock &dev_addr_list_lock_key#2/1 irq_context: 0 rtnl_mutex &idev->mc_lock &dev_addr_list_lock_key#2/1 &obj_hash[i].lock irq_context: 0 rtnl_mutex &idev->mc_lock &dev_addr_list_lock_key#2/1 krc.lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#7 rcu_node_0 irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#7 &rcu_state.expedited_wq irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#7 &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#7 &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#7 &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &dev->tx_global_lock &batadv_netdev_xmit_lock_key irq_context: 0 rtnl_mutex &batadv_netdev_addr_lock_key/1 &obj_hash[i].lock irq_context: 0 rtnl_mutex &batadv_netdev_addr_lock_key/1 krc.lock irq_context: 0 rtnl_mutex &idev->mc_lock &batadv_netdev_addr_lock_key/1 irq_context: 0 rtnl_mutex &idev->mc_lock &batadv_netdev_addr_lock_key/1 &obj_hash[i].lock irq_context: 0 rtnl_mutex &idev->mc_lock &batadv_netdev_addr_lock_key/1 krc.lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&data->dm_alert_work) remove_cache_srcu &rq->__lock irq_context: 0 rtnl_mutex &wg->device_update_lock (&peer->timer_retransmit_handshake) irq_context: 0 rtnl_mutex &wg->device_update_lock &base->lock irq_context: 0 rtnl_mutex &wg->device_update_lock (&peer->timer_send_keepalive) irq_context: 0 rtnl_mutex &wg->device_update_lock (&peer->timer_new_handshake) irq_context: 0 rtnl_mutex 
&wg->device_update_lock (&peer->timer_zero_key_material) irq_context: 0 rtnl_mutex &wg->device_update_lock &base->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex &wg->device_update_lock (&peer->timer_persistent_keepalive) irq_context: 0 rtnl_mutex &wg->device_update_lock (work_completion)(&peer->clear_peer_work) irq_context: 0 rtnl_mutex &wg->device_update_lock &handshake->lock irq_context: 0 rtnl_mutex &wg->device_update_lock &handshake->lock &table->lock#2 irq_context: 0 rtnl_mutex &wg->device_update_lock &peer->keypairs.keypair_update_lock irq_context: 0 rtnl_mutex &wg->device_update_lock &peer->keypairs.keypair_update_lock &table->lock#2 irq_context: 0 rtnl_mutex &wg->device_update_lock &peer->keypairs.keypair_update_lock &obj_hash[i].lock irq_context: 0 rtnl_mutex &wg->device_update_lock &peer->keypairs.keypair_update_lock pool_lock#2 irq_context: 0 rtnl_mutex &wg->device_update_lock tk_core.seq.seqcount irq_context: 0 rtnl_mutex &r->consumer_lock#2 irq_context: 0 rtnl_mutex &wg->socket_update_lock irq_context: 0 rtnl_mutex k-clock-AF_INET6 irq_context: 0 (wq_completion)events (work_completion)(&w->work)#2 nf_conntrack_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&w->work)#2 nf_conntrack_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &n->list_lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#7 &nsim_trap_data->trap_lock &pcp->lock &zone->lock irq_context: softirq (&ndev->rs_timer) init_task.mems_allowed_seq.seqcount irq_context: 0 __ip_vs_mutex nf_hook_mutex &rq->__lock irq_context: 0 __ip_vs_mutex nf_hook_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 __ip_vs_mutex &obj_hash[i].lock irq_context: 0 __ip_vs_mutex nf_hook_mutex &c->lock irq_context: 0 __ip_vs_mutex &obj_hash[i].lock pool_lock irq_context: 0 __ip_vs_mutex (&tbl->periodic_timer) irq_context: 0 (wq_completion)wg-crypt-wg0#7 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&w->work)#2 &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 rtnl_mutex _xmit_ETHER/1 &obj_hash[i].lock irq_context: 0 rtnl_mutex _xmit_ETHER/1 krc.lock irq_context: 0 rtnl_mutex &idev->mc_lock _xmit_ETHER/1 irq_context: 0 rtnl_mutex &idev->mc_lock _xmit_ETHER/1 &obj_hash[i].lock irq_context: 0 rtnl_mutex &idev->mc_lock _xmit_ETHER/1 krc.lock irq_context: 0 rtnl_mutex &tbl->lock &n->list_lock irq_context: 0 rtnl_mutex &tbl->lock &n->list_lock &c->lock irq_context: 0 rtnl_mutex &dev->tx_global_lock &vlan_netdev_xmit_lock_key irq_context: 0 rtnl_mutex &vlan_netdev_addr_lock_key/1 _xmit_ETHER &obj_hash[i].lock irq_context: 0 rtnl_mutex &vlan_netdev_addr_lock_key/1 _xmit_ETHER pool_lock#2 irq_context: 0 rtnl_mutex &vlan_netdev_addr_lock_key/1 _xmit_ETHER krc.lock irq_context: 0 rtnl_mutex &vlan_netdev_addr_lock_key/1 &obj_hash[i].lock irq_context: 0 rtnl_mutex &vlan_netdev_addr_lock_key/1 krc.lock irq_context: 0 rtnl_mutex &idev->mc_lock &vlan_netdev_addr_lock_key/1 irq_context: 0 rtnl_mutex 
&idev->mc_lock &vlan_netdev_addr_lock_key/1 &obj_hash[i].lock irq_context: 0 rtnl_mutex &idev->mc_lock &vlan_netdev_addr_lock_key/1 pool_lock#2 irq_context: 0 rtnl_mutex &idev->mc_lock &vlan_netdev_addr_lock_key/1 krc.lock irq_context: 0 rtnl_mutex &idev->mc_lock &vlan_netdev_addr_lock_key/1 &obj_hash[i].lock pool_lock irq_context: 0 rtnl_mutex rcu_read_lock batched_entropy_u8.lock irq_context: 0 rtnl_mutex rcu_read_lock kfence_freelist_lock irq_context: 0 (wq_completion)events (work_completion)(&w->work)#2 &meta->lock irq_context: 0 (wq_completion)events (work_completion)(&w->work)#2 kfence_freelist_lock irq_context: 0 rtnl_mutex &idev->mc_lock &____s->seqcount#2 irq_context: 0 rtnl_mutex &idev->mc_lock &macvlan_netdev_addr_lock_key/1 &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)wg-crypt-wg1#7 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh &base->lock irq_context: 0 (wq_completion)wg-crypt-wg1#7 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg1#7 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)wg-crypt-wg1#7 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &ul->lock irq_context: 0 (wq_completion)wg-crypt-wg1#7 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh batched_entropy_u32.lock irq_context: 0 (wq_completion)wg-crypt-wg1#7 (work_completion)(&peer->transmit_packet_work) batched_entropy_u8.lock irq_context: 0 (wq_completion)wg-crypt-wg0#7 (work_completion)(&peer->transmit_packet_work) batched_entropy_u8.lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex &bat_priv->tt.commit_lock key#16 &bat_priv->softif_vlan_list_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex &bat_priv->tt.commit_lock key#16 &obj_hash[i].lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex &bat_priv->tt.commit_lock key#16 pool_lock#2 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex &bat_priv->tt.commit_lock key#16 krc.lock irq_context: 0 rtnl_mutex &macsec_netdev_addr_lock_key/1 _xmit_ETHER krc.lock &obj_hash[i].lock irq_context: 0 rtnl_mutex &macsec_netdev_addr_lock_key/1 _xmit_ETHER krc.lock &base->lock irq_context: 0 rtnl_mutex &macsec_netdev_addr_lock_key/1 _xmit_ETHER krc.lock &base->lock &obj_hash[i].lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh quarantine_lock irq_context: 0 (wq_completion)events (work_completion)(&data->fib_event_work) &data->fib_lock remove_cache_srcu rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)events (work_completion)(&data->fib_event_work) &data->fib_lock remove_cache_srcu rcu_read_lock &rcu_state.expedited_wq irq_context: 0 (wq_completion)events (work_completion)(&data->fib_event_work) &data->fib_lock remove_cache_srcu rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)events (work_completion)(&data->fib_event_work) &data->fib_lock remove_cache_srcu rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events 
(work_completion)(&data->fib_event_work) &data->fib_lock remove_cache_srcu rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&(&hwstats->traffic_dw)->work) &hwstats->hwsdev_list_lock &rq->__lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx rcu_state.exp_mutex &rnp->exp_wq[0] irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &c->lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx rcu_read_lock &____s->seqcount irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx rcu_state.exp_mutex &rnp->exp_wq[1] irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &local->key_mtx rcu_state.exp_mutex &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &bat_priv->tt.changes_list_lock &obj_hash[i].lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx rcu_state.exp_mutex &rnp->exp_wq[3] irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &n->list_lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &n->list_lock &c->lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 rcu_node_0 irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx rcu_state.exp_mutex &rnp->exp_wq[0] irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &pcp->lock &zone->lock &____s->seqcount irq_context: 0 rtnl_mutex rcu_read_lock &tb->tb6_lock quarantine_lock irq_context: 0 rtnl_mutex &idev->mc_lock remove_cache_srcu pool_lock#2 irq_context: 0 rtnl_mutex &bridge_netdev_addr_lock_key &obj_hash[i].lock irq_context: 0 rtnl_mutex &bridge_netdev_addr_lock_key krc.lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock hwsim_radio_lock batched_entropy_u8.lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock hwsim_radio_lock kfence_freelist_lock irq_context: 0 (wq_completion)events (work_completion)(&w->work)#2 nf_conntrack_mutex pool_lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex nl_table_wait.lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex nl_table_wait.lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &cfs_rq->removed.lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_TIPC &tn->nametbl_lock &n->list_lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_TIPC &tn->nametbl_lock &n->list_lock &c->lock irq_context: 0 pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex nf_hook_mutex &c->lock irq_context: 0 tomoyo_ss &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 tomoyo_ss &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem rcu_state.exp_mutex &rq->__lock &cfs_rq->removed.lock irq_context: 0 pernet_ops_rwsem rcu_state.exp_mutex rcu_read_lock &rq->__lock irq_context: 0 pernet_ops_rwsem rcu_state.exp_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 
(wq_completion)events (work_completion)(&pwq->unbound_release_work) &p->pi_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)wg-crypt-wg2#7 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &sig->cred_guard_mutex remove_cache_srcu rcu_read_lock &rcu_state.expedited_wq irq_context: 0 &sig->cred_guard_mutex remove_cache_srcu rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)wg-crypt-wg2#7 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg2#7 (work_completion)(&peer->transmit_packet_work) &base->lock irq_context: 0 (wq_completion)wg-crypt-wg2#7 (work_completion)(&peer->transmit_packet_work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg2#7 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg2#7 (work_completion)(&peer->transmit_packet_work) batched_entropy_u8.lock irq_context: 0 sb_writers#3 &c->lock irq_context: 0 sb_writers#3 &n->list_lock irq_context: 0 sb_writers#3 &n->list_lock &c->lock irq_context: 0 pernet_ops_rwsem rcu_state.barrier_mutex pgd_lock irq_context: 0 pernet_ops_rwsem rcu_state.barrier_mutex rcu_read_lock pool_lock#2 irq_context: 0 pernet_ops_rwsem rcu_state.barrier_mutex key irq_context: 0 kn->active#18 &____s->seqcount#2 irq_context: 0 pernet_ops_rwsem rcu_state.barrier_mutex pcpu_lock irq_context: 0 pernet_ops_rwsem rcu_state.barrier_mutex percpu_counters_lock irq_context: 0 pernet_ops_rwsem rcu_state.barrier_mutex rcu_read_lock &rq->__lock irq_context: 0 pernet_ops_rwsem rcu_state.barrier_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex defrag4_mutex nf_hook_mutex fs_reclaim irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex defrag4_mutex nf_hook_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex defrag4_mutex nf_hook_mutex pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex defrag6_mutex nf_hook_mutex fs_reclaim irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex defrag6_mutex nf_hook_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex defrag6_mutex nf_hook_mutex pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex netpoll_srcu irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &pn->hash_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &dev->tx_global_lock irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem &rcu_state.expedited_wq irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem &rcu_state.expedited_wq &p->pi_lock irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem &rcu_state.expedited_wq &p->pi_lock &rq->__lock 
irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &dev->tx_global_lock _xmit_NONE#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &dev->tx_global_lock &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &dev->tx_global_lock _xmit_ETHER#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &sch->q.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex dev->qdisc_tx_busylock ?: &qdisc_tx_busylock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex dev->qdisc_tx_busylock ?: &qdisc_tx_busylock &sch->q.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex __ip_vs_mutex irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex __ip_vs_mutex &ipvs->dest_trash_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &im->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex flowtable_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_read_lock &tb->tb6_lock &net->ipv6.fib6_walker_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_read_lock &tb->tb6_lock rt6_exception_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_read_lock &tb->tb6_lock pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_read_lock &tb->tb6_lock &c->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_read_lock &tb->tb6_lock nl_table_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_read_lock &tb->tb6_lock &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_read_lock &tb->tb6_lock nl_table_wait.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_read_lock &tb->tb6_lock &n->list_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_read_lock &tb->tb6_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &net->ipv6.addrconf_hash_lock &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &ndev->lock &base->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &ndev->lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &ifa->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &tb->tb6_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_read_lock &net->sctp.local_addr_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_read_lock &net->sctp.local_addr_lock &net->sctp.addr_wq_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_read_lock &net->sctp.local_addr_lock &net->sctp.addr_wq_lock &c->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_read_lock 
&net->sctp.local_addr_lock &net->sctp.addr_wq_lock pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_read_lock &net->sctp.local_addr_lock &net->sctp.addr_wq_lock &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_read_lock &net->sctp.local_addr_lock &net->sctp.addr_wq_lock &base->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_read_lock &net->sctp.local_addr_lock &net->sctp.addr_wq_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_read_lock krc.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_read_lock &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &net->ipv6.fib6_gc_lock rcu_read_lock &tb->tb6_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &net->ipv6.fib6_gc_lock rcu_read_lock &tb->tb6_lock &net->ipv6.fib6_walker_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &net->ipv6.fib6_gc_lock &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex _xmit_ETHER &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex _xmit_ETHER pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex _xmit_ETHER krc.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_read_lock &tb->tb6_lock &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &idev->mc_lock &base->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &idev->mc_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_read_lock &c->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_read_lock rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_read_lock rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_read_lock rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_read_lock rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex (inetaddr_chain).rwsem irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex (inetaddr_chain).rwsem fs_reclaim irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex (inetaddr_chain).rwsem fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex (inetaddr_chain).rwsem pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work 
pernet_ops_rwsem rtnl_mutex (inetaddr_chain).rwsem nl_table_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex (inetaddr_chain).rwsem &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex (inetaddr_chain).rwsem nl_table_wait.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex (inetaddr_chain).rwsem fib_info_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex (inetaddr_chain).rwsem fib_info_lock &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex (inetaddr_chain).rwsem fib_info_lock pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex (inetaddr_chain).rwsem &tbl->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex (inetaddr_chain).rwsem class irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex (inetaddr_chain).rwsem (&tbl->proxy_timer) irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex (inetaddr_chain).rwsem &base->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex (inetaddr_chain).rwsem &net->sctp.local_addr_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex (inetaddr_chain).rwsem &net->sctp.local_addr_lock &net->sctp.addr_wq_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex (inetaddr_chain).rwsem &net->sctp.local_addr_lock &net->sctp.addr_wq_lock &c->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex (inetaddr_chain).rwsem &net->sctp.local_addr_lock &net->sctp.addr_wq_lock pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex (inetaddr_chain).rwsem krc.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex (inetaddr_chain).rwsem rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex (inetaddr_chain).rwsem rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex (inetaddr_chain).rwsem rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex (inetaddr_chain).rwsem rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex (inetaddr_chain).rwsem &rq->__lock irq_context: softirq rcu_callback &n->list_lock irq_context: softirq rcu_callback &n->list_lock &c->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &dev->tx_global_lock _xmit_TUNNEL6#2 irq_context: 0 &mm->mmap_lock &anon_vma->rwsem &rcu_state.expedited_wq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &dev->tx_global_lock _xmit_SIT#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_read_lock &net->sctp.local_addr_lock &net->sctp.addr_wq_lock &____s->seqcount irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_read_lock &net->sctp.local_addr_lock &net->sctp.addr_wq_lock rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex (inetaddr_chain).rwsem &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex (inetaddr_chain).rwsem 
&c->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &dev->tx_global_lock _xmit_TUNNEL#2 irq_context: 0 &pipe->mutex/1 rcu_read_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_read_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_read_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex (inetaddr_chain).rwsem rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex (inetaddr_chain).rwsem rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &tbl->lock &n->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &tbl->lock pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &tbl->lock &n->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &tbl->lock &n->lock &____s->seqcount#9 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &tbl->lock nl_table_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &tbl->lock &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &tbl->lock nl_table_wait.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &tbl->lock rcu_read_lock lock#8 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &tbl->lock rcu_read_lock id_table_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &tbl->lock &dir->lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &tbl->lock krc.lock irq_context: softirq rcu_callback batched_entropy_u8.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &dev->tx_global_lock _xmit_IPGRE#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex _xmit_IPGRE &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex _xmit_IPGRE krc.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &im->lock &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &tbl->lock &c->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &pmc->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_read_lock &im->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem ipvs->sync_mutex (console_sem).lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem ipvs->sync_mutex console_lock console_srcu console_owner_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem ipvs->sync_mutex console_lock console_srcu console_owner irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem ipvs->sync_mutex console_lock console_srcu console_owner &port_lock_key irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem 
ipvs->sync_mutex console_lock console_srcu console_owner console_owner_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem ipvs->sync_mutex &p->pi_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem ipvs->sync_mutex &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem ipvs->sync_mutex &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem ipvs->sync_mutex &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem ipvs->sync_mutex &x->wait irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &table->hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &table->hash[i].lock &table->hash2[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem hwsim_radio_lock rcu_read_lock rhashtable_bucket irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem pin_fs_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &dentry->d_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &sb->s_type->i_mutex_key#3 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &sb->s_type->i_mutex_key#3 &dentry->d_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &sb->s_type->i_mutex_key#3 &dentry->d_lock &dentry->d_lock/1 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &sb->s_type->i_mutex_key#3 tk_core.seq.seqcount irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &sb->s_type->i_mutex_key#3 rename_lock.seqcount irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &sb->s_type->i_mutex_key#3 pin_fs_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &sb->s_type->i_mutex_key#3 rcu_read_lock mount_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &sb->s_type->i_mutex_key#3 rcu_read_lock mount_lock mount_lock.seqcount irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &sb->s_type->i_mutex_key#3 mount_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &sb->s_type->i_mutex_key#3 mount_lock mount_lock.seqcount irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &sb->s_type->i_mutex_key#3 rcu_read_lock &dentry->d_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &sb->s_type->i_mutex_key#3 &fsnotify_mark_srcu irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &sb->s_type->i_mutex_key#3 &sb->s_type->i_lock_key#7 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &sb->s_type->i_mutex_key#3 &s->s_inode_list_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &sb->s_type->i_mutex_key#3 &xa->xa_lock#6 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &sb->s_type->i_mutex_key#3 &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &sb->s_type->i_mutex_key#3 pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_read_lock &dentry->d_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &sb->s_type->i_lock_key#7 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &s->s_inode_list_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_read_lock mount_lock irq_context: 0 (wq_completion)netns 
net_cleanup_work pernet_ops_rwsem rcu_read_lock mount_lock mount_lock.seqcount irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem mount_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem mount_lock mount_lock.seqcount irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem (inetaddr_chain).rwsem irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem inet6addr_chain.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx &wdev->mtx irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &wdev->pmsr_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &rdev->bss_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sta->ampdu_mlme.mtx irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sta->ampdu_mlme.mtx &sta->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx (work_completion)(&sta->ampdu_mlme.work) irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx rcu_read_lock rhashtable_bucket irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx rcu_state.exp_mutex rcu_node_0 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx rcu_state.exp_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx rcu_state.exp_mutex rcu_read_lock &pool->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx rcu_state.exp_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx rcu_state.exp_mutex &rnp->exp_wq[0] irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx rcu_state.exp_mutex &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &obj_hash[i].lock irq_context: 0 
(wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sta->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx krc.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &local->key_mtx irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx fs_reclaim irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &fq->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx nl_table_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx nl_table_wait.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx pin_fs_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &dentry->d_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 &dentry->d_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 &dentry->d_lock &dentry->d_lock/1 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 tk_core.seq.seqcount irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 rename_lock.seqcount irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 pin_fs_lock irq_context: 0 sb_writers#3 tomoyo_ss &____s->seqcount#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 rcu_read_lock mount_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 rcu_read_lock mount_lock mount_lock.seqcount irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 mount_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 mount_lock mount_lock.seqcount irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 rcu_read_lock &dentry->d_lock irq_context: 0 (wq_completion)netns 
net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 &fsnotify_mark_srcu irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 &sb->s_type->i_lock_key#7 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 &s->s_inode_list_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 &xa->xa_lock#6 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx rcu_read_lock &dentry->d_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &fsnotify_mark_srcu irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_lock_key#7 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &s->s_inode_list_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &xa->xa_lock#6 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx rcu_read_lock mount_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx rcu_read_lock mount_lock mount_lock.seqcount irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx mount_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx mount_lock mount_lock.seqcount irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &local->active_txq_lock[i] irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx (work_completion)(&sta->drv_deliver_wk) irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &local->chanctx_mtx irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &ifibss->incomplete_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx &wdev->mtx 
lweventlist_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx &wdev->mtx lweventlist_lock pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx &wdev->mtx lweventlist_lock &dir->lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx &wdev->mtx rcu_read_lock &pool->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx &wdev->mtx rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx &wdev->mtx rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx &wdev->mtx rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx &wdev->mtx rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx &wdev->mtx pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx &wdev->mtx krc.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx &wdev->mtx hrtimer_bases.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx &wdev->mtx hrtimer_bases.lock &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->mtx irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->mtx &local->chanctx_mtx irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->mtx &local->chanctx_mtx &data->mutex irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->mtx &local->chanctx_mtx &local->queue_stop_reason_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->mtx &local->chanctx_mtx rcu_read_lock &fq->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->mtx &local->chanctx_mtx rcu_read_lock &local->queue_stop_reason_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->mtx &local->chanctx_mtx &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->mtx &local->chanctx_mtx pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->mtx &local->chanctx_mtx krc.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx &wdev->mtx rcu_state.exp_mutex rcu_node_0 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx &wdev->mtx rcu_state.exp_mutex &obj_hash[i].lock irq_context: 0 
(wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx &wdev->mtx rcu_state.exp_mutex rcu_read_lock &pool->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx &wdev->mtx rcu_state.exp_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx &wdev->mtx rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx &wdev->mtx rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx &wdev->mtx rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx &wdev->mtx rcu_state.exp_mutex &rnp->exp_wq[1] irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx &wdev->mtx rcu_state.exp_mutex &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &list->lock#18 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx &wdev->mtx (&ifibss->timer) irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &base->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &rdev->bss_lock &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &rdev->bss_lock pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &rdev->bss_lock krc.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx &wdev->mtx rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx &wdev->mtx rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx &wdev->mtx rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex (work_completion)(&wdev->disconnect_wk) irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex (work_completion)(&wdev->pmsr_free_wk) irq_context: 0 (wq_completion)cfg80211 (work_completion)(&(&rdev->dfs_update_channels_wk)->work) rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)cfg80211 (work_completion)(&(&rdev->dfs_update_channels_wk)->work) rtnl_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex (work_completion)(&sdata->activate_links_work) irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx rcu_state.exp_mutex rcu_node_0 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx rcu_state.exp_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex 
&rdev->wiphy.mtx rcu_state.exp_mutex rcu_read_lock &pool->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx rcu_state.exp_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx rcu_state.exp_mutex &rnp->exp_wq[3] irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx rcu_state.exp_mutex &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx &local->mtx irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx &rdev->wiphy_work_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx &local->sta_mtx irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx _xmit_ETHER irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx _xmit_ETHER &local->filter_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx _xmit_ETHER &local->filter_lock &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx _xmit_ETHER &local->filter_lock pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx _xmit_ETHER &local->filter_lock krc.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx (&local->dynamic_ps_timer) irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx &base->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx (work_completion)(&local->dynamic_ps_enable_work) irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx (work_completion)(&sdata->recalc_smps) irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx (work_completion)(&link->csa_finalize_work) irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx (work_completion)(&link->color_change_finalize_work) irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx (work_completion)(&(&link->dfs_cac_timer_work)->work) irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx (work_completion)(&(&sdata->dec_tailroom_needed_wk)->work) irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx &local->key_mtx irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx &local->key_mtx rcu_state.exp_mutex rcu_node_0 irq_context: 0 
(wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx &local->key_mtx rcu_state.exp_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx &local->key_mtx rcu_state.exp_mutex rcu_read_lock &pool->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx &local->key_mtx rcu_state.exp_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx &local->key_mtx rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx &local->key_mtx rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx &local->key_mtx rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx &local->key_mtx rcu_state.exp_mutex &rnp->exp_wq[0] irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx &local->key_mtx rcu_state.exp_mutex &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx &local->key_mtx &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx &list->lock#18 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx &local->queue_stop_reason_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx &list->lock#19 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx (work_completion)(&local->reconfig_filter) irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx &local->key_mtx rcu_state.exp_mutex &rnp->exp_wq[2] irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx &wq->mutex irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx &wq->mutex &pool->lock/1 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx &wq->mutex &x->wait#10 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->dev_wait irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_read_lock &____s->seqcount irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &zone->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &zone->lock &____s->seqcount irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &fq->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &fq->lock rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &fq->lock &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &fq->lock &zone->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &fq->lock &zone->lock 
&____s->seqcount irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &fq->lock pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &local->iflist_mtx irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx fs_reclaim irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx &fq->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx nl_table_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx nl_table_wait.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx &root->kernfs_rwsem irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx &root->kernfs_rwsem kernfs_idr_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx &root->kernfs_rwsem &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx &root->kernfs_rwsem pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx rcu_state.exp_mutex &rnp->exp_wq[2] irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx dev_base_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx cpu_hotplug_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx cpu_hotplug_lock &list->lock#5 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx &dir->lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx bpf_devs_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx net_rwsem irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx net_rwsem &list->lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx &in_dev->mc_tomb_lock irq_context: 0 (wq_completion)netns 
net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx sysctl_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx sysctl_lock &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx sysctl_lock pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx sysctl_lock krc.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx &tbl->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx class irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx (&tbl->proxy_timer) irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx &ul->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx &net->xdp.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx krc.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx mirred_list_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx &nft_net->commit_mutex irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx &tn->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx rcu_read_lock &tb->tb6_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx rcu_read_lock &tb->tb6_lock &net->ipv6.fib6_walker_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx proc_subdir_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx &ent->pde_unload_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx proc_inum_ida.xa_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx &net->ipv6.addrconf_hash_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx &ndev->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx &ndev->lock &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx &idev->mc_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx rcu_state.exp_mutex &rnp->exp_wq[0] irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx &idev->mc_query_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx &idev->mc_query_lock &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx (work_completion)(&(&idev->mc_report_work)->work) irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx &idev->mc_lock &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx &idev->mc_lock pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem 
rtnl_mutex &rdev->wiphy.mtx &idev->mc_lock krc.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx &idev->mc_report_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx &pnn->pndevs.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx &pnn->routes.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx &pnettable->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx smc_ib_devices.mutex irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx target_list_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx pin_fs_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx &dentry->d_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 &dentry->d_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 &dentry->d_lock &dentry->d_lock/1 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 tk_core.seq.seqcount irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 rename_lock.seqcount irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 pin_fs_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 rcu_read_lock mount_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 rcu_read_lock mount_lock mount_lock.seqcount irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 mount_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 mount_lock mount_lock.seqcount irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 rcu_read_lock &dentry->d_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 &fsnotify_mark_srcu irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 &sb->s_type->i_lock_key#7 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 &s->s_inode_list_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 &xa->xa_lock#6 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex 
&rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx rcu_read_lock &dentry->d_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx &fsnotify_mark_srcu irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_lock_key#7 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx &s->s_inode_list_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx &xa->xa_lock#6 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx rcu_read_lock mount_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx rcu_read_lock mount_lock mount_lock.seqcount irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx mount_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx mount_lock mount_lock.seqcount irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx (work_completion)(&(&link->color_collision_detect_work)->work) irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx &local->chanctx_mtx irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx sysfs_symlink_target_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx kernfs_idr_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx &k->list_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx &root->kernfs_rwsem irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx dev_hotplug_mutex irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx dev_hotplug_mutex &dev->power.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx dev_hotplug_mutex &k->k_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx dev_pm_qos_sysfs_mtx irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx dev_pm_qos_sysfs_mtx &root->kernfs_rwsem irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx dev_pm_qos_sysfs_mtx &root->kernfs_rwsem irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx dev_pm_qos_sysfs_mtx dev_pm_qos_mtx irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx &k->k_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx &k->k_lock klist_remove_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx &root->kernfs_rwsem &____s->seqcount irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx subsys mutex#17 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx subsys mutex#17 &k->k_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx subsys 
mutex#17 &k->k_lock klist_remove_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx &x->wait#9 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx dpm_list_mtx irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx &dev->power.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx deferred_probe_mutex irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx device_links_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx gdp_mutex irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx gdp_mutex sysfs_symlink_target_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx gdp_mutex &root->kernfs_rwsem irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx gdp_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx gdp_mutex kernfs_idr_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx gdp_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx gdp_mutex pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx gdp_mutex &k->list_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx cpu_hotplug_lock xps_map_mutex irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx rcu_state.exp_mutex &rnp->exp_wq[1] irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx &rdev->mgmt_registrations_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx &wdev->event_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem (work_completion)(&(&local->roc_work)->work) irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem (work_completion)(&local->restart_work) irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem (work_completion)(&local->reconfig_filter) irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem (work_completion)(&local->sched_scan_stopped_work) irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem (work_completion)(&local->radar_detected_work) irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &list->lock#19 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &rdev->wiphy.mtx irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem (work_completion)(&rfkill->uevent_work) irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem (work_completion)(&rfkill->sync_work) irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &root->kernfs_rwsem irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem dev_pm_qos_sysfs_mtx irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem dev_pm_qos_sysfs_mtx &root->kernfs_rwsem irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem dev_pm_qos_sysfs_mtx &root->kernfs_rwsem irq_context: 0 (wq_completion)netns net_cleanup_work 
pernet_ops_rwsem dev_pm_qos_sysfs_mtx dev_pm_qos_mtx irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &k->k_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &k->k_lock klist_remove_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem subsys mutex#40 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem subsys mutex#40 &k->k_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem subsys mutex#40 &k->k_lock klist_remove_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &root->kernfs_rwsem kernfs_idr_lock &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &root->kernfs_rwsem kernfs_idr_lock pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &x->wait#9 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem dpm_list_mtx irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &dev->power.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem deferred_probe_mutex irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem device_links_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &rfkill->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem uevent_sock_mutex mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem uevent_sock_mutex rlock-AF_NETLINK irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rfkill_global_mutex irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rfkill_global_mutex rcu_read_lock &pool->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rfkill_global_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rfkill_global_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rfkill_global_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rfkill_global_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rfkill_global_mutex &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rfkill_global_mutex.wait_lock irq_context: 0 (wq_completion)netns 
net_cleanup_work pernet_ops_rwsem triggers_list_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem leds_list_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem leds_list_lock &led_cdev->trigger_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx &root->kernfs_rwsem &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx subsys mutex#56 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx subsys mutex#56 &k->k_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx subsys mutex#56 &k->k_lock klist_remove_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx uevent_sock_mutex irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx uevent_sock_mutex mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx uevent_sock_mutex pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx uevent_sock_mutex nl_table_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx uevent_sock_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx uevent_sock_mutex nl_table_wait.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem (work_completion)(&rdev->wiphy_work) irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem (work_completion)(&rdev->conn_work) irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem (work_completion)(&rdev->event_work) irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem (work_completion)(&(&rdev->dfs_update_channels_wk)->work) irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem (work_completion)(&(&rdev->background_cac_done_wk)->work) irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem (work_completion)(&rdev->destroy_work) irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem (work_completion)(&rdev->propagate_radar_detect_wk) irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem (work_completion)(&rdev->propagate_cac_done_wk) irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem (work_completion)(&rdev->mgmt_registrations_update_wk) irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem (work_completion)(&rdev->background_cac_abort_wk) irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &dev->mutex &dev->power.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &dev->mutex device_links_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &dev->mutex &root->kernfs_rwsem irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &dev->mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &dev->mutex 
&root->kernfs_rwsem &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &dev->mutex &root->kernfs_rwsem pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &dev->mutex &root->kernfs_rwsem kernfs_idr_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &dev->mutex klist_remove_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &dev->mutex &k->k_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &dev->mutex &k->k_lock klist_remove_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &dev->mutex fs_reclaim irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &dev->mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &dev->mutex &c->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &dev->mutex pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &dev->mutex uevent_sock_mutex irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &dev->mutex uevent_sock_mutex fs_reclaim irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &dev->mutex uevent_sock_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &dev->mutex uevent_sock_mutex pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &dev->mutex uevent_sock_mutex nl_table_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &dev->mutex uevent_sock_mutex rlock-AF_NETLINK irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &dev->mutex uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &dev->mutex uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &dev->mutex uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &dev->mutex uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &dev->mutex uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &dev->mutex uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &dev->mutex uevent_sock_mutex rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &dev->mutex uevent_sock_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &dev->mutex uevent_sock_mutex rcu_read_lock &rcu_state.expedited_wq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &dev->mutex uevent_sock_mutex rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &dev->mutex uevent_sock_mutex rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &dev->mutex uevent_sock_mutex rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, 
cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &dev->mutex uevent_sock_mutex nl_table_wait.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &dev->mutex uevent_sock_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &dev->mutex &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem subsys mutex#55 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem subsys mutex#55 &k->k_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem subsys mutex#55 &k->k_lock klist_remove_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem uevent_sock_mutex rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem uevent_sock_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem gdp_mutex irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem (&local->sta_cleanup) irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx rcu_state.exp_mutex &rnp->exp_wq[2] irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &c->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx &c->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx &n->list_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx &n->list_lock &c->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem lweventlist_lock pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem lweventlist_lock &dir->lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx rcu_state.exp_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &hard_iface->bat_iv.ogm_buff_mutex irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &dev->mutex uevent_sock_mutex &c->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rnp->exp_wq[3] irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &ipvlan->addrs_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &ipvlan->addrs_lock &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &ipvlan->addrs_lock pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &ipvlan->addrs_lock krc.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &xa->xa_lock#13 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &app->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex (&app->join_timer) irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex (&app->periodic_timer) irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &list->lock#14 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex (&app->join_timer)#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &app->lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work 
pernet_ops_rwsem rtnl_mutex &list->lock#15 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex (work_completion)(&(&priv->scan_result)->work) irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex (work_completion)(&(&priv->connect)->work) irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex (&hsr->prune_timer) irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex (&hsr->announce_timer) irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex pin_fs_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &dentry->d_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &sb->s_type->i_mutex_key#3 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &sb->s_type->i_mutex_key#3 &dentry->d_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &sb->s_type->i_mutex_key#3 &dentry->d_lock &dentry->d_lock/1 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &sb->s_type->i_mutex_key#3 tk_core.seq.seqcount irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &sb->s_type->i_mutex_key#3 rename_lock.seqcount irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &sb->s_type->i_mutex_key#3 pin_fs_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &sb->s_type->i_mutex_key#3 rcu_read_lock mount_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &sb->s_type->i_mutex_key#3 rcu_read_lock mount_lock mount_lock.seqcount irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &sb->s_type->i_mutex_key#3 mount_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &sb->s_type->i_mutex_key#3 mount_lock mount_lock.seqcount irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &sb->s_type->i_mutex_key#3 rcu_read_lock &dentry->d_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &sb->s_type->i_mutex_key#3 &fsnotify_mark_srcu irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &sb->s_type->i_mutex_key#3 &sb->s_type->i_lock_key#7 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &sb->s_type->i_mutex_key#3 &s->s_inode_list_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &sb->s_type->i_mutex_key#3 &xa->xa_lock#6 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &sb->s_type->i_mutex_key#3 &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &sb->s_type->i_mutex_key#3 pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_read_lock &dentry->d_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &fsnotify_mark_srcu irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &sb->s_type->i_lock_key#7 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &s->s_inode_list_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &xa->xa_lock#6 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_read_lock mount_lock irq_context: 0 (wq_completion)netns 
net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_read_lock mount_lock mount_lock.seqcount irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex mount_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex mount_lock mount_lock.seqcount irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex (console_sem).lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex console_lock console_srcu console_owner_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex console_lock console_srcu console_owner irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex console_lock console_srcu console_owner &port_lock_key irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex console_lock console_srcu console_owner console_owner_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_state.exp_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_state.exp_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_read_lock &entry->crc_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_read_lock &list->lock#5 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &hard_iface->bat_iv.ogm_buff_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &hard_iface->bat_iv.ogm_buff_mutex pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex key#19 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &cfs_rq->removed.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &bat_priv->forw_bcast_list_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &bat_priv->forw_bat_list_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex (work_completion)(&(&forw_packet_aggr->delayed_work)->work) irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &bat_priv->tt.changes_list_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &br->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &br->lock pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &br->lock &dir->lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &br->lock deferred_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &br->lock rcu_read_lock &pool->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &br->lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &br->lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &br->lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock 
irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &br->lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &br->lock (console_sem).lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &br->lock console_lock console_srcu console_owner_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &br->lock console_lock console_srcu console_owner irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &br->lock console_lock console_srcu console_owner &port_lock_key irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &br->lock console_lock console_srcu console_owner console_owner_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &br->lock nl_table_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &br->lock &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &br->lock nl_table_wait.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &br->lock &br->hash_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &br->lock &br->hash_lock rcu_read_lock rhashtable_bucket irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &br->lock &br->hash_lock pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &br->lock &br->hash_lock nl_table_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &br->lock &br->hash_lock &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &br->lock &br->hash_lock nl_table_wait.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &br->lock &br->multicast_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &br->lock &br->multicast_lock pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &br->lock &br->multicast_lock &dir->lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &br->lock &br->multicast_lock deferred_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &br->lock &br->multicast_lock nl_table_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &br->lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &br->lock &br->multicast_lock nl_table_wait.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &br->lock &br->multicast_lock rcu_read_lock &pool->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &br->lock &br->multicast_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &br->lock &br->multicast_lock rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &br->lock &br->multicast_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &br->lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem 
rtnl_mutex &br->lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &br->lock &br->multicast_lock &c->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &br->lock &br->multicast_lock &n->list_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &br->lock &br->multicast_lock &n->list_lock &c->lock irq_context: softirq (&mp->timer) &br->multicast_lock rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: softirq (&mp->timer) &br->multicast_lock rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex (switchdev_blocking_notif_chain).rwsem irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_read_lock rhashtable_bucket irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex (&pmctx->ip6_mc_router_timer) irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex (&pmctx->ip4_mc_router_timer) irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &br->hash_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &br->hash_lock rcu_read_lock rhashtable_bucket irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &br->hash_lock pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &br->hash_lock nl_table_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &br->hash_lock &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &br->hash_lock nl_table_wait.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &br->hash_lock &c->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex deferred_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &br->multicast_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex pcpu_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &br->lock batched_entropy_u8.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &br->lock kfence_freelist_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &br->lock &meta->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &br->lock lweventlist_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &br->lock lweventlist_lock pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &br->lock lweventlist_lock &dir->lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &br->hash_lock rcu_read_lock rcu_read_lock &pool->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &br->hash_lock rcu_read_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &br->hash_lock rcu_read_lock rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &br->hash_lock rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 (wq_completion)netns 
net_cleanup_work pernet_ops_rwsem rtnl_mutex &br->hash_lock rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &br->hash_lock rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &br->hash_lock &n->list_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &br->hash_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex (work_completion)(&(&br->gc_work)->work) irq_context: softirq (&mp->timer) &br->multicast_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: softirq (&mp->timer) &br->multicast_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex j1939_netdev_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &dev->tx_global_lock &qdisc_xmit_lock_key irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &dev->tx_global_lock &qdisc_xmit_lock_key#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &dev->tx_global_lock &vlan_netdev_xmit_lock_key irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &dev->tx_global_lock &batadv_netdev_xmit_lock_key irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &dev->tx_global_lock &qdisc_xmit_lock_key#3 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &dev->tx_global_lock &qdisc_xmit_lock_key#4 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &dev->tx_global_lock _xmit_LOOPBACK#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex dev->qdisc_tx_busylock ?: &qdisc_tx_busylock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex dev->qdisc_tx_busylock ?: &qdisc_tx_busylock#2 &sch->q.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex k-sk_lock-AF_INET6 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex k-sk_lock-AF_INET6 k-slock-AF_INET6 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex k-slock-AF_INET6 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &table->hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &table->hash[i].lock &table->hash2[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex k-clock-AF_INET6 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &sb->s_type->i_lock_key#8 irq_context: 0 dup_mmap_sem &mm->mmap_lock rcu_node_0 irq_context: 0 dup_mmap_sem &mm->mmap_lock &rcu_state.expedited_wq irq_context: 0 dup_mmap_sem &mm->mmap_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex k-sk_lock-AF_INET irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex k-sk_lock-AF_INET k-slock-AF_INET irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex k-slock-AF_INET irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex k-clock-AF_INET irq_context: 0 (wq_completion)netns 
net_cleanup_work pernet_ops_rwsem rtnl_mutex &macsec_netdev_addr_lock_key/1 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &macsec_netdev_addr_lock_key/1 _xmit_ETHER irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &macsec_netdev_addr_lock_key/1 _xmit_ETHER &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &macsec_netdev_addr_lock_key/1 _xmit_ETHER pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &macsec_netdev_addr_lock_key/1 _xmit_ETHER krc.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex _xmit_ETHER (console_sem).lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex _xmit_ETHER console_lock console_srcu console_owner_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex _xmit_ETHER console_lock console_srcu console_owner irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex _xmit_ETHER console_lock console_srcu console_owner &port_lock_key irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex _xmit_ETHER console_lock console_srcu console_owner console_owner_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &macvlan_netdev_addr_lock_key/1 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &macvlan_netdev_addr_lock_key/1 _xmit_ETHER irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &macvlan_netdev_addr_lock_key/1 _xmit_ETHER &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &macvlan_netdev_addr_lock_key/1 _xmit_ETHER pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &macvlan_netdev_addr_lock_key/1 _xmit_ETHER krc.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &vlan_netdev_addr_lock_key/1 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &vlan_netdev_addr_lock_key/1 _xmit_ETHER irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &vlan_netdev_addr_lock_key/1 _xmit_ETHER &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &vlan_netdev_addr_lock_key/1 _xmit_ETHER pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &vlan_netdev_addr_lock_key/1 _xmit_ETHER krc.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex lweventlist_lock &n->list_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex lweventlist_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &wg->device_update_lock &list->lock#17 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &wg->device_update_lock (&peer->timer_retransmit_handshake) irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &wg->device_update_lock &base->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &wg->device_update_lock (&peer->timer_send_keepalive) irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &wg->device_update_lock (&peer->timer_new_handshake) irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex 
&wg->device_update_lock (&peer->timer_zero_key_material) irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &wg->device_update_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &wg->device_update_lock (&peer->timer_persistent_keepalive) irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &wg->device_update_lock (work_completion)(&peer->clear_peer_work) irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &wg->device_update_lock &handshake->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &wg->device_update_lock &handshake->lock &table->lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &wg->device_update_lock &peer->keypairs.keypair_update_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &wg->device_update_lock &peer->keypairs.keypair_update_lock &table->lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &wg->device_update_lock &peer->keypairs.keypair_update_lock &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &wg->device_update_lock &peer->keypairs.keypair_update_lock pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &wg->device_update_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &r->consumer_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &wg->socket_update_lock irq_context: 0 (wq_completion)events (debug_obj_work).work rcu_node_0 irq_context: 0 (wq_completion)events (debug_obj_work).work &rcu_state.expedited_wq irq_context: 0 (wq_completion)events (debug_obj_work).work &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)events (debug_obj_work).work &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events (debug_obj_work).work &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &nn->netlink_tap_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &dev_addr_list_lock_key#2/1 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &dev_addr_list_lock_key#2/1 _xmit_ETHER irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &dev_addr_list_lock_key#2/1 _xmit_ETHER &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &dev_addr_list_lock_key#2/1 _xmit_ETHER pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &dev_addr_list_lock_key#2/1 _xmit_ETHER krc.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex (work_completion)(&(&bond->mii_work)->work) irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex (work_completion)(&(&bond->arp_work)->work) irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex (work_completion)(&(&bond->alb_work)->work) irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex (work_completion)(&(&bond->ad_work)->work) irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 
(wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex (work_completion)(&(&bond->slave_arr_work)->work) irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &dev_addr_list_lock_key/1 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &dev_addr_list_lock_key/1 _xmit_ETHER irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &dev_addr_list_lock_key/1 _xmit_ETHER &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &dev_addr_list_lock_key/1 _xmit_ETHER pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &dev_addr_list_lock_key/1 _xmit_ETHER krc.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex (&br->hello_timer) irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex (&br->topology_change_timer) irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex (&br->tcn_timer) irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex (&brmctx->ip4_mc_router_timer) irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex (&brmctx->ip4_other_query.timer) irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex (&brmctx->ip4_own_query.timer) irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex (&brmctx->ip6_mc_router_timer) irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex (&brmctx->ip6_other_query.timer) irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex (&brmctx->ip6_own_query.timer) irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &bridge_netdev_addr_lock_key/1 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &bridge_netdev_addr_lock_key/1 &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &bridge_netdev_addr_lock_key/1 pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &bridge_netdev_addr_lock_key/1 krc.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &idev->mc_lock &bridge_netdev_addr_lock_key/1 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &idev->mc_lock &bridge_netdev_addr_lock_key/1 &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &idev->mc_lock &bridge_netdev_addr_lock_key/1 pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &idev->mc_lock &bridge_netdev_addr_lock_key/1 krc.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &macsec_netdev_addr_lock_key/1 &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &macsec_netdev_addr_lock_key/1 krc.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &idev->mc_lock &macsec_netdev_addr_lock_key/1 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &idev->mc_lock &macsec_netdev_addr_lock_key/1 &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &idev->mc_lock &macsec_netdev_addr_lock_key/1 krc.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &macvlan_netdev_addr_lock_key/1 &obj_hash[i].lock 
irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &macvlan_netdev_addr_lock_key/1 krc.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &idev->mc_lock &macvlan_netdev_addr_lock_key/1 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &idev->mc_lock &macvlan_netdev_addr_lock_key/1 &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &idev->mc_lock &macvlan_netdev_addr_lock_key/1 krc.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &idev->mc_lock &n->list_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &idev->mc_lock &n->list_lock &c->lock irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock &in_dev->mc_tomb_lock pool_lock#2 irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock &in_dev->mc_tomb_lock &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_read_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &vlan_netdev_addr_lock_key/1 &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &vlan_netdev_addr_lock_key/1 krc.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &tbl->lock &____s->seqcount irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &tbl->lock rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &idev->mc_lock &vlan_netdev_addr_lock_key/1 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &idev->mc_lock &vlan_netdev_addr_lock_key/1 &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &idev->mc_lock &vlan_netdev_addr_lock_key/1 krc.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &tbl->lock &obj_hash[i].lock pool_lock irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock rcu_node_0 irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock &rcu_state.expedited_wq irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex fib_info_lock &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex _xmit_ETHER/1 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex _xmit_ETHER/1 &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex _xmit_ETHER/1 krc.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &idev->mc_lock _xmit_ETHER/1 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &idev->mc_lock _xmit_ETHER/1 &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &idev->mc_lock _xmit_ETHER/1 krc.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_read_lock &net->sctp.local_addr_lock &net->sctp.addr_wq_lock &n->list_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_read_lock &net->sctp.local_addr_lock &net->sctp.addr_wq_lock &n->list_lock &c->lock irq_context: 0 &sig->cred_guard_mutex 
&sig->exec_update_lock &mm->mmap_lock rcu_node_0 irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock &rcu_state.expedited_wq irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->cmd_work) &obj_hash[i].lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->cmd_work) &c->lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->cmd_work) &data->read_wait &p->pi_lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->cmd_work) &data->read_wait &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->cmd_work) &data->read_wait &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->cmd_work) rcu_read_lock &base->lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->cmd_work) rcu_read_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &base->lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &c->lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &n->list_lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &n->list_lock &c->lock irq_context: 0 &type->i_mutex_dir_key#4 remove_cache_srcu pool_lock#2 irq_context: 0 (wq_completion)hci1 (work_completion)(&hdev->power_on) &hdev->req_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)hci1 (work_completion)(&hdev->power_on) &hdev->req_lock hci_sk_list.lock irq_context: 0 (wq_completion)hci1 (work_completion)(&hdev->power_on) fs_reclaim irq_context: 0 (wq_completion)hci1 (work_completion)(&hdev->power_on) fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &p->lock rcu_node_0 irq_context: 0 &p->lock &rcu_state.expedited_wq irq_context: 0 &p->lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock rcu_read_lock &rcu_state.expedited_wq irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)gid-cache-wq (work_completion)(&work->work) &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex raw_notifier_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex bcm_notifier_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex isotp_notifier_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &batadv_netdev_addr_lock_key/1 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &batadv_netdev_addr_lock_key/1 &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &batadv_netdev_addr_lock_key/1 krc.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &idev->mc_lock &batadv_netdev_addr_lock_key/1 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex 
&idev->mc_lock &batadv_netdev_addr_lock_key/1 &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &idev->mc_lock &batadv_netdev_addr_lock_key/1 krc.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &idev->mc_lock &batadv_netdev_addr_lock_key/1 &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_read_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_read_lock &tb->tb6_lock quarantine_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &dev_addr_list_lock_key#2/1 &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &dev_addr_list_lock_key#2/1 krc.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &idev->mc_lock &dev_addr_list_lock_key#2/1 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &idev->mc_lock &dev_addr_list_lock_key#2/1 &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &idev->mc_lock &dev_addr_list_lock_key#2/1 krc.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &dev_addr_list_lock_key/1 &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &dev_addr_list_lock_key/1 krc.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &idev->mc_lock &dev_addr_list_lock_key/1 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &idev->mc_lock &dev_addr_list_lock_key/1 &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &idev->mc_lock &dev_addr_list_lock_key/1 krc.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex napi_hash_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &k->k_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &k->k_lock klist_remove_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex req_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &x->wait#11 irq_context: 0 &type->i_mutex_dir_key/1 &sb->s_type->i_mutex_key#4 irq_context: 0 &type->i_mutex_dir_key/1 &sb->s_type->i_mutex_key#4 tk_core.seq.seqcount irq_context: 0 &type->i_mutex_dir_key/1 &sb->s_type->i_mutex_key#4 rcu_read_lock &dentry->d_lock irq_context: 0 &type->i_mutex_dir_key/1 &sb->s_type->i_mutex_key#4 &dentry->d_lock irq_context: 0 &type->i_mutex_dir_key/1 &sb->s_type->i_lock_key#5 irq_context: 0 &type->i_mutex_dir_key/1 &sb->s_type->i_lock_key#5 &dentry->d_lock irq_context: 0 &type->i_mutex_dir_key/1 &fsnotify_mark_srcu irq_context: 0 &type->i_mutex_dir_key/1 &s->s_inode_list_lock irq_context: 0 &type->i_mutex_dir_key/1 &sbinfo->stat_lock irq_context: 0 &type->i_mutex_dir_key/1 &xa->xa_lock#6 irq_context: 0 &type->i_mutex_dir_key/1 &dentry->d_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem &pcp->lock &zone->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex subsys mutex#81 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex subsys 
mutex#81 &k->k_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex subsys mutex#81 &k->k_lock klist_remove_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex uevent_sock_mutex irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex uevent_sock_mutex mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex uevent_sock_mutex pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex uevent_sock_mutex nl_table_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex uevent_sock_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex uevent_sock_mutex nl_table_wait.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex gdp_mutex sysfs_symlink_target_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex gdp_mutex &root->kernfs_rwsem irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex gdp_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex gdp_mutex kernfs_idr_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex gdp_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex gdp_mutex pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex gdp_mutex &k->list_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_read_lock &tap_major->minor_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex (work_completion)(&port->bc_work) irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex (inetaddr_chain).rwsem &ipvlan->addrs_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex (inetaddr_chain).rwsem krc.lock &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex (inetaddr_chain).rwsem krc.lock &base->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex (inetaddr_chain).rwsem krc.lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &idev->mc_lock &dev_addr_list_lock_key#3/1 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &idev->mc_lock &dev_addr_list_lock_key#3/1 &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &idev->mc_lock &dev_addr_list_lock_key#3/1 pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &idev->mc_lock &dev_addr_list_lock_key#3/1 krc.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_read_lock &ipvlan->addrs_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &dev_addr_list_lock_key#3/1 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex nf_hook_mutex irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex nf_hook_mutex cpu_hotplug_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex 
(work_completion)(&port->wq) irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex (inetaddr_chain).rwsem &n->list_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex (inetaddr_chain).rwsem &n->list_lock &c->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex (inetaddr_chain).rwsem &net->sctp.local_addr_lock &net->sctp.addr_wq_lock &n->list_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex (inetaddr_chain).rwsem &net->sctp.local_addr_lock &net->sctp.addr_wq_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex dev_hotplug_mutex &k->k_lock irq_context: 0 (wq_completion)hci1 (work_completion)(&hdev->power_on) tk_core.seq.seqcount irq_context: 0 (wq_completion)hci1 (work_completion)(&hdev->power_on) hci_sk_list.lock irq_context: 0 (wq_completion)hci1 (work_completion)(&hdev->power_on) &obj_hash[i].lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) chan_list_lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->lock fs_reclaim irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->lock &c->lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->lock &____s->seqcount#2 irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->lock &____s->seqcount irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->lock &obj_hash[i].lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->lock &x->wait#9 irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->lock &k->list_lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->lock lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->lock lock kernfs_idr_lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->lock &root->kernfs_rwsem irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->lock &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->lock bus_type_sem irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->lock sysfs_symlink_target_lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->lock &root->kernfs_rwsem irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->lock &dev->power.lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->lock dpm_list_mtx irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex irq_context: 0 
(wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex fs_reclaim irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex nl_table_lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex rlock-AF_NETLINK irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex nl_table_wait.lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex pool_lock#2 irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->lock &k->k_lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->lock subsys mutex#80 irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->lock subsys mutex#80 &k->k_lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->lock &list->lock#9 irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->lock rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->lock rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->mcast.work)->work) &bat_priv->mcast.mla_lock &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex input_pool.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_read_lock &bond->ipsec_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex crngs.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex (inetaddr_chain).rwsem &net->sctp.local_addr_lock &net->sctp.addr_wq_lock &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex (inetaddr_chain).rwsem &net->sctp.local_addr_lock &net->sctp.addr_wq_lock &base->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex (inetaddr_chain).rwsem &net->sctp.local_addr_lock &net->sctp.addr_wq_lock &base->lock &obj_hash[i].lock 
irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex _xmit_NETROM#2 irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->lock rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &net->xfrm.xfrm_state_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_read_lock &bond->stats_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex fs_reclaim &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex mrt_lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->lock &hdev->unregister_lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->lock &hdev->unregister_lock fs_reclaim irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->lock &hdev->unregister_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->lock &hdev->unregister_lock &hdev->cmd_sync_work_lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->lock &hdev->unregister_lock rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->lock &hdev->unregister_lock rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->lock &hdev->unregister_lock rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)hci1 (work_completion)(&hdev->cmd_sync_work) irq_context: 0 (wq_completion)hci1 (work_completion)(&hdev->cmd_sync_work) &hdev->cmd_sync_work_lock irq_context: 0 (wq_completion)hci1 (work_completion)(&hdev->cmd_sync_work) &hdev->req_lock irq_context: 0 (wq_completion)hci1 (work_completion)(&hdev->cmd_sync_work) &obj_hash[i].lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock fs_reclaim irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock &obj_hash[i].lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock chan_list_lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock &conn->ident_lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock &base->lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock &list->lock#12 irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->lock 
hci_cb_list_lock rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock &conn->chan_lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->lock &base->lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->tx_work) irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->tx_work) &list->lock#12 irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->tx_work) tk_core.seq.seqcount irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->tx_work) &list->lock#11 irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->tx_work) &data->read_wait irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->tx_work) &list->lock#9 irq_context: 0 (wq_completion)hci1#2 (work_completion)(&conn->pending_rx_work) irq_context: 0 (wq_completion)hci1#2 (work_completion)(&conn->pending_rx_work) &list->lock#13 irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->lock tk_core.seq.seqcount irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_sk_list.lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->lock &n->list_lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->lock &n->list_lock &c->lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock &c->lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_BLUETOOTH-BTPROTO_HCI &n->list_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_BLUETOOTH-BTPROTO_HCI &n->list_lock &c->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &wg->device_update_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &wg->device_update_lock &wg->socket_update_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &wg->device_update_lock rcu_state.exp_mutex rcu_node_0 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &wg->device_update_lock rcu_state.exp_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &wg->device_update_lock rcu_state.exp_mutex rcu_read_lock &pool->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &wg->device_update_lock rcu_state.exp_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &wg->device_update_lock rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &wg->device_update_lock rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem 
&wg->device_update_lock rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &wg->device_update_lock rcu_state.exp_mutex &rnp->exp_wq[3] irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &wg->device_update_lock rcu_state.exp_mutex &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &wg->device_update_lock &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &wg->device_update_lock pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &wg->device_update_lock &table->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &wg->device_update_lock rcu_state.exp_mutex &rnp->exp_wq[0] irq_context: 0 pernet_ops_rwsem ipvs->est_mutex pcpu_alloc_mutex &rcu_state.expedited_wq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &wg->device_update_lock &peer->keypairs.keypair_update_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &wg->device_update_lock (&peer->timer_retransmit_handshake) irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &wg->device_update_lock &base->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &wg->device_update_lock (&peer->timer_send_keepalive) irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &wg->device_update_lock (&peer->timer_new_handshake) irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &wg->device_update_lock (&peer->timer_zero_key_material) irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &wg->device_update_lock (&peer->timer_persistent_keepalive) irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &wg->device_update_lock (work_completion)(&peer->clear_peer_work) irq_context: 0 pernet_ops_rwsem rcu_read_lock pgd_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &wg->device_update_lock &wq->mutex irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &wg->device_update_lock &wq->mutex &pool->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &wg->device_update_lock &wq->mutex &x->wait#10 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &wg->device_update_lock napi_hash_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &wg->device_update_lock rcu_state.exp_mutex &rnp->exp_wq[1] irq_context: 0 pernet_ops_rwsem rcu_read_lock key irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &wg->device_update_lock &wq->mutex &pool->lock/1 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &wg->device_update_lock &table->lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &wg->device_update_lock &list->lock#17 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &wg->device_update_lock rcu_state.exp_mutex &rnp->exp_wq[2] irq_context: 0 pernet_ops_rwsem rcu_read_lock pcpu_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &wg->device_update_lock wq_pool_mutex irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &wg->device_update_lock wq_pool_mutex &wq->mutex irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &wg->device_update_lock wq_pool_mutex &wq->mutex &pool->lock irq_context: 0 (wq_completion)netns net_cleanup_work 
pernet_ops_rwsem &wg->device_update_lock &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &wg->device_update_lock wq_pool_mutex &wq->mutex &pool->lock/1 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &wg->device_update_lock &pool->lock/1 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &wg->device_update_lock &pool->lock/1 rcu_read_lock &pool->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &wg->device_update_lock &pool->lock/1 rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &wg->device_update_lock &pool->lock/1 rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &wg->device_update_lock &pool->lock/1 rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &wg->device_update_lock &pool->lock/1 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &wg->device_update_lock &pool->lock/1 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem cpu_hotplug_lock wq_pool_mutex rcu_node_0 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &wg->device_update_lock wq_mayday_lock irq_context: 0 pernet_ops_rwsem cpu_hotplug_lock wq_pool_mutex &rcu_state.expedited_wq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &wg->device_update_lock &p->pi_lock irq_context: 0 pernet_ops_rwsem cpu_hotplug_lock wq_pool_mutex &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &wg->device_update_lock &p->pi_lock &rq->__lock irq_context: 0 pernet_ops_rwsem cpu_hotplug_lock wq_pool_mutex &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &wg->device_update_lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem cpu_hotplug_lock wq_pool_mutex &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &wg->device_update_lock &x->wait irq_context: 0 pernet_ops_rwsem rcu_read_lock percpu_counters_lock irq_context: 0 pernet_ops_rwsem __ip_vs_mutex &rq->__lock irq_context: 0 pernet_ops_rwsem __ip_vs_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 __ip_vs_mutex &base->lock irq_context: 0 __ip_vs_mutex &base->lock &obj_hash[i].lock irq_context: 0 __ip_vs_mutex &svc->sched_lock irq_context: 0 __ip_vs_mutex krc.lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)wg-kex-wg2#13 (work_completion)(&peer->transmit_handshake_work) &base->lock irq_context: 0 (wq_completion)wg-kex-wg2#13 (work_completion)(&peer->transmit_handshake_work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg0#14 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock &c->lock 
irq_context: 0 (wq_completion)wg-kex-wg0#14 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &base->lock irq_context: 0 (wq_completion)wg-kex-wg0#14 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)wg-kex-wg1#14 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock &c->lock irq_context: 0 (wq_completion)wg-kex-wg1#14 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock &n->list_lock irq_context: 0 (wq_completion)wg-kex-wg1#14 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock &n->list_lock &c->lock irq_context: 0 (wq_completion)wg-kex-wg2#14 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &base->lock irq_context: 0 (wq_completion)wg-kex-wg2#14 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg2#14 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &c->lock irq_context: softirq rcu_callback stock_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 remove_cache_srcu rcu_node_0 irq_context: 0 &sb->s_type->i_mutex_key#10 stock_lock rcu_read_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 
remove_cache_srcu rcu_read_lock &rcu_state.gp_wq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 remove_cache_srcu rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 remove_cache_srcu rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 remove_cache_srcu rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lg->lg_mutex rcu_node_0 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lg->lg_mutex &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lg->lg_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lg->lg_mutex rcu_read_lock rcu_node_0 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lg->lg_mutex rcu_read_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &xa->xa_lock#6 key#10 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_read_lock &base->lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_read_lock &base->lock &obj_hash[i].lock irq_context: 0 cb_lock genl_mutex rcu_read_lock rcu_read_lock_bh rcu_read_lock rlock-AF_PACKET irq_context: 0 cb_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rlock-AF_PACKET irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) &lruvec->lru_lock irq_context: 0 &mm->mmap_lock fill_pool_map-wait-type-override &n->list_lock irq_context: 0 &mm->mmap_lock fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 (wq_completion)bond0#7 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond0#7 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond0#7 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&(&kfence_timer)->work) cpu_hotplug_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx batched_entropy_u8.lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx kfence_freelist_lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &local->iflist_mtx irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx rcu_read_lock &c->lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx rcu_read_lock pool_lock#2 irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx rcu_read_lock rcu_node_0 irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx rcu_read_lock &rq->__lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock 
rtnl_mutex &rdev->wiphy.mtx rcu_read_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx rcu_read_lock rcu_read_lock_bh rcu_read_lock rlock-AF_PACKET irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx rcu_read_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx rcu_read_lock rcu_read_lock_bh pool_lock#2 irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx rlock-AF_NETLINK irq_context: 0 &type->i_mutex_dir_key#3 sb_writers#4 remove_cache_srcu &____s->seqcount irq_context: 0 rcu_read_lock rcu_read_lock_bh noop_qdisc.q.lock irq_context: 0 &mm->mmap_lock sb_pagefaults mapping.invalidate_lock &folio_wait_table[i] irq_context: 0 sk_lock-AF_INET6 rcu_read_lock &obj_hash[i].lock pool_lock irq_context: 0 &mm->mmap_lock sb_pagefaults &journal->j_state_lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 &mm->mmap_lock sb_pagefaults &journal->j_state_lock fill_pool_map-wait-type-override pool_lock irq_context: 0 sk_lock-AF_INET fill_pool_map-wait-type-override &n->list_lock irq_context: 0 sk_lock-AF_INET fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 sk_lock-AF_INET fill_pool_map-wait-type-override rcu_node_0 irq_context: 0 sk_lock-AF_INET fill_pool_map-wait-type-override rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem remove_cache_srcu pool_lock#2 irq_context: 0 rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock rcu_read_lock tk_core.seq.seqcount irq_context: 0 pernet_ops_rwsem remove_cache_srcu rcu_read_lock &rcu_state.gp_wq irq_context: 0 pernet_ops_rwsem remove_cache_srcu rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 pernet_ops_rwsem remove_cache_srcu rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 pernet_ops_rwsem remove_cache_srcu rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock rcu_read_lock &c->lock irq_context: 0 rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock rcu_read_lock &____s->seqcount#2 irq_context: 0 rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock rcu_read_lock &____s->seqcount irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &nf_conntrack_locks[i] batched_entropy_u8.lock crngs.lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx &local->mtx irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx &local->mtx &local->chanctx_mtx irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx &local->mtx &local->chanctx_mtx fs_reclaim irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx &local->mtx &local->chanctx_mtx fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx &local->mtx &local->chanctx_mtx pool_lock#2 irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx &local->mtx &local->chanctx_mtx &obj_hash[i].lock irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx &local->mtx &local->chanctx_mtx rcu_read_lock &local->queue_stop_reason_lock irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx &local->mtx &local->chanctx_mtx rcu_read_lock tk_core.seq.seqcount irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx &local->mtx 
&local->chanctx_mtx rcu_read_lock hwsim_radio_lock irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx &local->mtx &local->chanctx_mtx rcu_read_lock hwsim_radio_lock pool_lock#2 irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx &local->mtx &local->chanctx_mtx rcu_read_lock hwsim_radio_lock &list->lock#19 irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx &local->mtx &local->chanctx_mtx rcu_read_lock &list->lock#19 irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx &local->mtx &local->chanctx_mtx &c->lock irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx &local->mtx &local->chanctx_mtx &____s->seqcount#2 irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx &local->mtx &local->chanctx_mtx &____s->seqcount irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx &local->mtx &local->chanctx_mtx nl_table_lock irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx &local->mtx &local->chanctx_mtx nl_table_wait.lock irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx &local->mtx &local->chanctx_mtx &data->mutex irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx &local->mtx &local->chanctx_mtx rcu_read_lock &sta->lock irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx &local->mtx &local->chanctx_mtx rcu_read_lock &sta->lock pool_lock#2 irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx &local->mtx &local->chanctx_mtx rcu_read_lock &sta->lock &obj_hash[i].lock irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx &local->mtx &local->chanctx_mtx rcu_read_lock &sta->lock krc.lock irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx &local->mtx &local->chanctx_mtx rcu_read_lock rcu_node_0 irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx &local->mtx &local->chanctx_mtx rcu_read_lock &rq->__lock irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx &local->mtx &local->chanctx_mtx rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx &local->mtx &local->chanctx_mtx rcu_read_lock &rcu_state.gp_wq irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx &local->mtx &local->chanctx_mtx rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx &local->mtx &local->chanctx_mtx rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx &local->mtx &local->chanctx_mtx rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx &local->mtx &local->chanctx_mtx rcu_read_lock &pool->lock/1 irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx &local->mtx &local->chanctx_mtx rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx &local->mtx &local->chanctx_mtx rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx &local->mtx &local->chanctx_mtx krc.lock irq_context: 0 (wq_completion)phy8 (work_completion)(&link->csa_finalize_work) irq_context: 0 (wq_completion)phy8 (work_completion)(&link->csa_finalize_work) &wdev->mtx irq_context: 0 (wq_completion)phy8 (work_completion)(&link->csa_finalize_work) &wdev->mtx &local->mtx irq_context: 0 (wq_completion)phy8 (work_completion)(&link->csa_finalize_work) &wdev->mtx &local->mtx &local->chanctx_mtx irq_context: 0 (wq_completion)phy8 (work_completion)(&link->csa_finalize_work) &wdev->mtx &local->mtx &local->chanctx_mtx &rdev->bss_lock irq_context: 0 (wq_completion)phy8 (work_completion)(&link->csa_finalize_work) &wdev->mtx &local->mtx &local->chanctx_mtx fs_reclaim irq_context: 0 (wq_completion)phy8 
(work_completion)(&link->csa_finalize_work) &wdev->mtx &local->mtx &local->chanctx_mtx fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)phy8 (work_completion)(&link->csa_finalize_work) &wdev->mtx &local->mtx &local->chanctx_mtx pool_lock#2 irq_context: 0 (wq_completion)phy8 (work_completion)(&link->csa_finalize_work) &wdev->mtx &local->mtx &local->chanctx_mtx &obj_hash[i].lock irq_context: 0 (wq_completion)phy8 (work_completion)(&link->csa_finalize_work) &wdev->mtx &local->mtx &local->chanctx_mtx krc.lock irq_context: 0 (wq_completion)phy8 (work_completion)(&link->csa_finalize_work) &wdev->mtx &local->mtx &local->chanctx_mtx rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)phy8 (work_completion)(&link->csa_finalize_work) &wdev->mtx &local->mtx &local->chanctx_mtx rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)phy8 (work_completion)(&link->csa_finalize_work) &wdev->mtx &local->mtx &local->chanctx_mtx rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)phy8 (work_completion)(&link->csa_finalize_work) &wdev->mtx &local->mtx &local->chanctx_mtx rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)phy8 (work_completion)(&link->csa_finalize_work) &wdev->mtx &local->mtx &local->chanctx_mtx rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)phy8 (work_completion)(&link->csa_finalize_work) &wdev->mtx &local->mtx &local->chanctx_mtx nl_table_lock irq_context: 0 (wq_completion)phy8 (work_completion)(&link->csa_finalize_work) &wdev->mtx &local->mtx &local->chanctx_mtx nl_table_wait.lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->mtx &local->chanctx_mtx &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->mtx &local->chanctx_mtx rcu_read_lock &local->queue_stop_reason_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->mtx &local->chanctx_mtx rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->mtx &local->chanctx_mtx rcu_read_lock hwsim_radio_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->mtx &local->chanctx_mtx rcu_read_lock &list->lock#19 irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->mtx &local->chanctx_mtx &____s->seqcount#2 irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->mtx &local->chanctx_mtx &____s->seqcount irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->mtx &local->chanctx_mtx nl_table_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->mtx &local->chanctx_mtx nl_table_wait.lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->mtx &local->chanctx_mtx rcu_read_lock &sta->lock irq_context: 0 tasklist_lock &base->lock irq_context: 0 tasklist_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->mtx &local->chanctx_mtx rcu_read_lock &sta->lock pool_lock#2 
irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->mtx &local->chanctx_mtx rcu_read_lock &sta->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->mtx &local->chanctx_mtx rcu_read_lock &sta->lock krc.lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->mtx &local->chanctx_mtx rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->mtx &local->chanctx_mtx rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->mtx &local->chanctx_mtx rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->mtx &local->chanctx_mtx rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->mtx &local->chanctx_mtx rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->mtx &local->chanctx_mtx rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->mtx &local->chanctx_mtx rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->mtx &local->chanctx_mtx krc.lock irq_context: 0 (wq_completion)phy12 (work_completion)(&link->csa_finalize_work) irq_context: 0 (wq_completion)phy12 (work_completion)(&link->csa_finalize_work) &wdev->mtx irq_context: 0 (wq_completion)phy12 (work_completion)(&link->csa_finalize_work) &wdev->mtx &local->mtx irq_context: 0 (wq_completion)phy12 (work_completion)(&link->csa_finalize_work) &wdev->mtx &local->mtx &local->chanctx_mtx irq_context: 0 (wq_completion)phy12 (work_completion)(&link->csa_finalize_work) &wdev->mtx &local->mtx &local->chanctx_mtx &rdev->bss_lock irq_context: 0 (wq_completion)phy12 (work_completion)(&link->csa_finalize_work) &wdev->mtx &local->mtx &local->chanctx_mtx fs_reclaim irq_context: 0 (wq_completion)phy12 (work_completion)(&link->csa_finalize_work) &wdev->mtx &local->mtx &local->chanctx_mtx fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)phy12 (work_completion)(&link->csa_finalize_work) &wdev->mtx &local->mtx &local->chanctx_mtx fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 &type->i_mutex_dir_key#4 &dentry->d_lock &wq &p->pi_lock irq_context: 0 &type->i_mutex_dir_key#4 &dentry->d_lock &wq &p->pi_lock &rq->__lock irq_context: 0 &type->i_mutex_dir_key#4 &dentry->d_lock &wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)phy12 (work_completion)(&link->csa_finalize_work) &wdev->mtx &local->mtx &local->chanctx_mtx &obj_hash[i].lock irq_context: 0 (wq_completion)phy12 (work_completion)(&link->csa_finalize_work) &wdev->mtx &local->mtx &local->chanctx_mtx &rq->__lock irq_context: 0 &type->i_mutex_dir_key#4 &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)phy12 (work_completion)(&link->csa_finalize_work) &wdev->mtx &local->mtx &local->chanctx_mtx krc.lock irq_context: 0 (wq_completion)phy12 
(work_completion)(&link->csa_finalize_work) &wdev->mtx &local->mtx &local->chanctx_mtx rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)phy12 (work_completion)(&link->csa_finalize_work) &wdev->mtx &local->mtx &local->chanctx_mtx rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)phy12 (work_completion)(&link->csa_finalize_work) &wdev->mtx &local->mtx &local->chanctx_mtx rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)phy12 (work_completion)(&link->csa_finalize_work) &wdev->mtx &local->mtx &local->chanctx_mtx rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)phy12 (work_completion)(&link->csa_finalize_work) &wdev->mtx &local->mtx &local->chanctx_mtx rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)phy12 (work_completion)(&link->csa_finalize_work) &wdev->mtx &local->mtx &local->chanctx_mtx nl_table_lock irq_context: 0 (wq_completion)phy12 (work_completion)(&link->csa_finalize_work) &wdev->mtx &local->mtx &local->chanctx_mtx nl_table_wait.lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 sk_lock-AF_INET6 &mm->mmap_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-crypt-wg0#7 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-crypt-wg0#7 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock rcu_read_lock &n->list_lock irq_context: 0 rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock rcu_read_lock &n->list_lock &c->lock irq_context: 0 sk_lock-AF_INET6 fill_pool_map-wait-type-override batched_entropy_u8.lock irq_context: 0 sk_lock-AF_INET6 fill_pool_map-wait-type-override kfence_freelist_lock irq_context: 0 &sb->s_type->i_mutex_key#10 &zone->lock irq_context: 0 &sb->s_type->i_mutex_key#10 &zone->lock &____s->seqcount irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem rcu_read_lock rcu_read_lock &pool->lock irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem rcu_read_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&rdev->destroy_work) irq_context: 0 (wq_completion)events (work_completion)(&rdev->destroy_work) rtnl_mutex irq_context: 0 
(wq_completion)events (work_completion)(&rdev->destroy_work) rtnl_mutex rtnl_mutex.wait_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->destroy_work) rtnl_mutex &pool->lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->destroy_work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->destroy_work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx fill_pool_map-wait-type-override &n->list_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq (&dsp_spl_tl) dsp_lock &base->lock fill_pool_map-wait-type-override pool_lock#2 irq_context: softirq (&dsp_spl_tl) dsp_lock &base->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 sk_lock-AF_INET6 quarantine_lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock &pcp->lock &zone->lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 rtnl_mutex rcu_read_lock &tb->tb6_lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 rtnl_mutex rcu_read_lock &tb->tb6_lock fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)events_power_efficient (gc_work).work fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)events_power_efficient (gc_work).work fill_pool_map-wait-type-override pool_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#51 nsim_bus_dev_list_lock &dev->mutex &xa->xa_lock#14 irq_context: 0 sb_writers#8 &of->mutex kn->active#51 nsim_bus_dev_list_lock &dev->mutex devlinks.xa_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#51 nsim_bus_dev_list_lock &dev->mutex &obj_hash[i].lock irq_context: 0 sb_writers#8 &of->mutex kn->active#51 nsim_bus_dev_list_lock &dev->mutex pool_lock#2 irq_context: 0 sb_writers#8 &of->mutex kn->active#51 nsim_bus_dev_list_lock &dev->mutex klist_remove_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#51 nsim_bus_dev_list_lock &dev->mutex &k->k_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#51 nsim_bus_dev_list_lock &dev->mutex &k->k_lock klist_remove_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#51 nsim_bus_dev_list_lock &dev->mutex mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#8 &of->mutex kn->active#51 nsim_bus_dev_list_lock &dev->mutex &c->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#51 nsim_bus_dev_list_lock &dev->mutex uevent_sock_mutex irq_context: 0 sb_writers#8 &of->mutex kn->active#51 nsim_bus_dev_list_lock &dev->mutex uevent_sock_mutex mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#8 &of->mutex kn->active#51 nsim_bus_dev_list_lock &dev->mutex uevent_sock_mutex pool_lock#2 irq_context: 0 sb_writers#8 &of->mutex kn->active#51 nsim_bus_dev_list_lock &dev->mutex uevent_sock_mutex nl_table_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#51 nsim_bus_dev_list_lock &dev->mutex uevent_sock_mutex rlock-AF_NETLINK irq_context: 0 sb_writers#8 &of->mutex kn->active#51 nsim_bus_dev_list_lock &dev->mutex 
uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait irq_context: 0 sb_writers#8 &of->mutex kn->active#51 nsim_bus_dev_list_lock &dev->mutex uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#51 nsim_bus_dev_list_lock &dev->mutex uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq irq_context: 0 sb_writers#8 &of->mutex kn->active#51 nsim_bus_dev_list_lock &dev->mutex uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#51 nsim_bus_dev_list_lock &dev->mutex uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock &cfs_rq->removed.lock irq_context: 0 sb_writers#8 &of->mutex kn->active#51 nsim_bus_dev_list_lock &dev->mutex uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#8 &of->mutex kn->active#51 nsim_bus_dev_list_lock &dev->mutex uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#8 &of->mutex kn->active#51 nsim_bus_dev_list_lock &dev->mutex uevent_sock_mutex rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#8 &of->mutex kn->active#51 nsim_bus_dev_list_lock &dev->mutex uevent_sock_mutex rcu_read_lock &rq->__lock irq_context: 0 sb_writers#8 &of->mutex kn->active#51 nsim_bus_dev_list_lock &dev->mutex uevent_sock_mutex rcu_read_lock &rcu_state.gp_wq irq_context: 0 sb_writers#8 &of->mutex kn->active#51 nsim_bus_dev_list_lock &dev->mutex uevent_sock_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#51 nsim_bus_dev_list_lock &dev->mutex uevent_sock_mutex nl_table_wait.lock irq_context: 0 sb_writers#8 &of->mutex kn->active#51 nsim_bus_dev_list_lock &dev->mutex uevent_sock_mutex &obj_hash[i].lock irq_context: 0 sb_writers#8 &of->mutex kn->active#51 nsim_bus_dev_list_lock &x->wait#9 irq_context: 0 sb_writers#8 &of->mutex kn->active#51 nsim_bus_dev_list_lock dpm_list_mtx irq_context: 0 sb_writers#8 &of->mutex kn->active#51 nsim_bus_dev_list_lock &dev->power.lock irq_context: 0 sb_writers#8 &of->mutex kn->active#51 nsim_bus_dev_list_lock deferred_probe_mutex irq_context: 0 sb_writers#8 &of->mutex kn->active#51 nsim_bus_dev_list_lock device_links_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#51 nsim_bus_dev_list_lock mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#8 &of->mutex kn->active#51 nsim_bus_dev_list_lock &c->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#51 nsim_bus_dev_list_lock &n->list_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#51 nsim_bus_dev_list_lock &n->list_lock &c->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#51 nsim_bus_dev_list_lock uevent_sock_mutex irq_context: 0 sb_writers#8 &of->mutex kn->active#51 nsim_bus_dev_list_lock uevent_sock_mutex mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#8 &of->mutex kn->active#51 nsim_bus_dev_list_lock uevent_sock_mutex pool_lock#2 irq_context: 0 sb_writers#8 &of->mutex kn->active#51 nsim_bus_dev_list_lock uevent_sock_mutex nl_table_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#51 nsim_bus_dev_list_lock uevent_sock_mutex rlock-AF_NETLINK irq_context: 0 sb_writers#8 &of->mutex kn->active#51 nsim_bus_dev_list_lock uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait irq_context: 0 sb_writers#8 &of->mutex kn->active#51 nsim_bus_dev_list_lock uevent_sock_mutex rcu_read_lock 
&ei->socket.wq.wait &ep->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#51 nsim_bus_dev_list_lock uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq irq_context: 0 sb_writers#8 &of->mutex kn->active#51 nsim_bus_dev_list_lock uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#51 nsim_bus_dev_list_lock uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#8 &of->mutex kn->active#51 nsim_bus_dev_list_lock uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#8 &of->mutex kn->active#51 nsim_bus_dev_list_lock uevent_sock_mutex rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#8 &of->mutex kn->active#51 nsim_bus_dev_list_lock uevent_sock_mutex rcu_read_lock &rq->__lock irq_context: 0 sb_writers#8 &of->mutex kn->active#51 nsim_bus_dev_list_lock uevent_sock_mutex nl_table_wait.lock irq_context: 0 sb_writers#8 &of->mutex kn->active#51 nsim_bus_dev_list_lock uevent_sock_mutex &obj_hash[i].lock irq_context: 0 sb_writers#8 &of->mutex kn->active#51 nsim_bus_dev_list_lock sysfs_symlink_target_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#51 nsim_bus_dev_list_lock &root->kernfs_rwsem &rq->__lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &____s->seqcount#2 irq_context: 0 &wb->list_lock &sb->s_type->i_lock_key#24 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#7 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#7 crngs.lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#7 fs_reclaim irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#7 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#7 devlinks.xa_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#7 &obj_hash[i].lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#7 nl_table_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#7 nl_table_wait.lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#7 &c->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#7 &xa->xa_lock#14 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#7 &n->list_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#7 &____s->seqcount#2 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#7 &____s->seqcount irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#7 pcpu_alloc_mutex irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#7 pcpu_alloc_mutex pcpu_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#7 &n->list_lock 
&c->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#7 &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&(&devlink->rwork)->work) irq_context: 0 (wq_completion)events (work_completion)(&(&devlink->rwork)->work) rcu_state.exp_mutex rcu_node_0 irq_context: 0 (wq_completion)events (work_completion)(&(&devlink->rwork)->work) rcu_state.exp_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&(&devlink->rwork)->work) rcu_state.exp_mutex rcu_read_lock &pool->lock irq_context: 0 (wq_completion)events (work_completion)(&(&devlink->rwork)->work) rcu_state.exp_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&(&devlink->rwork)->work) rcu_state.exp_mutex &rnp->exp_wq[3] irq_context: 0 (wq_completion)events (work_completion)(&(&devlink->rwork)->work) rcu_state.exp_mutex &pool->lock irq_context: 0 (wq_completion)events (work_completion)(&(&devlink->rwork)->work) rcu_state.exp_mutex &pool->lock &p->pi_lock irq_context: 0 (wq_completion)events (work_completion)(&(&devlink->rwork)->work) rcu_state.exp_mutex &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&(&devlink->rwork)->work) rcu_state.exp_mutex &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&(&devlink->rwork)->work) rcu_state.exp_mutex &rq->__lock irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock &anon_vma->rwsem rcu_node_0 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#7 pcpu_alloc_mutex &rq->__lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#7 &base->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#7 &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#7 pin_fs_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#7 &sb->s_type->i_mutex_key#3 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#7 &sb->s_type->i_mutex_key#3 rename_lock.seqcount irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#7 &sb->s_type->i_mutex_key#3 fs_reclaim irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#7 &sb->s_type->i_mutex_key#3 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#7 &sb->s_type->i_mutex_key#3 &dentry->d_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#7 &sb->s_type->i_mutex_key#3 rcu_read_lock rename_lock.seqcount irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#7 &sb->s_type->i_mutex_key#3 &dentry->d_lock &wq irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#7 &sb->s_type->i_mutex_key#3 mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#7 &sb->s_type->i_mutex_key#3 &sb->s_type->i_lock_key#7 
irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#7 &sb->s_type->i_mutex_key#3 &s->s_inode_list_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#7 &sb->s_type->i_mutex_key#3 tk_core.seq.seqcount irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#7 &sb->s_type->i_mutex_key#3 &sb->s_type->i_lock_key#7 &dentry->d_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#7 &sb->s_type->i_mutex_key#3 &c->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#7 &sb->s_type->i_mutex_key#3 &n->list_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#7 &sb->s_type->i_mutex_key#3 &n->list_lock &c->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#7 batched_entropy_u32.lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#7 rtnl_mutex irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#7 rtnl_mutex &(&net->nexthop.notifier_chain)->rwsem irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#7 rcu_read_lock &c->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#7 rcu_read_lock &data->fib_event_queue_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#7 rcu_read_lock rcu_read_lock &pool->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#7 rcu_read_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#7 rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#7 rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#7 rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#7 rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#7 rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&(&devlink->rwork)->work) rcu_state.exp_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&(&devlink->rwork)->work) &obj_hash[i].lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#7 rcu_read_lock &tb->tb6_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#7 rcu_read_lock &tb->tb6_lock &net->ipv6.fib6_walker_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#7 rcu_read_lock &tb->tb6_lock &c->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex 
&devlink->lock_key#7 rcu_read_lock &tb->tb6_lock &data->fib_event_queue_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#7 rcu_read_lock &tb->tb6_lock &n->list_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#7 rcu_read_lock &tb->tb6_lock &n->list_lock &c->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#7 rcu_read_lock &obj_hash[i].lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#7 &(&fn_net->fib_chain)->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#7 &sb->s_type->i_mutex_key#3 &____s->seqcount#2 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#7 &sb->s_type->i_mutex_key#3 &____s->seqcount irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#7 &sb->s_type->i_mutex_key#3 pool_lock#2 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#7 &sb->s_type->i_mutex_key#3 &pcp->lock &zone->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#7 &devlink_port->type_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#7 stack_depot_init_mutex irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#7 rtnl_mutex bpf_devs_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#7 rtnl_mutex bpf_devs_lock fs_reclaim irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#7 rtnl_mutex bpf_devs_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#7 rtnl_mutex bpf_devs_lock rcu_read_lock rhashtable_bucket irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#7 rtnl_mutex pin_fs_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#7 rtnl_mutex &sb->s_type->i_mutex_key#3 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#7 rtnl_mutex &sb->s_type->i_mutex_key#3 rename_lock.seqcount irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#7 rtnl_mutex &sb->s_type->i_mutex_key#3 fs_reclaim irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#7 rtnl_mutex &sb->s_type->i_mutex_key#3 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#7 rtnl_mutex &sb->s_type->i_mutex_key#3 &dentry->d_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#7 rtnl_mutex &sb->s_type->i_mutex_key#3 rcu_read_lock rename_lock.seqcount irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#7 rtnl_mutex &sb->s_type->i_mutex_key#3 &dentry->d_lock &wq irq_context: 0 sb_writers#8 &of->mutex kn->active#52 
nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#7 rtnl_mutex &sb->s_type->i_mutex_key#3 mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#7 rtnl_mutex &sb->s_type->i_mutex_key#3 &sb->s_type->i_lock_key#7 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#7 rtnl_mutex &sb->s_type->i_mutex_key#3 &s->s_inode_list_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#7 rtnl_mutex &sb->s_type->i_mutex_key#3 tk_core.seq.seqcount irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#7 rtnl_mutex &sb->s_type->i_mutex_key#3 &sb->s_type->i_lock_key#7 &dentry->d_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#7 rtnl_mutex &____s->seqcount irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#7 rtnl_mutex &obj_hash[i].lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#7 rtnl_mutex fs_reclaim irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#7 rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#7 rtnl_mutex &base->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#7 rtnl_mutex &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#7 rtnl_mutex (work_completion)(&(&devlink_port->type_warn_dw)->work) irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#7 rtnl_mutex &devlink_port->type_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#7 rtnl_mutex nl_table_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#7 rtnl_mutex nl_table_wait.lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#7 rtnl_mutex net_rwsem irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#7 rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#7 rtnl_mutex &tn->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#7 rtnl_mutex &x->wait#9 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#7 rtnl_mutex &k->list_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#7 rtnl_mutex gdp_mutex irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#7 rtnl_mutex gdp_mutex &k->list_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#7 rtnl_mutex gdp_mutex fs_reclaim irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#7 rtnl_mutex gdp_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 
sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#7 rtnl_mutex gdp_mutex &c->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#7 rtnl_mutex gdp_mutex lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#7 rtnl_mutex gdp_mutex lock kernfs_idr_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#7 rtnl_mutex gdp_mutex &root->kernfs_rwsem irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#7 rtnl_mutex gdp_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#7 rtnl_mutex gdp_mutex kobj_ns_type_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#7 rtnl_mutex lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#7 rtnl_mutex lock kernfs_idr_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#7 rtnl_mutex &root->kernfs_rwsem irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#7 rtnl_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#7 rtnl_mutex bus_type_sem irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#7 rtnl_mutex &c->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#7 rtnl_mutex &____s->seqcount#2 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#7 rtnl_mutex sysfs_symlink_target_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#7 rtnl_mutex lock kernfs_idr_lock &c->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#7 rtnl_mutex &root->kernfs_rwsem irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#7 rtnl_mutex &dev->power.lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#7 rtnl_mutex dpm_list_mtx irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#7 rtnl_mutex uevent_sock_mutex irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#7 rtnl_mutex uevent_sock_mutex fs_reclaim irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#7 rtnl_mutex uevent_sock_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#7 rtnl_mutex uevent_sock_mutex nl_table_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#7 rtnl_mutex uevent_sock_mutex &obj_hash[i].lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#7 rtnl_mutex uevent_sock_mutex nl_table_wait.lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 
nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#7 rtnl_mutex &k->k_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#7 rtnl_mutex subsys mutex#17 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#7 rtnl_mutex subsys mutex#17 &k->k_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#7 rtnl_mutex &dir->lock#2 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#7 rtnl_mutex &n->list_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#7 rtnl_mutex &n->list_lock &c->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#7 rtnl_mutex dev_hotplug_mutex irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#7 rtnl_mutex dev_hotplug_mutex &dev->power.lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#7 rtnl_mutex dev_base_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#7 rtnl_mutex input_pool.lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#7 rtnl_mutex rcu_read_lock &pool->lock/1 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#7 rtnl_mutex rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#7 rtnl_mutex rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#7 rtnl_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#7 rtnl_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock pool_lock#2 irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock tk_core.seq.seqcount irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#2 irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount irq_context: 0 pernet_ops_rwsem nf_hook_mutex rcu_read_lock rcu_node_0 irq_context: 0 pernet_ops_rwsem nf_hook_mutex rcu_read_lock &rq->__lock irq_context: 0 pernet_ops_rwsem nf_hook_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem nf_hook_mutex rcu_read_lock &rcu_state.gp_wq irq_context: 0 pernet_ops_rwsem nf_hook_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 pernet_ops_rwsem nf_hook_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 pernet_ops_rwsem nf_hook_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq 
irq_context: 0 pernet_ops_rwsem nf_hook_mutex rcu_read_lock &rcu_state.expedited_wq irq_context: 0 pernet_ops_rwsem nf_hook_mutex rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 pernet_ops_rwsem nf_hook_mutex rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 pernet_ops_rwsem nf_hook_mutex rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem nf_hook_mutex nf_hook_mutex.wait_lock irq_context: 0 pernet_ops_rwsem nf_hook_mutex.wait_lock irq_context: 0 rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock rcu_read_lock &pcp->lock &zone->lock irq_context: 0 rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock rcu_read_lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock fill_pool_map-wait-type-override pool_lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock batched_entropy_u32.lock crngs.lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh quarantine_lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &n->list_lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &n->list_lock &c->lock irq_context: softirq (&n->timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 irq_context: softirq (&n->timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock &ndev->lock irq_context: softirq (&n->timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 &obj_hash[i].lock irq_context: softirq (&n->timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 pool_lock#2 irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#10 fs_reclaim irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#10 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#10 rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#10 &obj_hash[i].lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#10 nl_table_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#10 nl_table_wait.lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#10 net_rwsem irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#10 net_rwsem &list->lock#2 irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#10 &tn->lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#10 &c->lock irq_context: softirq &fq->mq_flush_lock bit_wait_table + i &p->pi_lock &rq->__lock &base->lock irq_context: softirq &fq->mq_flush_lock bit_wait_table + i &p->pi_lock &rq->__lock &base->lock &obj_hash[i].lock 
irq_context: 0 &xs->mutex &mm->mmap_lock rcu_read_lock &rcu_state.expedited_wq irq_context: 0 &xs->mutex &mm->mmap_lock rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 &xs->mutex &mm->mmap_lock rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&(&krcp->krw_arr[i].rcu_work)->work) rcu_callback rcu_read_lock pool_lock#2 irq_context: softirq (&asoc->timers[i]) irq_context: softirq (&asoc->timers[i]) slock-AF_INET6 irq_context: softirq (&asoc->timers[i]) slock-AF_INET6 pool_lock#2 irq_context: softirq (&asoc->timers[i]) slock-AF_INET6 &obj_hash[i].lock irq_context: softirq (&asoc->timers[i]) slock-AF_INET6 rcu_read_lock rcu_read_lock &____s->seqcount#7 irq_context: softirq (&asoc->timers[i]) slock-AF_INET6 rcu_read_lock rcu_read_lock &ct->lock irq_context: softirq (&asoc->timers[i]) slock-AF_INET6 rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: softirq (&asoc->timers[i]) slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: softirq (&asoc->timers[i]) slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6 tk_core.seq.seqcount irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6 &obj_hash[i].lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6 &base->lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6 &base->lock &obj_hash[i].lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6 rcu_read_lock pool_lock#2 irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6 &zone->lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6 &zone->lock &____s->seqcount irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6 pool_lock#2 irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6 &c->lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6 &____s->seqcount#2 irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6 &____s->seqcount irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6 &list->lock#27 irq_context: softirq (&asoc->timers[i]) slock-AF_INET6 &c->lock irq_context: softirq (&asoc->timers[i]) slock-AF_INET6 &____s->seqcount#2 irq_context: softirq (&asoc->timers[i]) slock-AF_INET6 &____s->seqcount irq_context: 0 &xs->mutex &mm->mmap_lock rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &xs->mutex &mm->mmap_lock rcu_read_lock &rcu_state.gp_wq irq_context: 0 &xs->mutex &mm->mmap_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 &xs->mutex &mm->mmap_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 &xs->mutex &mm->mmap_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->bla.work)->work) rcu_read_lock pcpu_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->bla.work)->work) rcu_read_lock &obj_hash[i].lock irq_context: 0 &mm->mmap_lock ptlock_ptr(page)#2 rcu_read_lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 rtnl_mutex &tbl->lock &n->lock &meta->lock irq_context: 0 rtnl_mutex &tbl->lock &n->lock kfence_freelist_lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &nf_conntrack_locks[i]/1 irq_context: 0 &sb->s_type->i_mutex_key#10 
sk_lock-AF_INET6/1 fill_pool_map-wait-type-override &cfs_rq->removed.lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 fill_pool_map-wait-type-override &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 fill_pool_map-wait-type-override &obj_hash[i].lock pool_lock irq_context: 0 rtnl_mutex pool_lock irq_context: 0 (wq_completion)events (debug_obj_work).work &meta->lock irq_context: 0 (wq_completion)events (debug_obj_work).work kfence_freelist_lock irq_context: 0 rtnl_mutex rcu_read_lock &tb->tb6_lock rcu_read_lock batched_entropy_u8.lock irq_context: 0 rtnl_mutex rcu_read_lock &tb->tb6_lock rcu_read_lock kfence_freelist_lock irq_context: softirq &(&conn->disc_work)->timer irq_context: softirq &(&conn->disc_work)->timer rcu_read_lock &pool->lock/1 irq_context: softirq &(&conn->disc_work)->timer rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: softirq &(&conn->disc_work)->timer rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: softirq &(&conn->disc_work)->timer rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: softirq &(&conn->disc_work)->timer rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)hci3#2 (work_completion)(&(&conn->disc_work)->work) irq_context: 0 (wq_completion)hci3#2 (work_completion)(&(&conn->disc_work)->work) pool_lock#2 irq_context: 0 (wq_completion)hci3#2 (work_completion)(&(&conn->disc_work)->work) &c->lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&(&conn->disc_work)->work) &list->lock#9 irq_context: 0 (wq_completion)hci3#2 (work_completion)(&(&conn->disc_work)->work) rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)hci3#2 (work_completion)(&(&conn->disc_work)->work) rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&(&conn->disc_work)->work) rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&conn->pending_rx_work) irq_context: 0 (wq_completion)hci4#2 (work_completion)(&conn->pending_rx_work) &list->lock#13 irq_context: 0 rtnl_mutex dev_addr_sem rcu_read_lock &ei->socket.wq.wait &p->pi_lock &cfs_rq->removed.lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock quarantine_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle rcu_read_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 rtnl_mutex &idev->mc_lock remove_cache_srcu rcu_read_lock &rcu_state.gp_wq irq_context: 0 rtnl_mutex &idev->mc_lock remove_cache_srcu rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 (wq_completion)wg-crypt-wg0#11 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg0#11 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)wg-crypt-wg0#11 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &tbl->lock 
irq_context: 0 (wq_completion)wg-crypt-wg0#11 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &n->lock irq_context: 0 (wq_completion)wg-crypt-wg0#11 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#9 irq_context: 0 &sb->s_type->i_mutex_key#10 &xs->mutex rcu_state.exp_mutex &rnp->exp_wq[3] irq_context: 0 (wq_completion)events (work_completion)(&pool->work) rcu_read_lock &rcu_state.gp_wq irq_context: 0 (wq_completion)events (work_completion)(&pool->work) rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 (wq_completion)events (work_completion)(&pool->work) rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&pool->work) rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&aux->work) stock_lock rcu_read_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 (wq_completion)events (work_completion)(&pool->work) rtnl_mutex.wait_lock irq_context: 0 &sb->s_type->i_mutex_key#10 &smc->clcsock_release_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->dat.work)->work) &cfs_rq->removed.lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->dat.work)->work) pool_lock#2 irq_context: 0 rtnl_mutex &idev->mc_lock &bridge_netdev_addr_lock_key/1 &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &idev->mc_lock _xmit_ETHER krc.lock &obj_hash[i].lock irq_context: 0 rtnl_mutex &idev->mc_lock _xmit_ETHER krc.lock &base->lock irq_context: 0 rtnl_mutex &idev->mc_lock _xmit_ETHER krc.lock &base->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex _xmit_ETHER krc.lock &obj_hash[i].lock irq_context: 0 rtnl_mutex _xmit_ETHER krc.lock &base->lock irq_context: 0 rtnl_mutex _xmit_ETHER krc.lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&pool->work) &p->pi_lock irq_context: 0 (wq_completion)gid-cache-wq &rq->__lock irq_context: 0 (wq_completion)gid-cache-wq &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)gid-cache-wq (work_completion)(&work->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq (&icsk->icsk_delack_timer) slock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock &c->lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh &base->lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh &base->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex rcu_state.exp_mutex pgd_lock irq_context: 0 rtnl_mutex rcu_state.exp_mutex rcu_read_lock pool_lock#2 irq_context: 0 rtnl_mutex rcu_state.exp_mutex key irq_context: 0 rtnl_mutex rcu_state.exp_mutex pcpu_lock irq_context: 0 rtnl_mutex rcu_state.exp_mutex percpu_counters_lock irq_context: 0 (wq_completion)bat_events 
(work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex &bat_priv->tt.commit_lock &bat_priv->tt.last_changeset_lock &c->lock irq_context: softirq (&n->timer) k-slock-AF_INET6 rcu_read_lock &c->lock irq_context: softirq (&n->timer) k-slock-AF_INET6 rcu_read_lock init_task.mems_allowed_seq.seqcount irq_context: softirq (&n->timer) k-slock-AF_INET6 rcu_read_lock &____s->seqcount irq_context: softirq (&n->timer) k-slock-AF_INET6 rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: softirq (&n->timer) k-slock-AF_INET6 rcu_read_lock &n->list_lock irq_context: softirq (&n->timer) k-slock-AF_INET6 rcu_read_lock &n->list_lock &c->lock irq_context: softirq (&n->timer) &meta->lock irq_context: softirq (&n->timer) kfence_freelist_lock irq_context: 0 rtnl_mutex &net->xfrm.xfrm_state_lock irq_context: 0 rtnl_mutex &net->xfrm.xfrm_policy_lock irq_context: softirq (&n->timer) icmp_global.lock batched_entropy_u8.lock crngs.lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &rdev->bss_lock krc.lock &obj_hash[i].lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &rdev->bss_lock krc.lock &base->lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &rdev->bss_lock krc.lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &br->hash_lock kfence_freelist_lock irq_context: 0 rtnl_mutex &vlan_netdev_addr_lock_key/2 irq_context: 0 (wq_completion)cfg80211 (work_completion)(&(&rdev->dfs_update_channels_wk)->work) rtnl_mutex &cfs_rq->removed.lock irq_context: 0 (wq_completion)cfg80211 (work_completion)(&(&rdev->dfs_update_channels_wk)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)cfg80211 (work_completion)(&(&rdev->dfs_update_channels_wk)->work) rtnl_mutex pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&rdev->destroy_work) rtnl_mutex &rdev->wiphy.mtx irq_context: 0 (wq_completion)events (work_completion)(&rdev->destroy_work) rtnl_mutex &rdev->wiphy.mtx &wdev->mtx irq_context: 0 (wq_completion)events (work_completion)(&rdev->destroy_work) rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &wdev->pmsr_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->destroy_work) rtnl_mutex &rdev->wiphy.mtx &local->iflist_mtx irq_context: 0 (wq_completion)events (work_completion)(&rdev->destroy_work) rtnl_mutex &rdev->wiphy.mtx rcu_state.exp_mutex rcu_node_0 irq_context: 0 (wq_completion)events (work_completion)(&rdev->destroy_work) rtnl_mutex &rdev->wiphy.mtx rcu_state.exp_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->destroy_work) rtnl_mutex &rdev->wiphy.mtx rcu_state.exp_mutex rcu_read_lock &pool->lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->destroy_work) rtnl_mutex &rdev->wiphy.mtx rcu_state.exp_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->destroy_work) rtnl_mutex &rdev->wiphy.mtx rcu_state.exp_mutex &rnp->exp_wq[1] irq_context: 0 (wq_completion)events (work_completion)(&rdev->destroy_work) rtnl_mutex &rdev->wiphy.mtx rcu_state.exp_mutex &pool->lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->destroy_work) rtnl_mutex &rdev->wiphy.mtx rcu_state.exp_mutex &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->destroy_work) rtnl_mutex &rdev->wiphy.mtx rcu_state.exp_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&rdev->destroy_work) 
rtnl_mutex &rdev->wiphy.mtx &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->destroy_work) rtnl_mutex &rdev->wiphy.mtx fs_reclaim irq_context: 0 (wq_completion)events (work_completion)(&rdev->destroy_work) rtnl_mutex &rdev->wiphy.mtx fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events (work_completion)(&rdev->destroy_work) rtnl_mutex &rdev->wiphy.mtx &c->lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->destroy_work) rtnl_mutex &rdev->wiphy.mtx &n->list_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->destroy_work) rtnl_mutex &rdev->wiphy.mtx &n->list_lock &c->lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->destroy_work) rtnl_mutex &rdev->wiphy.mtx pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&rdev->destroy_work) rtnl_mutex &rdev->wiphy.mtx &fq->lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->destroy_work) rtnl_mutex &rdev->wiphy.mtx nl_table_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->destroy_work) rtnl_mutex &rdev->wiphy.mtx nl_table_wait.lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->destroy_work) rtnl_mutex &rdev->wiphy.mtx rcu_state.exp_mutex &rnp->exp_wq[2] irq_context: 0 (wq_completion)events (work_completion)(&rdev->destroy_work) rtnl_mutex &rdev->wiphy.mtx &rdev->mgmt_registrations_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->destroy_work) rtnl_mutex &rdev->wiphy.mtx &wdev->event_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->destroy_work) rtnl_mutex &rdev->wiphy.mtx (work_completion)(&(&sdata->dec_tailroom_needed_wk)->work) irq_context: 0 (wq_completion)events (work_completion)(&rdev->destroy_work) rtnl_mutex &rdev->wiphy.mtx &local->key_mtx irq_context: 0 (wq_completion)events (work_completion)(&rdev->destroy_work) rtnl_mutex &rdev->wiphy.mtx (work_completion)(&(&link->color_collision_detect_work)->work) irq_context: 0 (wq_completion)events (work_completion)(&rdev->destroy_work) rtnl_mutex &rdev->wiphy.mtx &local->chanctx_mtx irq_context: 0 (wq_completion)events (work_completion)(&rdev->destroy_work) rtnl_mutex &rdev->wiphy.mtx rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&rdev->destroy_work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->destroy_work) &p->pi_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->destroy_work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->destroy_work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&rdev->destroy_work) &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->destroy_work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem rtnl_mutex pcpu_alloc_mutex fs_reclaim irq_context: 0 pernet_ops_rwsem rtnl_mutex pcpu_alloc_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 pernet_ops_rwsem rtnl_mutex pcpu_alloc_mutex &____s->seqcount irq_context: 0 pernet_ops_rwsem rtnl_mutex pcpu_alloc_mutex pool_lock#2 irq_context: 0 (wq_completion)gid-cache-wq (work_completion)(&ndev_work->work) &base->lock irq_context: 0 (wq_completion)gid-cache-wq (work_completion)(&ndev_work->work) &base->lock &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex fs_reclaim irq_context: 0 pernet_ops_rwsem devices_rwsem 
rdma_nets_rwsem &device->compat_devs_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 rtnl_mutex &br->lock &____s->seqcount#2 irq_context: 0 rtnl_mutex &br->lock &____s->seqcount irq_context: 0 rtnl_mutex &br->lock &br->hash_lock &____s->seqcount#2 irq_context: 0 rtnl_mutex &tbl->lock &obj_hash[i].lock pool_lock irq_context: softirq &(&forw_packet_aggr->delayed_work)->timer rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &obj_hash[i].lock irq_context: softirq &(&forw_packet_aggr->delayed_work)->timer rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &base->lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#7 &nsim_trap_data->trap_lock batched_entropy_u8.lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#7 &nsim_trap_data->trap_lock kfence_freelist_lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#7 &nsim_trap_data->trap_lock &meta->lock irq_context: 0 (wq_completion)wg-crypt-wg1#7 (work_completion)(&peer->transmit_packet_work) rcu_node_0 irq_context: 0 (wq_completion)wg-crypt-wg1#7 (work_completion)(&peer->transmit_packet_work) &rcu_state.expedited_wq irq_context: 0 (wq_completion)wg-crypt-wg1#7 (work_completion)(&peer->transmit_packet_work) &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)wg-crypt-wg1#7 (work_completion)(&peer->transmit_packet_work) &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)wg-crypt-wg1#7 (work_completion)(&peer->transmit_packet_work) &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-crypt-wg1#7 (work_completion)(&peer->transmit_packet_work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&w->work)#2 &base->lock irq_context: 0 (wq_completion)events (work_completion)(&w->work)#2 &base->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex &idev->mc_lock _xmit_ETHER/1 &obj_hash[i].lock pool_lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &____s->seqcount#2 irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &____s->seqcount irq_context: 0 rtnl_mutex &rdev->wiphy.mtx _xmit_ETHER &local->filter_lock &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem remove_cache_srcu &cfs_rq->removed.lock irq_context: 0 rtnl_mutex &tbl->lock quarantine_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &dev->mutex &rq->__lock irq_context: 0 (wq_completion)wg-crypt-wg2#7 (work_completion)(&peer->transmit_packet_work) batched_entropy_u8.lock crngs.lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&port->bc_work) rcu_read_lock &c->lock irq_context: 0 (wq_completion)wg-crypt-wg1#7 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_node_0 irq_context: 0 (wq_completion)wg-crypt-wg1#7 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) 
*)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &rq->__lock irq_context: 0 (wq_completion)wg-crypt-wg1#7 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#8 rcu_read_lock &cfs_rq->removed.lock irq_context: 0 sb_writers#8 rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&sdp->work) &x->wait#3 &p->pi_lock &rq->__lock &obj_hash[i].lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&sdp->work) &x->wait#3 &p->pi_lock &rq->__lock &base->lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&sdp->work) &x->wait#3 &p->pi_lock &rq->__lock &base->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex &idev->mc_lock &batadv_netdev_addr_lock_key/1 &obj_hash[i].lock pool_lock irq_context: 0 rtnl_mutex &idev->mc_lock batched_entropy_u8.lock irq_context: 0 rtnl_mutex &idev->mc_lock kfence_freelist_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &rcu_state.expedited_wq irq_context: 0 &xt[i].mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &cfs_rq->removed.lock irq_context: softirq &(&net->ipv6.addr_chk_work)->timer rcu_read_lock &pool->lock pool_lock#2 irq_context: softirq (&p->forward_delay_timer) &br->lock rcu_read_lock &____s->seqcount#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&net->ipv6.addr_chk_work)->work) rtnl_mutex &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&net->ipv6.addr_chk_work)->work) rtnl_mutex &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex rcu_read_lock krc.lock &obj_hash[i].lock irq_context: 0 rtnl_mutex rcu_read_lock krc.lock &base->lock irq_context: 0 rtnl_mutex rcu_read_lock krc.lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&cache_cleaner)->work) rcu_node_0 irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&cache_cleaner)->work) &rcu_state.expedited_wq irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&cache_cleaner)->work) &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&cache_cleaner)->work) &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&cache_cleaner)->work) &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&cache_cleaner)->work) &rq->__lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&cache_cleaner)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex lweventlist_lock batched_entropy_u8.lock irq_context: 0 rtnl_mutex lweventlist_lock kfence_freelist_lock irq_context: 0 rtnl_mutex _xmit_ETHER &obj_hash[i].lock pool_lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx rcu_state.exp_mutex &rnp->exp_wq[3] irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &root->kernfs_rwsem &root->kernfs_iattr_rwsem &rq->__lock 
irq_context: 0 cb_lock rtnl_mutex &rq->__lock &cfs_rq->removed.lock irq_context: 0 cb_lock &p->pi_lock &rq->__lock &obj_hash[i].lock irq_context: 0 cb_lock &p->pi_lock &rq->__lock &base->lock irq_context: 0 cb_lock &p->pi_lock &rq->__lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&p->wq) rcu_read_lock &rcu_state.expedited_wq irq_context: 0 (wq_completion)events (work_completion)(&p->wq) rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)wg-crypt-wg0#7 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)wg-crypt-wg0#7 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &____s->seqcount#7 irq_context: 0 (wq_completion)wg-crypt-wg0#7 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &nf_nat_locks[i] irq_context: 0 (wq_completion)wg-crypt-wg0#7 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &nf_conntrack_locks[i] irq_context: 0 (wq_completion)wg-crypt-wg0#7 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 irq_context: 0 (wq_completion)wg-crypt-wg0#7 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 batched_entropy_u8.lock irq_context: 0 (wq_completion)wg-crypt-wg0#7 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &nf_conntrack_locks[i]/1 irq_context: 0 (wq_completion)wg-crypt-wg0#7 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg0#7 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)wg-crypt-wg0#7 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)wg-crypt-wg0#7 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)wg-crypt-wg0#7 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&data->fib_event_work) &data->fib_lock rcu_read_lock &rcu_state.expedited_wq irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock init_task.mems_allowed_seq.seqcount irq_context: softirq init_task.mems_allowed_seq.seqcount irq_context: 0 rtnl_mutex &tbl->lock krc.lock &obj_hash[i].lock irq_context: 0 rtnl_mutex &tbl->lock krc.lock &base->lock irq_context: 0 rtnl_mutex &tbl->lock krc.lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&data->fib_event_work) &data->fib_lock rcu_node_0 irq_context: 0 (wq_completion)events (work_completion)(&data->fib_event_work) &data->fib_lock &rcu_state.expedited_wq irq_context: 0 (wq_completion)events (work_completion)(&data->fib_event_work) &data->fib_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 
(wq_completion)events (work_completion)(&data->fib_event_work) &data->fib_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&data->fib_event_work) &data->fib_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &rdev->wiphy.mtx (wq_completion)phy15 irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &obj_hash[i].lock pool_lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx (wq_completion)phy16 irq_context: 0 pernet_ops_rwsem rtnl_mutex &cfs_rq->removed.lock irq_context: 0 (wq_completion)events (linkwatch_work).work &p->pi_lock &rq->__lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (linkwatch_work).work &p->pi_lock &rq->__lock &base->lock irq_context: 0 (wq_completion)events (linkwatch_work).work &p->pi_lock &rq->__lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&net->ipv6.addr_chk_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&net->ipv6.addr_chk_work)->work) &p->pi_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&net->ipv6.addr_chk_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&net->ipv6.addr_chk_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&rdev->destroy_work) rtnl_mutex &rdev->wiphy.mtx rcu_state.exp_mutex &pool->lock &p->pi_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->destroy_work) rtnl_mutex &rdev->wiphy.mtx rcu_state.exp_mutex &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->destroy_work) rtnl_mutex &rdev->wiphy.mtx rcu_state.exp_mutex &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock rcu_node_0 irq_context: 0 cb_lock &rcu_state.expedited_wq irq_context: 0 cb_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 cb_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 cb_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem rtnl_mutex sysctl_lock fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 pernet_ops_rwsem rtnl_mutex sysctl_lock fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 pernet_ops_rwsem rcu_state.barrier_mutex rcu_node_0 irq_context: 0 pernet_ops_rwsem rcu_state.barrier_mutex &rcu_state.expedited_wq irq_context: 0 pernet_ops_rwsem rcu_state.barrier_mutex &rcu_state.expedited_wq &p->pi_lock irq_context: 0 pernet_ops_rwsem rcu_state.barrier_mutex &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 pernet_ops_rwsem rcu_state.barrier_mutex &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem rcu_read_lock &rcu_state.expedited_wq irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &____s->seqcount#2 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->tt.work)->work) 
&rcu_state.expedited_wq irq_context: 0 (wq_completion)rcu_gp (work_completion)(&rew->rew_work) rcu_state.exp_wake_mutex &rnp->exp_wq[0] &p->pi_lock &cfs_rq->removed.lock irq_context: 0 pernet_ops_rwsem rcu_state.barrier_mutex &rq->__lock &cfs_rq->removed.lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &rcu_state.expedited_wq irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->destroy_work) rtnl_mutex &rdev->wiphy.mtx rcu_state.exp_mutex &rnp->exp_wq[3] irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem rcu_read_lock &rcu_state.expedited_wq irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&rdev->destroy_work) rtnl_mutex &rdev->wiphy.mtx rcu_state.exp_mutex &rnp->exp_wq[0] irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 &rcu_state.expedited_wq irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 &rcu_state.expedited_wq &p->pi_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &caifn->caifdevs.lock rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &caifn->caifdevs.lock rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx rcu_read_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->destroy_work) rtnl_mutex &rdev->wiphy.mtx &____s->seqcount#2 irq_context: 0 (wq_completion)events (work_completion)(&rdev->destroy_work) rtnl_mutex &rdev->wiphy.mtx &____s->seqcount irq_context: 0 (wq_completion)events (work_completion)(&rdev->destroy_work) rtnl_mutex &lock->wait_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->destroy_work) rtnl_mutex &p->pi_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->destroy_work) rtnl_mutex &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->destroy_work) rtnl_mutex &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#8 lock#4 rcu_read_lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#8 lock#4 &obj_hash[i].lock irq_context: softirq &(&net->ipv6.addr_chk_work)->timer rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: softirq &(&net->ipv6.addr_chk_work)->timer rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->bla.work)->work) rcu_read_lock batched_entropy_u8.lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->bla.work)->work) rcu_read_lock kfence_freelist_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->bla.work)->work) rcu_read_lock &meta->lock irq_context: 0 pernet_ops_rwsem stock_lock rcu_read_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 (wq_completion)inet_frag_wq (work_completion)(&fqdir->destroy_work) rcu_read_lock &pool->lock &p->pi_lock &rq->__lock 
irq_context: 0 (wq_completion)inet_frag_wq (work_completion)(&fqdir->destroy_work) rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem sysctl_lock krc.lock &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem sysctl_lock krc.lock &base->lock irq_context: 0 pernet_ops_rwsem sysctl_lock krc.lock &base->lock &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem krc.lock &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem krc.lock &base->lock irq_context: 0 pernet_ops_rwsem krc.lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events free_ipc_work &rnp->exp_wq[2] irq_context: 0 (wq_completion)events free_ipc_work &pool->lock irq_context: 0 (wq_completion)events (work_completion)(&s->destroy_work) &base->lock irq_context: 0 (wq_completion)events (work_completion)(&s->destroy_work) &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle key#4 irq_context: 0 sk_lock-AF_INET6 &sctp_port_hashtable[i].lock batched_entropy_u8.lock irq_context: 0 sk_lock-AF_INET6 &sctp_port_hashtable[i].lock kfence_freelist_lock irq_context: 0 sk_lock-AF_INET6 &mm->mmap_lock rcu_read_lock rcu_node_0 irq_context: 0 sk_lock-AF_INET6 &mm->mmap_lock rcu_read_lock &rq->__lock irq_context: 0 sk_lock-AF_INET6 &mm->mmap_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rcu_read_lock rcu_read_lock_bh quarantine_lock irq_context: 0 &mm->mmap_lock sb_pagefaults jbd2_handle &journal->j_state_lock irq_context: 0 &mm->mmap_lock sb_pagefaults jbd2_handle &journal->j_state_lock &journal->j_wait_commit irq_context: 0 &mm->mmap_lock sb_pagefaults jbd2_handle &journal->j_state_lock &journal->j_wait_commit &p->pi_lock irq_context: 0 &mm->mmap_lock sb_pagefaults jbd2_handle &journal->j_state_lock &journal->j_wait_commit &p->pi_lock &cfs_rq->removed.lock irq_context: 0 &mm->mmap_lock sb_pagefaults jbd2_handle &journal->j_state_lock &journal->j_wait_commit &p->pi_lock &rq->__lock irq_context: 0 &mm->mmap_lock sb_pagefaults jbd2_handle &journal->j_state_lock &journal->j_wait_commit &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rcu_read_lock rcu_read_lock_bh &meta->lock irq_context: 0 rcu_read_lock rcu_read_lock_bh kfence_freelist_lock irq_context: 0 pernet_ops_rwsem uevent_sock_mutex &____s->seqcount#2 irq_context: 0 &mm->mmap_lock sb_pagefaults jbd2_handle &journal->j_wait_updates &p->pi_lock irq_context: 0 &mm->mmap_lock sb_pagefaults jbd2_handle &journal->j_wait_updates &p->pi_lock &rq->__lock irq_context: 0 &mm->mmap_lock sb_pagefaults jbd2_handle &journal->j_wait_updates &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rcu_read_lock rcu_read_lock_bh noop_qdisc.busylock irq_context: 0 rcu_read_lock rcu_read_lock_bh noop_qdisc.busylock noop_qdisc.q.lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &idev->mc_lock fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 fill_pool_map-wait-type-override &pcp->lock &zone->lock &____s->seqcount irq_context: 0 pernet_ops_rwsem rtnl_mutex &idev->mc_lock fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &n->list_lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &n->list_lock &c->lock irq_context: 0 cb_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#2 irq_context: 0 cb_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 
quarantine_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->destroy_work) rtnl_mutex &pool->lock &p->pi_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->destroy_work) rtnl_mutex &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->destroy_work) rtnl_mutex &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 rcu_read_lock &rcu_state.gp_wq irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 rcu_read_lock &cfs_rq->removed.lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 rcu_read_lock &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_state.exp_mutex fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_state.exp_mutex fill_pool_map-wait-type-override pool_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex dev_pm_qos_sysfs_mtx &rq->__lock irq_context: 0 pernet_ops_rwsem rtnl_mutex dev_pm_qos_sysfs_mtx &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx fill_pool_map-wait-type-override pool_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 fill_pool_map-wait-type-override pool_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 fill_pool_map-wait-type-override &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 fill_pool_map-wait-type-override &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock rcu_read_lock rcu_node_0 irq_context: 0 &f->f_pos_lock rcu_read_lock &rq->__lock irq_context: 0 &f->f_pos_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock rcu_read_lock &rcu_state.gp_wq irq_context: 0 &f->f_pos_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 &f->f_pos_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 &f->f_pos_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock rcu_read_lock &rcu_state.expedited_wq irq_context: 0 &f->f_pos_lock rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 &f->f_pos_lock rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 &f->f_pos_lock rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#8 rcu_read_lock &n->list_lock &c->lock irq_context: 0 pernet_ops_rwsem rtnl_mutex dev_pm_qos_sysfs_mtx &root->kernfs_rwsem &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&p->wq) rcu_node_0 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 rcu_read_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &sb->s_type->i_mutex_key#10 &net->sctp.addr_wq_lock slock-AF_INET6/1 &sctp_port_hashtable[i].lock &meta->lock irq_context: 0 &sb->s_type->i_mutex_key#10 &net->sctp.addr_wq_lock 
slock-AF_INET6/1 &sctp_port_hashtable[i].lock kfence_freelist_lock irq_context: 0 &rnp->exp_wq[2] irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->nc.work)->work) &obj_hash[i].lock pool_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex dev_pm_qos_sysfs_mtx &root->kernfs_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_state.barrier_mutex rcu_node_0 irq_context: 0 (wq_completion)events free_ipc_work pool_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle batched_entropy_u8.lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle kfence_freelist_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->destroy_work) rtnl_mutex &rdev->wiphy.mtx remove_cache_srcu irq_context: 0 (wq_completion)events (work_completion)(&rdev->destroy_work) rtnl_mutex &rdev->wiphy.mtx remove_cache_srcu quarantine_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->destroy_work) rtnl_mutex &rdev->wiphy.mtx remove_cache_srcu &c->lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->destroy_work) rtnl_mutex &rdev->wiphy.mtx remove_cache_srcu &n->list_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->destroy_work) rtnl_mutex &rdev->wiphy.mtx remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&rdev->destroy_work) rtnl_mutex &rdev->wiphy.mtx remove_cache_srcu &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->destroy_work) rtnl_mutex &rdev->wiphy.mtx remove_cache_srcu &pcp->lock &zone->lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->destroy_work) rtnl_mutex &rdev->wiphy.mtx remove_cache_srcu &pcp->lock &zone->lock &____s->seqcount irq_context: 0 (wq_completion)events (work_completion)(&rdev->destroy_work) rtnl_mutex &rdev->wiphy.mtx remove_cache_srcu pool_lock#2 irq_context: 0 cb_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &n->list_lock irq_context: 0 cb_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &n->list_lock &c->lock irq_context: 0 pernet_ops_rwsem lock kernfs_idr_lock &n->list_lock irq_context: 0 pernet_ops_rwsem lock kernfs_idr_lock &n->list_lock &c->lock irq_context: 0 pernet_ops_rwsem proc_inum_ida.xa_lock &c->lock irq_context: 0 &mm->mmap_lock batched_entropy_u32.lock crngs.lock irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex defrag4_mutex nf_hook_mutex &rq->__lock irq_context: softirq rcu_read_lock &rq->__lock &obj_hash[i].lock irq_context: softirq rcu_read_lock &rq->__lock &base->lock irq_context: softirq rcu_read_lock &rq->__lock &base->lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET batched_entropy_u32.lock crngs.lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &ei->i_data_sem mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &ei->i_data_sem mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 remove_cache_srcu &rq->__lock &cfs_rq->removed.lock irq_context: 0 &sb->s_type->i_mutex_key#8 &pcp->lock &zone->lock &____s->seqcount irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#40 &____s->seqcount#2 irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex defrag6_mutex nf_hook_mutex &n->list_lock irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex defrag6_mutex nf_hook_mutex &n->list_lock &c->lock irq_context: 
0 pernet_ops_rwsem nf_ct_proto_mutex defrag6_mutex nf_hook_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_unbound (work_completion)(&map->work) pcpu_lock stock_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock rcu_read_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 mapping.invalidate_lock rcu_read_lock &c->lock irq_context: 0 &sb->s_type->i_mutex_key#8 &mm->mmap_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &mm->mmap_lock ptlock_ptr(page)#2 irq_context: 0 &sb->s_type->i_mutex_key#8 &mm->mmap_lock fs_reclaim irq_context: 0 &sb->s_type->i_mutex_key#8 &mm->mmap_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &sb->s_type->i_mutex_key#8 &mm->mmap_lock &____s->seqcount irq_context: 0 &sb->s_type->i_mutex_key#8 &mm->mmap_lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#8 &mm->mmap_lock stock_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &mm->mmap_lock ptlock_ptr(page)#2 lock#4 irq_context: 0 &sb->s_type->i_mutex_key#8 &mm->mmap_lock ptlock_ptr(page)#2 lock#4 &lruvec->lru_lock irq_context: 0 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock irq_context: 0 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock mmu_notifier_invalidate_range_start irq_context: 0 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &____s->seqcount irq_context: 0 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock stock_lock irq_context: 0 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &xa->xa_lock#6 irq_context: 0 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock lock#4 irq_context: 0 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock lock#4 &lruvec->lru_lock irq_context: 0 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &pcp->lock &zone->lock irq_context: 0 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &ei->i_es_lock irq_context: 0 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock tk_core.seq.seqcount irq_context: 0 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &dd->lock irq_context: 0 &sb->s_type->i_mutex_key#8 &mm->mmap_lock ptlock_ptr(page)#2 key irq_context: 0 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &xa->xa_lock#6 stock_lock irq_context: 0 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &xa->xa_lock#6 pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &xa->xa_lock#6 &c->lock irq_context: 0 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &c->lock irq_context: 0 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock rcu_read_lock &dd->lock irq_context: 0 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock rcu_read_lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock rcu_read_lock tk_core.seq.seqcount irq_context: 0 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock rcu_read_lock &virtscsi_vq->vq_lock irq_context: 0 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &folio_wait_table[i] irq_context: 0 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#8 
mapping.invalidate_lock rcu_read_lock &pool->lock irq_context: 0 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#8 &mm->mmap_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#8 &mm->mmap_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#8 &mm->mmap_lock &xa->xa_lock#6 irq_context: 0 &sb->s_type->i_mutex_key#8 &mm->mmap_lock lock#4 irq_context: 0 &sb->s_type->i_mutex_key#8 &mm->mmap_lock &info->lock irq_context: 0 &sb->s_type->i_mutex_key#8 &mm->mmap_lock &folio_wait_table[i] irq_context: 0 &sb->s_type->i_mutex_key#8 &mm->mmap_lock &folio_wait_table[i] &p->pi_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &mm->mmap_lock &folio_wait_table[i] &p->pi_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#8 &mm->mmap_lock &folio_wait_table[i] &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#8 &mm->mmap_lock tk_core.seq.seqcount irq_context: 0 &sb->s_type->i_mutex_key#8 &mm->mmap_lock rcu_read_lock rcu_node_0 irq_context: 0 &sb->s_type->i_mutex_key#8 &mm->mmap_lock rcu_read_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#8 &mm->mmap_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#8 &mm->mmap_lock fs_reclaim &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#8 &mm->mmap_lock fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#8 &mm->mmap_lock rcu_read_lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#8 &mm->mmap_lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#8 &mm->mmap_lock mount_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &mm->mmap_lock &sb->s_type->i_lock_key irq_context: 0 &sb->s_type->i_mutex_key#8 &mm->mmap_lock &wb->list_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &mm->mmap_lock &wb->list_lock &sb->s_type->i_lock_key irq_context: 0 &sb->s_type->i_mutex_key#8 &mm->mmap_lock &pcp->lock &zone->lock irq_context: 0 &sb->s_type->i_mutex_key#8 &mm->mmap_lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &sb->s_type->i_mutex_key#8 &mm->mmap_lock lock#4 &lruvec->lru_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &mm->mmap_lock &xa->xa_lock#6 stock_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &mm->mmap_lock &xa->xa_lock#6 pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#8 &mm->mmap_lock &folio_wait_table[i] &p->pi_lock &cfs_rq->removed.lock irq_context: 0 &sb->s_type->i_mutex_key#8 &mm->mmap_lock rcu_read_lock &rcu_state.gp_wq irq_context: 0 &sb->s_type->i_mutex_key#8 &mm->mmap_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &mm->mmap_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#8 &mm->mmap_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#8 &mm->mmap_lock &sem->wait_lock irq_context: 0 
&sb->s_type->i_mutex_key#8 &mm->mmap_lock &dd->lock irq_context: 0 &sb->s_type->i_mutex_key#8 &mm->mmap_lock rcu_read_lock &pool->lock irq_context: 0 &sb->s_type->i_mutex_key#8 &mm->mmap_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#8 &mm->mmap_lock rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#8 &mm->mmap_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &mm->mmap_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#8 &mm->mmap_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events free_ipc_work &rnp->exp_wq[1] irq_context: 0 (wq_completion)events free_ipc_work per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 &sb->s_type->i_mutex_key#8 &p->pi_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 &sb->s_type->i_mutex_key#8 &p->pi_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#8 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &____s->seqcount#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &____s->seqcount irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 jbd2_handle &c->lock irq_context: 0 rtnl_mutex &block->lock irq_context: 0 rtnl_mutex &block->cb_lock irq_context: 0 rtnl_mutex &block->cb_lock flow_indr_block_lock irq_context: 0 rtnl_mutex &block->cb_lock flow_indr_block_lock fs_reclaim irq_context: 0 rtnl_mutex &block->cb_lock flow_indr_block_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 rtnl_mutex &block->cb_lock flow_indr_block_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 rtnl_mutex &block->cb_lock flow_indr_block_lock pool_lock#2 irq_context: 0 rtnl_mutex cpu_hotplug_lock jump_label_mutex text_mutex irq_context: 0 rtnl_mutex cpu_hotplug_lock jump_label_mutex text_mutex ptlock_ptr(page)#2 irq_context: 0 rtnl_mutex (work_completion)(&q->work) irq_context: 0 rtnl_mutex &block->cb_lock flow_indr_block_lock &obj_hash[i].lock irq_context: 0 rtnl_mutex &block->cb_lock flow_indr_block_lock &c->lock irq_context: 0 rtnl_mutex cpu_hotplug_lock xps_map_mutex &rq->__lock irq_context: 0 &type->s_umount_key#22/1 &xa->xa_lock#12 &____s->seqcount#2 irq_context: 0 &type->s_umount_key#22/1 &xa->xa_lock#12 &____s->seqcount irq_context: 0 pernet_ops_rwsem rtnl_mutex fill_pool_map-wait-type-override rcu_read_lock rcu_node_0 irq_context: 0 pernet_ops_rwsem rtnl_mutex fill_pool_map-wait-type-override rcu_read_lock &rq->__lock irq_context: 0 &mm->mmap_lock &xa->xa_lock#6 stock_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 (wq_completion)wg-crypt-wg2#4 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)wg-crypt-wg2#4 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg2#4 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 (wq_completion)wg-crypt-wg2#4 (work_completion)(&peer->transmit_packet_work) 
rcu_read_lock_bh rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)wg-kex-wg2#7 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg2#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)wg-kex-wg2#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex rcu_read_lock fill_pool_map-wait-type-override &n->list_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex rcu_read_lock fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 &mm->mmap_lock rcu_read_lock rcu_read_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)gid-cache-wq (work_completion)(&ndev_work->work) devices_rwsem &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem nl_table_lock nl_table_wait.lock irq_context: 0 pernet_ops_rwsem rtnl_mutex uevent_sock_mutex nl_table_wait.lock &p->pi_lock &rq->__lock irq_context: 0 pernet_ops_rwsem rtnl_mutex uevent_sock_mutex nl_table_wait.lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_read_lock &base->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_read_lock &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &pcp->lock &zone->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &pcp->lock &zone->lock &____s->seqcount irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &n->list_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &n->list_lock &c->lock irq_context: 0 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock rcu_node_0 irq_context: 0 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &n->list_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &n->list_lock &c->lock irq_context: 0 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock rcu_read_lock &c->lock irq_context: 0 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock rcu_read_lock &____s->seqcount#2 irq_context: 0 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock rcu_read_lock &____s->seqcount irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &____s->seqcount#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &____s->seqcount 
irq_context: 0 pernet_ops_rwsem rtnl_mutex uevent_sock_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 pernet_ops_rwsem rtnl_mutex uevent_sock_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex fill_pool_map-wait-type-override &n->list_lock irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: softirq &ei->i_completed_io_lock rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex nf_ct_proto_mutex.wait_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex &rq->__lock &cfs_rq->removed.lock irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex.wait_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex &obj_hash[i].lock pool_lock irq_context: 0 rcu_read_lock mount_lock mount_lock.seqcount &p->pi_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_data_sem rcu_read_lock &rcu_state.expedited_wq irq_context: 0 rcu_read_lock mount_lock mount_lock.seqcount &p->pi_lock &rq->__lock irq_context: 0 rcu_read_lock mount_lock mount_lock.seqcount &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_data_sem rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_data_sem rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-sk_lock-AF_TIPC &tn->nametbl_lock &obj_hash[i].lock pool_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_data_sem rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock ptlock_ptr(page)#2 rcu_read_lock &p->pi_lock &rq->__lock irq_context: 0 &mm->mmap_lock ptlock_ptr(page)#2 rcu_read_lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 sb_internal remove_cache_srcu &pcp->lock &zone->lock irq_context: 0 sb_writers#4 sb_internal remove_cache_srcu &pcp->lock &zone->lock &____s->seqcount irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock &pcp->lock &zone->lock irq_context: 0 mapping.invalidate_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 mapping.invalidate_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 sb_writers#5 rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#5 rcu_read_lock &rq->__lock irq_context: 0 sb_writers#5 rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sig->cred_guard_mutex tomoyo_ss rcu_node_0 irq_context: 0 &sb->s_type->i_mutex_key#8 &mm->mmap_lock mapping.invalidate_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &mm->mmap_lock mapping.invalidate_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#8 &mm->mmap_lock mapping.invalidate_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 
&sb->s_type->i_mutex_key#8 mapping.invalidate_lock &sem->wait_lock irq_context: 0 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &p->pi_lock irq_context: 0 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &p->pi_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#8 &mm->mmap_lock &xa->xa_lock#6 &c->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) tasklist_lock &n->list_lock irq_context: 0 sk_lock-AF_BLUETOOTH-BTPROTO_HCI mgmt_chan_list_lock irq_context: 0 sk_lock-AF_BLUETOOTH-BTPROTO_HCI mgmt_chan_list_lock pool_lock#2 irq_context: 0 sk_lock-AF_BLUETOOTH-BTPROTO_HCI mgmt_chan_list_lock tk_core.seq.seqcount irq_context: 0 sk_lock-AF_BLUETOOTH-BTPROTO_HCI mgmt_chan_list_lock hci_sk_list.lock irq_context: 0 sk_lock-AF_BLUETOOTH-BTPROTO_HCI mgmt_chan_list_lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_BLUETOOTH-BTPROTO_HCI mgmt_chan_list_lock hci_dev_list_lock irq_context: 0 sk_lock-AF_BLUETOOTH-BTPROTO_HCI mgmt_chan_list_lock (console_sem).lock irq_context: 0 sk_lock-AF_BLUETOOTH-BTPROTO_HCI mgmt_chan_list_lock console_lock console_srcu console_owner_lock irq_context: 0 sk_lock-AF_BLUETOOTH-BTPROTO_HCI mgmt_chan_list_lock console_lock console_srcu console_owner irq_context: 0 sk_lock-AF_BLUETOOTH-BTPROTO_HCI mgmt_chan_list_lock console_lock console_srcu console_owner &port_lock_key irq_context: 0 sk_lock-AF_BLUETOOTH-BTPROTO_HCI mgmt_chan_list_lock console_lock console_srcu console_owner console_owner_lock irq_context: 0 sk_lock-AF_BLUETOOTH-BTPROTO_HCI mgmt_chan_list_lock &rq->__lock irq_context: 0 sk_lock-AF_BLUETOOTH-BTPROTO_HCI mgmt_chan_list_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex sk_lock-AF_INET6 irq_context: 0 rtnl_mutex sk_lock-AF_INET6 slock-AF_INET6 irq_context: 0 rtnl_mutex sk_lock-AF_INET6 &mm->mmap_lock irq_context: 0 rtnl_mutex sk_lock-AF_INET6 fs_reclaim irq_context: 0 rtnl_mutex sk_lock-AF_INET6 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 rtnl_mutex sk_lock-AF_INET6 fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 rtnl_mutex sk_lock-AF_INET6 pool_lock#2 irq_context: 0 rtnl_mutex sk_lock-AF_INET6 &idev->mc_lock irq_context: 0 rtnl_mutex sk_lock-AF_INET6 &idev->mc_lock fs_reclaim irq_context: 0 rtnl_mutex sk_lock-AF_INET6 &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 rtnl_mutex sk_lock-AF_INET6 &idev->mc_lock pool_lock#2 irq_context: 0 rtnl_mutex slock-AF_INET6 irq_context: 0 rtnl_mutex sk_lock-AF_INET6 &obj_hash[i].lock irq_context: 0 rtnl_mutex sk_lock-AF_INET6 &c->lock irq_context: 0 &sb->s_type->i_mutex_key#10 rtnl_mutex irq_context: 0 &sb->s_type->i_mutex_key#10 rtnl_mutex &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 rtnl_mutex sk_lock-AF_INET6 irq_context: 0 &sb->s_type->i_mutex_key#10 rtnl_mutex sk_lock-AF_INET6 slock-AF_INET6 irq_context: 0 &sb->s_type->i_mutex_key#10 rtnl_mutex sk_lock-AF_INET6 &idev->mc_lock irq_context: 0 &sb->s_type->i_mutex_key#10 rtnl_mutex sk_lock-AF_INET6 &idev->mc_lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 rtnl_mutex sk_lock-AF_INET6 &idev->mc_lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 rtnl_mutex sk_lock-AF_INET6 &idev->mc_lock krc.lock irq_context: 0 &sb->s_type->i_mutex_key#10 rtnl_mutex sk_lock-AF_INET6 &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 rtnl_mutex sk_lock-AF_INET6 pool_lock#2 
irq_context: 0 &sb->s_type->i_mutex_key#10 rtnl_mutex sk_lock-AF_INET6 krc.lock irq_context: 0 &sb->s_type->i_mutex_key#10 rtnl_mutex slock-AF_INET6 irq_context: 0 &sb->s_type->i_mutex_key#10 rtnl_mutex sk_lock-AF_INET6 &idev->mc_lock &rq->__lock irq_context: 0 sk_lock-AF_BLUETOOTH-BTPROTO_HCI mgmt_chan_list_lock &hdev->lock irq_context: 0 sk_lock-AF_BLUETOOTH-BTPROTO_HCI mgmt_chan_list_lock fs_reclaim irq_context: 0 sk_lock-AF_BLUETOOTH-BTPROTO_HCI mgmt_chan_list_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sk_lock-AF_BLUETOOTH-BTPROTO_HCI mgmt_chan_list_lock rlock-AF_BLUETOOTH irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_data_sem batched_entropy_u8.lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_data_sem kfence_freelist_lock irq_context: 0 rtnl_mutex sk_lock-AF_INET6 &idev->mc_lock &c->lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &anon_vma->rwsem rcu_node_0 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &mm->mmap_lock &base->lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &mm->mmap_lock &base->lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_BLUETOOTH-BTPROTO_HCI mgmt_chan_list_lock &c->lock irq_context: 0 sock_diag_mutex irq_context: 0 sock_diag_mutex sock_diag_table_mutex irq_context: 0 sock_diag_mutex sock_diag_table_mutex nlk_cb_mutex-SOCK_DIAG irq_context: 0 sock_diag_mutex sock_diag_table_mutex nlk_cb_mutex-SOCK_DIAG fs_reclaim irq_context: 0 sock_diag_mutex sock_diag_table_mutex nlk_cb_mutex-SOCK_DIAG fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sock_diag_mutex sock_diag_table_mutex nlk_cb_mutex-SOCK_DIAG fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 sock_diag_mutex sock_diag_table_mutex nlk_cb_mutex-SOCK_DIAG pool_lock#2 irq_context: 0 sock_diag_mutex sock_diag_table_mutex nlk_cb_mutex-SOCK_DIAG inet_diag_table_mutex irq_context: 0 sock_diag_mutex sock_diag_table_mutex nlk_cb_mutex-SOCK_DIAG inet_diag_table_mutex rcu_read_lock &h->lhash2[i].lock irq_context: 0 sock_diag_mutex sock_diag_table_mutex nlk_cb_mutex-SOCK_DIAG inet_diag_table_mutex rcu_read_lock rcu_node_0 irq_context: 0 sock_diag_mutex sock_diag_table_mutex nlk_cb_mutex-SOCK_DIAG inet_diag_table_mutex rcu_read_lock &rq->__lock irq_context: 0 sock_diag_mutex sock_diag_table_mutex nlk_cb_mutex-SOCK_DIAG inet_diag_table_mutex &rq->__lock irq_context: 0 sock_diag_mutex sock_diag_table_mutex nlk_cb_mutex-SOCK_DIAG inet_diag_table_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sock_diag_mutex sock_diag_table_mutex nlk_cb_mutex-SOCK_DIAG inet_diag_table_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sock_diag_mutex sock_diag_table_mutex nlk_cb_mutex-SOCK_DIAG rlock-AF_NETLINK irq_context: 0 sock_diag_mutex sock_diag_table_mutex nlk_cb_mutex-SOCK_DIAG &obj_hash[i].lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) &base->lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) &base->lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 ip6_ra_lock irq_context: 0 &sb->s_type->i_mutex_key#8 mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_read_lock rcu_read_lock &rq->__lock 
irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex sk_lock-AF_INET6 &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 rtnl_mutex sk_lock-AF_INET6 &obj_hash[i].lock pool_lock irq_context: 0 &sb->s_type->i_mutex_key#10 rtnl_mutex sk_lock-AF_INET6 &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 rtnl_mutex sk_lock-AF_INET6 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock console_owner_lock irq_context: 0 cb_lock console_owner irq_context: 0 sock_diag_mutex sock_diag_table_mutex nlk_cb_mutex-SOCK_DIAG &c->lock irq_context: 0 &f->f_pos_lock sb_writers#4 &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 sock_diag_mutex sock_diag_table_mutex nlk_cb_mutex-SOCK_DIAG inet_diag_table_mutex rcu_read_lock &rcu_state.gp_wq irq_context: 0 sock_diag_mutex sock_diag_table_mutex nlk_cb_mutex-SOCK_DIAG inet_diag_table_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 sock_diag_mutex sock_diag_table_mutex nlk_cb_mutex-SOCK_DIAG inet_diag_table_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 sock_diag_mutex sock_diag_table_mutex nlk_cb_mutex-SOCK_DIAG inet_diag_table_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq (&icsk->icsk_delack_timer) slock-AF_INET &n->list_lock irq_context: softirq (&icsk->icsk_delack_timer) slock-AF_INET &n->list_lock &c->lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lg->lg_mutex irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lg->lg_mutex &ei->i_prealloc_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lg->lg_mutex rcu_read_lock &pa->pa_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lg->lg_mutex key#3 irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lg->lg_mutex &pa->pa_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lg->lg_mutex &lg->lg_prealloc_lock irq_context: 0 (wq_completion)events (work_completion)(&aux->work) rtnl_mutex irq_context: 0 (wq_completion)events (work_completion)(&aux->work) rtnl_mutex bpf_devs_lock irq_context: 0 sk_lock-AF_CAIF irq_context: 0 sk_lock-AF_CAIF slock-AF_CAIF irq_context: 0 sk_lock-AF_CAIF &obj_hash[i].lock irq_context: 0 slock-AF_CAIF irq_context: 0 &sb->s_type->i_mutex_key#10 rlock-AF_CAIF irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_CAIF irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_CAIF slock-AF_CAIF irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_CAIF &this->info_list_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_CAIF (console_sem).lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_CAIF rcu_state.exp_mutex rcu_node_0 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_CAIF rcu_state.exp_mutex &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_CAIF rcu_state.exp_mutex rcu_read_lock &pool->lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_CAIF rcu_state.exp_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_CAIF rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_CAIF rcu_state.exp_mutex 
rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_CAIF rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_CAIF rcu_state.exp_mutex &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_CAIF rcu_state.exp_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_CAIF rcu_state.exp_mutex &rnp->exp_wq[0] irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_CAIF &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_CAIF &ei->socket.wq.wait irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_CAIF clock-AF_CAIF irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_CAIF elock-AF_CAIF irq_context: 0 &sb->s_type->i_mutex_key#10 slock-AF_CAIF irq_context: 0 &sb->s_type->i_mutex_key#10 elock-AF_CAIF irq_context: 0 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock rcu_read_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sock_diag_mutex sock_diag_table_mutex nlk_cb_mutex-SOCK_DIAG &n->list_lock irq_context: 0 sock_diag_mutex sock_diag_table_mutex nlk_cb_mutex-SOCK_DIAG &n->list_lock &c->lock irq_context: 0 sock_diag_mutex sock_diag_table_mutex nlk_cb_mutex-SOCK_DIAG &rq->__lock irq_context: 0 sock_diag_mutex sock_diag_table_mutex nlk_cb_mutex-SOCK_DIAG &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sock_diag_mutex sock_diag_mutex.wait_lock irq_context: 0 sock_diag_mutex &rq->__lock irq_context: 0 sock_diag_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sock_diag_mutex.wait_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_CAN rcu_state.exp_mutex &obj_hash[i].lock pool_lock irq_context: softirq (&pool->idle_timer) irq_context: softirq (&pool->idle_timer) &pool->lock/1 irq_context: softirq (&pool->idle_timer) &pool->lock/1 &obj_hash[i].lock irq_context: softirq (&pool->idle_timer) &pool->lock/1 &base->lock irq_context: softirq (&pool->idle_timer) &pool->lock/1 &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &bgl->locks[i].lock irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem stock_lock irq_context: 0 &sb->s_type->i_mutex_key#8 remove_cache_srcu &c->lock irq_context: 0 &sb->s_type->i_mutex_key#8 remove_cache_srcu &n->list_lock irq_context: 0 &sb->s_type->i_mutex_key#8 remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#8 remove_cache_srcu rcu_read_lock rcu_node_0 irq_context: 0 &sb->s_type->i_mutex_key#8 remove_cache_srcu rcu_read_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#8 remove_cache_srcu rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#8 remove_cache_srcu &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#8 remove_cache_srcu pool_lock#2 irq_context: 0 sock_diag_mutex sock_diag_table_mutex nlk_cb_mutex-SOCK_DIAG inet_diag_table_mutex rcu_node_0 irq_context: 0 &group->mark_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)events drain_vmap_work vmap_purge_lock free_vmap_area_lock init_mm.page_table_lock rcu_read_lock &p->pi_lock irq_context: 0 (wq_completion)events drain_vmap_work vmap_purge_lock rcu_node_0 irq_context: 0 &sb->s_type->i_mutex_key#10 per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 rtnl_mutex 
&dev_addr_list_lock_key/1 pool_lock#2 irq_context: 0 rtnl_mutex &dev_addr_list_lock_key/1 rcu_read_lock _xmit_ETHER &c->lock irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock rcu_read_lock_bh rcu_read_lock &n->list_lock irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock rcu_read_lock_bh rcu_read_lock &n->list_lock &c->lock irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock rcu_read_lock_bh rcu_read_lock rlock-AF_PACKET irq_context: 0 sb_writers#8 &of->mutex kn->active#5 uevent_sock_mutex rcu_read_lock &cfs_rq->removed.lock irq_context: 0 sb_writers#8 &of->mutex kn->active#5 uevent_sock_mutex rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rlock-AF_PACKET irq_context: 0 sock_diag_mutex sock_diag_table_mutex nlk_cb_mutex-SOCK_DIAG inet_diag_table_mutex rcu_read_lock &rcu_state.expedited_wq irq_context: 0 sock_diag_mutex sock_diag_table_mutex nlk_cb_mutex-SOCK_DIAG inet_diag_table_mutex rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 sock_diag_mutex sock_diag_table_mutex nlk_cb_mutex-SOCK_DIAG inet_diag_table_mutex rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 sock_diag_mutex sock_diag_table_mutex nlk_cb_mutex-SOCK_DIAG inet_diag_table_mutex rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_TIPC &zone->lock irq_context: 0 sk_lock-AF_TIPC &mm->mmap_lock ptlock_ptr(page)#2 irq_context: 0 sk_lock-AF_TIPC &mm->mmap_lock fs_reclaim irq_context: 0 sk_lock-AF_TIPC &mm->mmap_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sk_lock-AF_TIPC &mm->mmap_lock &____s->seqcount irq_context: 0 sk_lock-AF_TIPC &mm->mmap_lock &rq->__lock irq_context: 0 sk_lock-AF_TIPC &mm->mmap_lock stock_lock irq_context: 0 sk_lock-AF_TIPC &mm->mmap_lock mmu_notifier_invalidate_range_start irq_context: 0 sk_lock-AF_TIPC &mm->mmap_lock ptlock_ptr(page)#2 lock#4 irq_context: 0 sk_lock-AF_TIPC &mm->mmap_lock ptlock_ptr(page)#2 lock#4 &lruvec->lru_lock irq_context: 0 sk_lock-AF_TIPC &mm->mmap_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 sk_lock-AF_TIPC rcu_read_lock pool_lock#2 irq_context: 0 sk_lock-AF_TIPC &zone->lock &____s->seqcount irq_context: 0 sk_lock-AF_TIPC rcu_read_lock &ei->socket.wq.wait irq_context: 0 sk_lock-AF_TIPC &mm->mmap_lock ptlock_ptr(page)#2 key irq_context: 0 sk_lock-AF_TIPC &mm->mmap_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_TIPC &sem->wait_lock irq_context: 0 sk_lock-AF_TIPC &p->pi_lock irq_context: 0 sk_lock-AF_TIPC &p->pi_lock &rq->__lock irq_context: 0 sk_lock-AF_TIPC &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_TIPC &mm->mmap_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 sk_lock-AF_TIPC &mm->mmap_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 sk_lock-AF_TIPC &mm->mmap_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_TIPC &mm->mmap_lock rcu_read_lock rcu_node_0 irq_context: 0 sk_lock-AF_TIPC &mm->mmap_lock rcu_read_lock &rcu_state.expedited_wq irq_context: 0 sk_lock-AF_TIPC &mm->mmap_lock rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 sk_lock-AF_TIPC &mm->mmap_lock rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 sk_lock-AF_TIPC &mm->mmap_lock rcu_read_lock 
&rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_TIPC &mm->mmap_lock rcu_read_lock &rq->__lock irq_context: 0 sk_lock-AF_TIPC &mm->mmap_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_TIPC &mm->mmap_lock fs_reclaim &rq->__lock irq_context: 0 sk_lock-AF_TIPC &mm->mmap_lock fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_TIPC &p->pi_lock &cfs_rq->removed.lock irq_context: 0 nfnl_subsys_ipset fs_reclaim irq_context: 0 nfnl_subsys_ipset fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 nfnl_subsys_ipset fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 nfnl_subsys_ipset fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 nfnl_subsys_ipset pool_lock#2 irq_context: 0 nfnl_subsys_ipset &____s->seqcount irq_context: 0 nfnl_subsys_ipset stock_lock irq_context: 0 nfnl_subsys_ipset crngs.lock irq_context: 0 rtnl_mutex &tb->tb6_lock stock_lock irq_context: 0 &tfile->napi_mutex &c->lock irq_context: 0 &tfile->napi_mutex rcu_read_lock tk_core.seq.seqcount irq_context: 0 &tfile->napi_mutex rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &____s->seqcount#2 irq_context: 0 &tfile->napi_mutex rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &____s->seqcount irq_context: 0 &tfile->napi_mutex rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock pool_lock#2 irq_context: 0 &tfile->napi_mutex rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &c->lock irq_context: 0 &tfile->napi_mutex rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &____s->seqcount#13 irq_context: 0 &tfile->napi_mutex rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &(&bp->lock)->lock irq_context: 0 &tfile->napi_mutex rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &(&bp->lock)->lock &____s->seqcount#13 irq_context: 0 &tfile->napi_mutex rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &(&bp->lock)->lock &____s->seqcount#13 &____s->seqcount#2 irq_context: 0 &tfile->napi_mutex rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &(&bp->lock)->lock &____s->seqcount#13 &____s->seqcount irq_context: 0 &tfile->napi_mutex rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &(&bp->lock)->lock &____s->seqcount#13 pool_lock#2 irq_context: 0 &tfile->napi_mutex rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &(&bp->lock)->lock &____s->seqcount#13 &c->lock irq_context: 0 &tfile->napi_mutex rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 &tfile->napi_mutex rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &base->lock irq_context: 0 &tfile->napi_mutex rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &base->lock &obj_hash[i].lock irq_context: 0 &tfile->napi_mutex rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rhashtable_bucket irq_context: 0 &tfile->napi_mutex rcu_read_lock rcu_read_lock rcu_read_lock &q->lock#2 irq_context: 0 &tfile->napi_mutex rcu_read_lock rcu_node_0 irq_context: 0 &tfile->napi_mutex rcu_read_lock &rq->__lock irq_context: 0 sk_lock-AF_TIPC &mm->mmap_lock &sem->wait_lock irq_context: 0 rtnl_mutex &net->ipv6.addrconf_hash_lock &base->lock irq_context: 0 rtnl_mutex &net->ipv6.addrconf_hash_lock &base->lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_TIPC &mm->mmap_lock &obj_hash[i].lock irq_context: 0 cgroup_threadgroup_rwsem rcu_read_lock &rcu_state.gp_wq 
irq_context: 0 cgroup_threadgroup_rwsem rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 cgroup_threadgroup_rwsem rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 cgroup_threadgroup_rwsem rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cgroup_threadgroup_rwsem rcu_read_lock &rcu_state.expedited_wq irq_context: 0 cgroup_threadgroup_rwsem rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 cgroup_threadgroup_rwsem rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 cgroup_threadgroup_rwsem rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 key#23 irq_context: 0 &xt[i].mutex remove_cache_srcu &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex nf_hook_mutex &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex nf_hook_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &tfile->napi_mutex rcu_read_lock rcu_read_lock rcu_read_lock &q->lock#2 rcu_read_lock pool_lock#2 irq_context: 0 sk_lock-AF_BLUETOOTH-BTPROTO_HCI mgmt_chan_list_lock &hdev->lock fs_reclaim irq_context: 0 &tfile->napi_mutex rcu_read_lock rcu_read_lock rcu_read_lock &q->lock#2 &obj_hash[i].lock irq_context: 0 &tfile->napi_mutex rcu_read_lock rcu_read_lock rcu_read_lock &q->lock#2 pool_lock#2 irq_context: 0 &tfile->napi_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_BLUETOOTH-BTPROTO_HCI mgmt_chan_list_lock &hdev->lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sk_lock-AF_BLUETOOTH-BTPROTO_HCI mgmt_chan_list_lock &hdev->lock pool_lock#2 irq_context: 0 rtnl_mutex &root->kernfs_rwsem &p->pi_lock irq_context: 0 rtnl_mutex &root->kernfs_rwsem &p->pi_lock &rq->__lock irq_context: 0 rtnl_mutex &root->kernfs_rwsem &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_BLUETOOTH-BTPROTO_HCI mgmt_chan_list_lock &hdev->lock tk_core.seq.seqcount irq_context: 0 sk_lock-AF_BLUETOOTH-BTPROTO_HCI mgmt_chan_list_lock &hdev->lock rlock-AF_BLUETOOTH irq_context: 0 sk_lock-AF_BLUETOOTH-BTPROTO_HCI mgmt_chan_list_lock &hdev->lock hci_sk_list.lock irq_context: 0 sk_lock-AF_BLUETOOTH-BTPROTO_HCI mgmt_chan_list_lock &hdev->lock &obj_hash[i].lock irq_context: 0 sb_writers#8 kn->active#5 remove_cache_srcu rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#8 kn->active#5 remove_cache_srcu rcu_read_lock &rq->__lock irq_context: 0 sb_writers#8 kn->active#5 remove_cache_srcu rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_TIPC rcu_read_lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_TIPC &zone->lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_TIPC &zone->lock &____s->seqcount irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_TIPC slock-AF_TIPC rcu_read_lock &ei->socket.wq.wait irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_TIPC slock-AF_TIPC rcu_read_lock &ei->socket.wq.wait &p->pi_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_TIPC slock-AF_TIPC rcu_read_lock &ei->socket.wq.wait &p->pi_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_TIPC slock-AF_TIPC rcu_read_lock &ei->socket.wq.wait &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 unix_gc_lock irq_context: 
0 misc_mtx fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 misc_mtx fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &sb->s_type->i_lock_key bit_wait_table + i irq_context: 0 &smc->clcsock_release_lock __ip_vs_mutex irq_context: 0 &smc->clcsock_release_lock __ip_vs_mutex &rq->__lock irq_context: 0 &smc->clcsock_release_lock __ip_vs_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex __ip_vs_mutex __ip_vs_mutex.wait_lock irq_context: 0 rtnl_mutex __ip_vs_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &smc->clcsock_release_lock __ip_vs_mutex ip_vs_sched_mutex irq_context: 0 &smc->clcsock_release_lock __ip_vs_mutex nf_hook_mutex irq_context: 0 &smc->clcsock_release_lock __ip_vs_mutex nf_hook_mutex fs_reclaim irq_context: 0 &smc->clcsock_release_lock __ip_vs_mutex nf_hook_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &smc->clcsock_release_lock __ip_vs_mutex nf_hook_mutex stock_lock irq_context: 0 &smc->clcsock_release_lock __ip_vs_mutex nf_hook_mutex &c->lock irq_context: 0 &smc->clcsock_release_lock __ip_vs_mutex nf_hook_mutex pool_lock#2 irq_context: 0 &smc->clcsock_release_lock __ip_vs_mutex cpu_hotplug_lock irq_context: 0 &smc->clcsock_release_lock __ip_vs_mutex &obj_hash[i].lock irq_context: 0 &smc->clcsock_release_lock __ip_vs_mutex pool_lock#2 irq_context: 0 &smc->clcsock_release_lock __ip_vs_mutex fs_reclaim irq_context: 0 &smc->clcsock_release_lock __ip_vs_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &smc->clcsock_release_lock __ip_vs_mutex pcpu_alloc_mutex irq_context: 0 &smc->clcsock_release_lock __ip_vs_mutex pcpu_alloc_mutex pcpu_lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock &sighand->siglock irq_context: 0 &smc->clcsock_release_lock __ip_vs_mutex &____s->seqcount irq_context: 0 sk_lock-AF_INET6 &sighand->siglock irq_context: 0 sk_lock-AF_INET6 &sighand->siglock stock_lock irq_context: 0 &smc->clcsock_release_lock __ip_vs_mutex &base->lock irq_context: 0 sk_lock-AF_INET6 &sighand->siglock pool_lock#2 irq_context: 0 &smc->clcsock_release_lock __ip_vs_mutex &base->lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET6 &sighand->siglock &p->pi_lock irq_context: 0 &smc->clcsock_release_lock __ip_vs_mutex rcu_read_lock &pool->lock irq_context: 0 sk_lock-AF_INET6 &sighand->siglock &p->pi_lock &rq->__lock irq_context: 0 &smc->clcsock_release_lock __ip_vs_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET6 &sighand->siglock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &smc->clcsock_release_lock __ip_vs_mutex rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 &smc->clcsock_release_lock __ip_vs_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 &smc->clcsock_release_lock __ip_vs_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 &smc->clcsock_release_lock __ip_vs_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_long (work_completion)(&(&ipvs->est_reload_work)->work) irq_context: 0 (wq_completion)events_long (work_completion)(&(&ipvs->est_reload_work)->work) ipvs->est_mutex irq_context: 0 (wq_completion)events_long (work_completion)(&(&ipvs->est_reload_work)->work) ipvs->est_mutex fs_reclaim irq_context: 0 (wq_completion)events_long (work_completion)(&(&ipvs->est_reload_work)->work) ipvs->est_mutex 
fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events_long (work_completion)(&(&ipvs->est_reload_work)->work) ipvs->est_mutex pool_lock#2 irq_context: 0 (wq_completion)events_long (work_completion)(&(&ipvs->est_reload_work)->work) ipvs->est_mutex kthread_create_lock irq_context: 0 (wq_completion)events_long (work_completion)(&(&ipvs->est_reload_work)->work) ipvs->est_mutex &p->pi_lock irq_context: 0 (wq_completion)events_long (work_completion)(&(&ipvs->est_reload_work)->work) ipvs->est_mutex &p->pi_lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 sb_writers#4 jbd2_handle &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)events_long (work_completion)(&(&ipvs->est_reload_work)->work) ipvs->est_mutex &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events_long (work_completion)(&(&ipvs->est_reload_work)->work) ipvs->est_mutex &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_long (work_completion)(&(&ipvs->est_reload_work)->work) ipvs->est_mutex &x->wait irq_context: 0 (wq_completion)events_long (work_completion)(&(&ipvs->est_reload_work)->work) ipvs->est_mutex &pool->lock irq_context: 0 (wq_completion)events_long (work_completion)(&(&ipvs->est_reload_work)->work) ipvs->est_mutex &rq->__lock irq_context: 0 (wq_completion)events_long (work_completion)(&(&ipvs->est_reload_work)->work) ipvs->est_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_long (work_completion)(&(&ipvs->est_reload_work)->work) ipvs->est_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)events_long (work_completion)(&(&ipvs->est_reload_work)->work) ipvs->est_mutex (console_sem).lock irq_context: 0 (wq_completion)events_long (work_completion)(&(&ipvs->est_reload_work)->work) ipvs->est_mutex console_lock console_srcu console_owner_lock irq_context: 0 (wq_completion)events_long (work_completion)(&(&ipvs->est_reload_work)->work) ipvs->est_mutex console_lock console_srcu console_owner irq_context: 0 (wq_completion)events_long (work_completion)(&(&ipvs->est_reload_work)->work) ipvs->est_mutex console_lock console_srcu console_owner &port_lock_key irq_context: 0 (wq_completion)events_long (work_completion)(&(&ipvs->est_reload_work)->work) ipvs->est_mutex console_lock console_srcu console_owner console_owner_lock irq_context: 0 __ip_vs_mutex irq_context: 0 __ip_vs_mutex __ip_vs_mutex.wait_lock irq_context: 0 __ip_vs_mutex &rq->__lock irq_context: 0 __ip_vs_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &smc->clcsock_release_lock __ip_vs_mutex.wait_lock irq_context: 0 &smc->clcsock_release_lock &p->pi_lock irq_context: 0 &smc->clcsock_release_lock &p->pi_lock &rq->__lock irq_context: 0 &smc->clcsock_release_lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &smc->clcsock_release_lock &rq->__lock irq_context: 0 &smc->clcsock_release_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex __ip_vs_mutex.wait_lock irq_context: 0 &wq#4 irq_context: 0 sk_lock-AF_BLUETOOTH-BTPROTO_HCI mgmt_chan_list_lock &hdev->lock &c->lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_BLUETOOTH-BTPROTO_HCI rcu_read_lock rcu_node_0 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_BLUETOOTH-BTPROTO_HCI rcu_read_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_BLUETOOTH-BTPROTO_HCI rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_TIPC rcu_read_lock &rcu_state.gp_wq 
irq_context: 0 sk_lock-AF_TIPC rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 sk_lock-AF_TIPC rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 sk_lock-AF_TIPC rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &smc->clcsock_release_lock &cfs_rq->removed.lock irq_context: 0 &smc->clcsock_release_lock &obj_hash[i].lock irq_context: 0 &smc->clcsock_release_lock pool_lock#2 irq_context: 0 rcu_read_lock &s->lock irq_context: 0 rtnl_mutex bpf_devs_lock &rq->__lock irq_context: 0 sb_writers#5 &rq->__lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &journal->j_wait_updates &p->pi_lock &cfs_rq->removed.lock irq_context: 0 __ip_vs_mutex ipvs->est_mutex irq_context: 0 __ip_vs_mutex (console_sem).lock irq_context: 0 __ip_vs_mutex console_lock console_srcu console_owner_lock irq_context: 0 __ip_vs_mutex console_lock console_srcu console_owner irq_context: 0 __ip_vs_mutex console_lock console_srcu console_owner &port_lock_key irq_context: 0 sk_lock-AF_INET lock irq_context: 0 sk_lock-AF_INET lock sctp_assocs_id_lock irq_context: 0 sk_lock-AF_INET lock sctp_assocs_id_lock pool_lock#2 irq_context: 0 __ip_vs_mutex console_lock console_srcu console_owner console_owner_lock irq_context: 0 __ip_vs_mutex rcu_node_0 irq_context: 0 __ip_vs_mutex &rcu_state.expedited_wq irq_context: 0 __ip_vs_mutex &rcu_state.expedited_wq &p->pi_lock irq_context: 0 __ip_vs_mutex &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 __ip_vs_mutex &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 __ip_vs_mutex fs_reclaim irq_context: 0 __ip_vs_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 __ip_vs_mutex pool_lock#2 irq_context: 0 sk_lock-AF_TIPC &mm->mmap_lock &pcp->lock &zone->lock irq_context: 0 sk_lock-AF_TIPC &mm->mmap_lock rcu_read_lock &rcu_state.gp_wq irq_context: 0 sk_lock-AF_TIPC &mm->mmap_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 sb_writers#4 mapping.invalidate_lock rcu_read_lock batched_entropy_u8.lock irq_context: 0 sb_writers#4 mapping.invalidate_lock rcu_read_lock kfence_freelist_lock irq_context: 0 sk_lock-AF_INET6 (console_sem).lock irq_context: 0 sk_lock-AF_INET6 console_lock console_srcu console_owner_lock irq_context: 0 sk_lock-AF_INET6 console_lock console_srcu console_owner irq_context: 0 sk_lock-AF_INET6 console_lock console_srcu console_owner &port_lock_key irq_context: 0 sk_lock-AF_INET6 console_lock console_srcu console_owner console_owner_lock irq_context: 0 sk_lock-AF_TIPC &mm->mmap_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 sk_lock-AF_TIPC &mm->mmap_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_BLUETOOTH-BTPROTO_HCI mgmt_chan_list_lock &hdev->lock rcu_read_lock rcu_node_0 irq_context: 0 sk_lock-AF_BLUETOOTH-BTPROTO_HCI mgmt_chan_list_lock &hdev->lock rcu_read_lock &rq->__lock irq_context: 0 sk_lock-AF_BLUETOOTH-BTPROTO_HCI mgmt_chan_list_lock &hdev->lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 mapping.invalidate_lock &xa->xa_lock#6 rcu_read_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 sk_lock-AF_BLUETOOTH-BTPROTO_HCI 
mgmt_chan_list_lock &hdev->lock rcu_read_lock &rcu_state.gp_wq irq_context: 0 sk_lock-AF_BLUETOOTH-BTPROTO_HCI mgmt_chan_list_lock &hdev->lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 sk_lock-AF_BLUETOOTH-BTPROTO_HCI mgmt_chan_list_lock &hdev->lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 sk_lock-AF_BLUETOOTH-BTPROTO_HCI mgmt_chan_list_lock &hdev->lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_BLUETOOTH-BTPROTO_HCI mgmt_chan_list_lock &hdev->lock rcu_read_lock &rcu_state.expedited_wq irq_context: 0 sk_lock-AF_BLUETOOTH-BTPROTO_HCI mgmt_chan_list_lock &hdev->lock rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 sk_lock-AF_BLUETOOTH-BTPROTO_HCI mgmt_chan_list_lock &hdev->lock rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 sk_lock-AF_BLUETOOTH-BTPROTO_HCI mgmt_chan_list_lock &hdev->lock rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_TIPC remove_cache_srcu irq_context: 0 sk_lock-AF_TIPC remove_cache_srcu quarantine_lock irq_context: 0 sk_lock-AF_TIPC remove_cache_srcu &c->lock irq_context: 0 sk_lock-AF_TIPC remove_cache_srcu &n->list_lock irq_context: 0 sk_lock-AF_TIPC remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 sk_lock-AF_TIPC remove_cache_srcu rcu_read_lock rcu_node_0 irq_context: 0 sk_lock-AF_TIPC remove_cache_srcu rcu_read_lock &rq->__lock irq_context: 0 sk_lock-AF_TIPC remove_cache_srcu rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_TIPC remove_cache_srcu &obj_hash[i].lock irq_context: 0 sk_lock-AF_TIPC remove_cache_srcu &rq->__lock irq_context: 0 sk_lock-AF_TIPC remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &smc->clcsock_release_lock rtnl_mutex k-sk_lock-AF_INET irq_context: 0 &smc->clcsock_release_lock rtnl_mutex k-sk_lock-AF_INET k-slock-AF_INET irq_context: 0 &smc->clcsock_release_lock rtnl_mutex k-slock-AF_INET irq_context: 0 &tfile->napi_mutex rcu_read_lock pool_lock#2 irq_context: 0 proto_tab_lock irq_context: 0 proto_tab_lock pool_lock#2 irq_context: 0 proto_tab_lock &dir->lock irq_context: 0 proto_tab_lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_NFC irq_context: 0 sk_lock-AF_NFC slock-AF_NFC irq_context: 0 sk_lock-AF_NFC &k->list_lock irq_context: 0 sk_lock-AF_NFC &k->k_lock irq_context: 0 sk_lock-AF_NFC llcp_devices_lock irq_context: 0 sk_lock-AF_NFC fs_reclaim irq_context: 0 sk_lock-AF_NFC fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sk_lock-AF_NFC fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 sk_lock-AF_NFC fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_NFC pool_lock#2 irq_context: 0 sk_lock-AF_NFC &rq->__lock irq_context: 0 sk_lock-AF_NFC &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_NFC &local->sdp_lock irq_context: 0 sk_lock-AF_NFC &local->sdp_lock &local->sockets.lock irq_context: 0 sk_lock-AF_NFC &local->sockets.lock irq_context: 0 slock-AF_NFC irq_context: 0 sk_lock-AF_NFC &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_NFC irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_NFC slock-AF_NFC irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_NFC &local->sockets.lock irq_context: 0 &sb->s_type->i_mutex_key#10 slock-AF_NFC irq_context: 0 &sb->s_type->i_mutex_key#10 
clock-AF_NFC irq_context: 0 &sb->s_type->i_mutex_key#10 rlock-AF_NFC irq_context: 0 &sb->s_type->i_mutex_key#10 &list->lock#34 irq_context: 0 &sb->s_type->i_mutex_key#10 unix_gc_lock irq_context: 0 &sb->s_type->i_mutex_key#10 unix_gc_lock unix_gc_wait.lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET/1 sctp_assocs_id_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET/1 sctp_assocs_id_lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET/1 sctp_assocs_id_lock pool_lock#2 irq_context: 0 &mm->mmap_lock remove_cache_srcu &cfs_rq->removed.lock irq_context: 0 &disk->open_mutex rcu_node_0 irq_context: 0 &disk->open_mutex &rcu_state.expedited_wq irq_context: 0 &disk->open_mutex &rcu_state.expedited_wq &p->pi_lock irq_context: 0 &disk->open_mutex &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 &disk->open_mutex &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#9 remove_cache_srcu &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#9 remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 rcu_node_0 irq_context: 0 proto_tab_lock &obj_hash[i].lock pool_lock irq_context: 0 sk_lock-AF_RXRPC &local->services_lock irq_context: 0 &sb->s_type->i_mutex_key#10 &local->services_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_TIPC &____s->seqcount#2 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_TIPC &____s->seqcount irq_context: 0 sb_writers#4 rcu_read_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#4 rcu_read_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 kn->active#5 remove_cache_srcu rcu_read_lock &rcu_state.gp_wq irq_context: 0 kn->active#5 remove_cache_srcu rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 kn->active#5 remove_cache_srcu rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 kn->active#5 remove_cache_srcu rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex dpm_list_mtx &rq->__lock irq_context: 0 &type->i_mutex_dir_key#3 rcu_node_0 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 rcu_read_lock &rcu_state.gp_wq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &smc->clcsock_release_lock __ip_vs_mutex rcu_read_lock &rq->__lock irq_context: 0 &iint->mutex &ei->xattr_sem &rq->__lock irq_context: 0 &iint->mutex &ei->xattr_sem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_TIPC &mm->mmap_lock mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 sk_lock-AF_TIPC &mm->mmap_lock mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &smc->clcsock_release_lock __ip_vs_mutex &obj_hash[i].lock pool_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 tomoyo_ss remove_cache_srcu rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 
mapping.invalidate_lock jbd2_handle &ei->i_data_sem &n->list_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem &n->list_lock &c->lock irq_context: 0 sk_lock-AF_INET6 console_owner_lock irq_context: 0 sk_lock-AF_INET6 console_owner irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &meta->lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &ei->i_data_sem &ei->i_es_lock key#7 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 mapping.invalidate_lock &base->lock irq_context: 0 sb_writers#4 mapping.invalidate_lock &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#4 mapping.invalidate_lock &rq->__lock &cfs_rq->removed.lock irq_context: softirq &fq->mq_flush_lock &x->wait#26 &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 mapping.invalidate_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock &pool->lock irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock rcu_read_lock &pool->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ei->i_data_sem bit_wait_table + i irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &journal->j_wait_updates &p->pi_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &journal->j_wait_updates &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &journal->j_wait_updates &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)kblockd (work_completion)(&(&hctx->run_work)->work) rcu_read_lock &hctx->lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET/1 rcu_read_lock rcu_read_lock &pool->lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET/1 rcu_read_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET/1 rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET/1 rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock 
&rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET/1 rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_TIPC &mm->mmap_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 sk_lock-AF_TIPC &mm->mmap_lock ptlock_ptr(page)#2 lock#4 &lruvec->lru_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 lock#4 &lruvec->lru_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 sb_writers#4 sb_internal jbd2_handle mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#4 sb_internal jbd2_handle &____s->seqcount irq_context: 0 sb_writers#4 sb_internal jbd2_handle rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &obj_hash[i].lock pool_lock irq_context: 0 sb_writers#4 sb_writers#4 mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 sb_writers#4 mapping.invalidate_lock rcu_read_lock &hctx->lock irq_context: 0 (wq_completion)kblockd (work_completion)(&(&q->requeue_work)->work) rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#4 sb_writers#4 mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ret->b_state_lock bit_wait_table + i irq_context: 0 cb_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock batched_entropy_u8.lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock kfence_freelist_lock irq_context: 0 sb_writers#4 sb_writers#4 &pcp->lock &zone->lock irq_context: 0 sb_writers#4 sb_writers#4 &pcp->lock &zone->lock &____s->seqcount irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock fill_pool_map-wait-type-override rcu_node_0 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock fill_pool_map-wait-type-override &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock fill_pool_map-wait-type-override &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 jbd2_handle rcu_read_lock &rcu_state.gp_wq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 jbd2_handle rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 jbd2_handle rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 jbd2_handle rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle rcu_read_lock &rq->__lock irq_context: 0 sb_writers#4 sb_writers#4 remove_cache_srcu rcu_node_0 irq_context: 0 sb_writers#4 sb_writers#4 remove_cache_srcu rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 remove_cache_srcu rcu_read_lock &rcu_state.gp_wq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 remove_cache_srcu rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 remove_cache_srcu rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 remove_cache_srcu rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq 
irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem &rq->__lock &cfs_rq->removed.lock irq_context: 0 &net->xfrm.xfrm_cfg_mutex (kmod_concurrent_max).lock irq_context: 0 &net->xfrm.xfrm_cfg_mutex &n->list_lock irq_context: 0 &net->xfrm.xfrm_cfg_mutex &n->list_lock &c->lock irq_context: 0 &net->xfrm.xfrm_cfg_mutex rcu_read_lock &pool->lock/1 irq_context: 0 &net->xfrm.xfrm_cfg_mutex rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 &net->xfrm.xfrm_cfg_mutex rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 &net->xfrm.xfrm_cfg_mutex rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 &net->xfrm.xfrm_cfg_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 &net->xfrm.xfrm_cfg_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &net->xfrm.xfrm_cfg_mutex &x->wait#17 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &lg->lg_mutex &c->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem batched_entropy_u8.lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem kfence_freelist_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &meta->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 remove_cache_srcu irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 remove_cache_srcu quarantine_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 remove_cache_srcu &c->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 remove_cache_srcu &n->list_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 remove_cache_srcu &obj_hash[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 remove_cache_srcu pool_lock#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 remove_cache_srcu rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 remove_cache_srcu rcu_read_lock &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 remove_cache_srcu rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 jbd2_handle &sbi->s_orphan_lock &lock->wait_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &mapping->private_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 tomoyo_ss rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &net->xfrm.xfrm_cfg_mutex running_helpers_waitq.lock irq_context: 0 &net->xfrm.xfrm_cfg_mutex crypto_alg_sem irq_context: 0 &net->xfrm.xfrm_cfg_mutex (crypto_chain).rwsem irq_context: 0 &net->xfrm.xfrm_cfg_mutex (crypto_chain).rwsem fs_reclaim irq_context: 0 &net->xfrm.xfrm_cfg_mutex (crypto_chain).rwsem fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &net->xfrm.xfrm_cfg_mutex (crypto_chain).rwsem pool_lock#2 irq_context: 0 &net->xfrm.xfrm_cfg_mutex (crypto_chain).rwsem kthread_create_lock irq_context: 0 &net->xfrm.xfrm_cfg_mutex (crypto_chain).rwsem &p->pi_lock irq_context: 0 &net->xfrm.xfrm_cfg_mutex (crypto_chain).rwsem &p->pi_lock &rq->__lock irq_context: 0 &net->xfrm.xfrm_cfg_mutex (crypto_chain).rwsem &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &net->xfrm.xfrm_cfg_mutex (crypto_chain).rwsem &x->wait irq_context: 0 &net->xfrm.xfrm_cfg_mutex (crypto_chain).rwsem &rq->__lock irq_context: 0 &net->xfrm.xfrm_cfg_mutex (crypto_chain).rwsem &rq->__lock 
&per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &net->xfrm.xfrm_cfg_mutex (crypto_chain).rwsem &obj_hash[i].lock irq_context: 0 &net->xfrm.xfrm_cfg_mutex &x->wait#21 irq_context: 0 &net->xfrm.xfrm_cfg_mutex &base->lock irq_context: 0 &net->xfrm.xfrm_cfg_mutex &base->lock &obj_hash[i].lock irq_context: 0 &net->xfrm.xfrm_cfg_mutex (&timer.timer) irq_context: 0 &net->xfrm.xfrm_cfg_mutex crypto_default_null_skcipher_lock irq_context: 0 &net->xfrm.xfrm_cfg_mutex crypto_default_null_skcipher_lock crypto_alg_sem irq_context: 0 &net->xfrm.xfrm_cfg_mutex crypto_default_null_skcipher_lock fs_reclaim irq_context: 0 &net->xfrm.xfrm_cfg_mutex crypto_default_null_skcipher_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &net->xfrm.xfrm_cfg_mutex crypto_default_null_skcipher_lock pool_lock#2 irq_context: 0 &net->xfrm.xfrm_cfg_mutex &net->xfrm.xfrm_state_lock hrtimer_bases.lock irq_context: 0 &net->xfrm.xfrm_cfg_mutex &net->xfrm.xfrm_state_lock hrtimer_bases.lock tk_core.seq.seqcount irq_context: 0 &net->xfrm.xfrm_cfg_mutex &net->xfrm.xfrm_state_lock hrtimer_bases.lock &obj_hash[i].lock irq_context: 0 &net->xfrm.xfrm_cfg_mutex &net->xfrm.xfrm_state_lock &obj_hash[i].lock irq_context: 0 &net->xfrm.xfrm_cfg_mutex &net->xfrm.xfrm_state_lock &base->lock irq_context: 0 &net->xfrm.xfrm_cfg_mutex &net->xfrm.xfrm_state_lock &base->lock &obj_hash[i].lock irq_context: 0 &net->xfrm.xfrm_cfg_mutex rcu_read_lock nl_table_lock irq_context: 0 &net->xfrm.xfrm_cfg_mutex rcu_read_lock &obj_hash[i].lock irq_context: 0 &net->xfrm.xfrm_cfg_mutex rcu_read_lock nl_table_wait.lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 sb_writers#8 &of->mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 mapping.invalidate_lock lock#4 &lruvec->lru_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 sb_writers#4 mapping.invalidate_lock &xa->xa_lock#6 stock_lock rcu_read_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &folio_wait_table[i] &p->pi_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &folio_wait_table[i] &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &folio_wait_table[i] &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#3 oom_adj_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &net->xfrm.xfrm_cfg_mutex rcu_read_lock &c->lock irq_context: 0 &net->xfrm.xfrm_cfg_mutex rcu_read_lock rcu_node_0 irq_context: 0 &net->xfrm.xfrm_cfg_mutex rcu_read_lock &rq->__lock irq_context: 0 &net->xfrm.xfrm_cfg_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cgroup_threadgroup_rwsem freezer_mutex rcu_read_lock &rcu_state.gp_wq irq_context: 0 cgroup_threadgroup_rwsem freezer_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 cgroup_threadgroup_rwsem freezer_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 cgroup_threadgroup_rwsem freezer_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_SMC rcu_read_lock rcu_node_0 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_SMC rcu_read_lock &rq->__lock irq_context: 0 &net->xfrm.xfrm_cfg_mutex rcu_read_lock &n->list_lock irq_context: 0 &net->xfrm.xfrm_cfg_mutex rcu_read_lock &n->list_lock &c->lock irq_context: 0 &net->xfrm.xfrm_cfg_mutex 
rcu_read_lock &rcu_state.gp_wq irq_context: 0 &net->xfrm.xfrm_cfg_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 &net->xfrm.xfrm_cfg_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 &net->xfrm.xfrm_cfg_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_SMC &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_SMC &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &sbi->s_orphan_lock bit_wait_table + i irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET/1 rcu_read_lock &cfs_rq->removed.lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET/1 rcu_read_lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &folio_wait_table[i] &p->pi_lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 sb_writers#4 jbd2_handle &cfs_rq->removed.lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle &sbi->s_orphan_lock &mapping->private_lock irq_context: softirq &x->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &journal->j_state_lock &journal->j_wait_commit &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: softirq (&x->rtimer) irq_context: softirq (&x->rtimer) &x->lock irq_context: 0 cb_lock nlk_cb_mutex-GENERIC genl_mutex hwsim_radio_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem &ei->i_es_lock &n->list_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem &ei->i_es_lock &n->list_lock &c->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 jbd2_handle &rq->__lock &cfs_rq->removed.lock irq_context: 0 sk_lock-AF_TIPC tk_core.seq.seqcount irq_context: 0 sk_lock-AF_TIPC &list->lock#5 irq_context: 0 sk_lock-AF_TIPC pcpu_lock irq_context: 0 cb_lock genl_mutex stock_lock irq_context: 0 cb_lock genl_mutex mmu_notifier_invalidate_range_start irq_context: 0 cb_lock genl_mutex &sb->s_type->i_lock_key#8 irq_context: 0 cb_lock genl_mutex &dir->lock irq_context: 0 cb_lock genl_mutex pcpu_alloc_mutex irq_context: 0 cb_lock genl_mutex pcpu_alloc_mutex pcpu_lock irq_context: 0 cb_lock genl_mutex msk_lock-AF_INET irq_context: 0 cb_lock genl_mutex msk_lock-AF_INET mlock-AF_INET irq_context: 0 cb_lock genl_mutex msk_lock-AF_INET fs_reclaim irq_context: 0 cb_lock genl_mutex msk_lock-AF_INET fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 cb_lock genl_mutex msk_lock-AF_INET fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 cb_lock genl_mutex msk_lock-AF_INET fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock genl_mutex msk_lock-AF_INET stock_lock irq_context: 0 cb_lock genl_mutex msk_lock-AF_INET pool_lock#2 irq_context: 0 cb_lock genl_mutex msk_lock-AF_INET mmu_notifier_invalidate_range_start irq_context: 0 cb_lock genl_mutex msk_lock-AF_INET &sb->s_type->i_lock_key#8 irq_context: 0 cb_lock genl_mutex msk_lock-AF_INET &dir->lock irq_context: 0 cb_lock genl_mutex msk_lock-AF_INET &obj_hash[i].lock irq_context: 0 cb_lock genl_mutex msk_lock-AF_INET k-sk_lock-AF_INET6/1 irq_context: 0 cb_lock genl_mutex msk_lock-AF_INET k-sk_lock-AF_INET6/1 k-slock-AF_INET6 irq_context: 0 cb_lock genl_mutex msk_lock-AF_INET k-sk_lock-AF_INET6/1 pool_lock#2 irq_context: 0 cb_lock genl_mutex msk_lock-AF_INET k-sk_lock-AF_INET6/1 &dir->lock 
irq_context: 0 cb_lock genl_mutex msk_lock-AF_INET k-sk_lock-AF_INET6/1 &rq->__lock irq_context: 0 cb_lock genl_mutex msk_lock-AF_INET k-sk_lock-AF_INET6/1 fs_reclaim irq_context: 0 cb_lock genl_mutex msk_lock-AF_INET k-sk_lock-AF_INET6/1 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 cb_lock genl_mutex msk_lock-AF_INET k-sk_lock-AF_INET6/1 &c->lock irq_context: 0 cb_lock genl_mutex msk_lock-AF_INET k-slock-AF_INET6 irq_context: 0 cb_lock genl_mutex msk_lock-AF_INET k-clock-AF_INET6 irq_context: 0 cb_lock genl_mutex mlock-AF_INET irq_context: 0 cb_lock genl_mutex k-sk_lock-AF_INET6 irq_context: 0 cb_lock genl_mutex k-sk_lock-AF_INET6 k-slock-AF_INET6 irq_context: 0 cb_lock genl_mutex k-slock-AF_INET6 irq_context: 0 cb_lock genl_mutex msk_lock-AF_INET &xa->xa_lock#6 irq_context: 0 cb_lock genl_mutex msk_lock-AF_INET &rq->__lock irq_context: 0 cb_lock genl_mutex msk_lock-AF_INET &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock genl_mutex msk_lock-AF_INET &fsnotify_mark_srcu irq_context: 0 cb_lock genl_mutex msk_lock-AF_INET k-sk_lock-AF_INET6/1 k-clock-AF_INET6 irq_context: 0 cb_lock genl_mutex msk_lock-AF_INET k-sk_lock-AF_INET6/1 k-slock-AF_INET6 &obj_hash[i].lock irq_context: 0 cb_lock genl_mutex msk_lock-AF_INET k-sk_lock-AF_INET6/1 k-slock-AF_INET6 pool_lock#2 irq_context: 0 cb_lock genl_mutex msk_lock-AF_INET k-sk_lock-AF_INET6/1 k-slock-AF_INET6 krc.lock irq_context: 0 cb_lock genl_mutex msk_lock-AF_INET k-sk_lock-AF_INET6/1 k-slock-AF_INET6 elock-AF_INET6 irq_context: 0 cb_lock genl_mutex msk_lock-AF_INET &msk->pm.lock irq_context: 0 cb_lock genl_mutex msk_lock-AF_INET elock-AF_INET6 irq_context: 0 cb_lock genl_mutex (work_completion)(&msk->work) irq_context: 0 cb_lock genl_mutex &xa->xa_lock#6 irq_context: 0 cb_lock genl_mutex &fsnotify_mark_srcu irq_context: 0 (wq_completion)events (work_completion)(&nlk->work) irq_context: 0 (wq_completion)events (work_completion)(&nlk->work) &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&nlk->work) pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&nlk->work) rlock-AF_NETLINK irq_context: 0 (wq_completion)events (work_completion)(&nlk->work) &dir->lock irq_context: 0 sk_lock-AF_INET6 lock irq_context: 0 sk_lock-AF_INET6 lock sctp_assocs_id_lock irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem &nft_net->commit_mutex rcu_state.exp_mutex &rnp->exp_wq[2] irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 remove_cache_srcu &cfs_rq->removed.lock irq_context: 0 sk_lock-AF_TIPC batched_entropy_u8.lock irq_context: 0 sk_lock-AF_TIPC kfence_freelist_lock irq_context: 0 sk_lock-AF_TIPC &meta->lock irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem &nft_net->commit_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem &nft_net->commit_mutex &rq->__lock irq_context: 0 &smc->clcsock_release_lock rtnl_mutex rtnl_mutex.wait_lock irq_context: 0 &smc->clcsock_release_lock rtnl_mutex &rq->__lock irq_context: 0 &smc->clcsock_release_lock rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_TIPC &rq->__lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 sb_writers#4 jbd2_handle &journal->j_wait_updates &p->pi_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)kblockd (work_completion)(&q->timeout_work) &fq->mq_flush_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &sb->s_type->i_lock_key#8 bit_wait_table + i irq_context: 0 
(wq_completion)netns net_cleanup_work rcu_state.barrier_mutex &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)netns net_cleanup_work rcu_state.barrier_mutex &cfs_rq->removed.lock irq_context: 0 (wq_completion)netns net_cleanup_work rcu_state.barrier_mutex &obj_hash[i].lock irq_context: 0 cb_lock remove_cache_srcu pool_lock#2 irq_context: 0 cb_lock genl_mutex msk_lock-AF_INET &c->lock irq_context: 0 cb_lock genl_mutex msk_lock-AF_INET k-sk_lock-AF_INET6/1 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 lock btf_idr_lock irq_context: 0 lock btf_idr_lock pool_lock#2 irq_context: 0 rcu_read_lock &loc_l->lock irq_context: 0 rcu_read_lock &loc_l->lock &l->lock irq_context: 0 (wq_completion)events xfrm_state_gc_work rcu_state.exp_mutex &rnp->exp_wq[2] irq_context: 0 (wq_completion)events_unbound (work_completion)(&map->work) btf_idr_lock irq_context: 0 rcu_read_lock &l->lock irq_context: 0 sk_lock-AF_NETLINK &rq->__lock irq_context: 0 sk_lock-AF_NETLINK &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex _xmit_ETHER &local->filter_lock &n->list_lock irq_context: 0 rtnl_mutex _xmit_ETHER &local->filter_lock &n->list_lock &c->lock irq_context: 0 rtnl_mutex bpf_devs_lock fs_reclaim irq_context: 0 rtnl_mutex bpf_devs_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 rtnl_mutex bpf_devs_lock stock_lock irq_context: 0 rtnl_mutex bpf_devs_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex bpf_devs_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 rtnl_mutex bpf_devs_lock pool_lock#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&map->work) rtnl_mutex irq_context: 0 (wq_completion)events_unbound (work_completion)(&map->work) rtnl_mutex bpf_devs_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&map->work) rtnl_mutex bpf_devs_lock stock_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&map->work) rtnl_mutex bpf_devs_lock &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&map->work) rtnl_mutex bpf_devs_lock pool_lock#2 irq_context: 0 (wq_completion)events xfrm_state_gc_work &rnp->exp_lock irq_context: 0 (wq_completion)events xfrm_state_gc_work rcu_state.exp_mutex irq_context: 0 (wq_completion)events xfrm_state_gc_work rcu_state.exp_mutex rcu_state.exp_mutex.wait_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&map->work) btf_idr_lock &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&map->work) btf_idr_lock pool_lock#2 irq_context: 0 &p->lock remove_cache_srcu &pcp->lock &zone->lock irq_context: 0 &p->lock remove_cache_srcu &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &p->lock remove_cache_srcu &rq->__lock &cfs_rq->removed.lock irq_context: 0 &p->lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 &p->lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#8 &of->mutex kn->active#5 fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 sb_writers#8 &of->mutex kn->active#5 fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#3 oom_adj_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 lock prog_idr_lock &n->list_lock irq_context: 0 lock prog_idr_lock &n->list_lock &c->lock irq_context: 0 cb_lock nlk_cb_mutex-GENERIC rtnl_mutex rtnl_mutex.wait_lock 
irq_context: 0 cb_lock nlk_cb_mutex-GENERIC rtnl_mutex &rq->__lock irq_context: 0 cb_lock nlk_cb_mutex-GENERIC rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex pcpu_alloc_mutex pcpu_alloc_mutex.wait_lock irq_context: 0 nlk_cb_mutex-GENERIC rtnl_mutex &rdev->wiphy.mtx irq_context: 0 nlk_cb_mutex-GENERIC &lock->wait_lock irq_context: 0 nlk_cb_mutex-GENERIC &rq->__lock irq_context: 0 nlk_cb_mutex-GENERIC &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq (&pool->idle_timer) &pool->lock irq_context: softirq (&pool->idle_timer) &pool->lock &obj_hash[i].lock irq_context: softirq (&pool->idle_timer) &pool->lock &base->lock irq_context: softirq (&pool->idle_timer) &pool->lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg2#4 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)wg-crypt-wg2#4 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-kex-wg2#7 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock rcu_read_lock_bh batched_entropy_u32.lock crngs.lock irq_context: 0 (wq_completion)wg-kex-wg2#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &c->lock irq_context: 0 cb_lock nlk_cb_mutex-GENERIC rtnl_mutex fs_reclaim irq_context: 0 cb_lock nlk_cb_mutex-GENERIC rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 cb_lock nlk_cb_mutex-GENERIC rtnl_mutex (console_sem).lock irq_context: 0 cb_lock nlk_cb_mutex-GENERIC rtnl_mutex console_lock console_srcu console_owner_lock irq_context: 0 cb_lock nlk_cb_mutex-GENERIC rtnl_mutex console_lock console_srcu console_owner irq_context: 0 cb_lock nlk_cb_mutex-GENERIC rtnl_mutex console_lock console_srcu console_owner &port_lock_key irq_context: 0 cb_lock nlk_cb_mutex-GENERIC rtnl_mutex console_lock console_srcu console_owner console_owner_lock irq_context: 0 base_sockets.lock irq_context: 0 cb_lock nlk_cb_mutex-GENERIC rtnl_mutex.wait_lock irq_context: 0 cb_lock nlk_cb_mutex-GENERIC &p->pi_lock irq_context: 0 cb_lock nlk_cb_mutex-GENERIC &p->pi_lock &rq->__lock irq_context: 0 cb_lock nlk_cb_mutex-GENERIC &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock nlk_cb_mutex-GENERIC &rq->__lock irq_context: 0 cb_lock nlk_cb_mutex-GENERIC &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_node_0 irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_read_lock &tb->tb6_lock rcu_read_lock &c->lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_read_lock &tb->tb6_lock rcu_read_lock &n->list_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_read_lock &tb->tb6_lock rcu_read_lock &n->list_lock &c->lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_read_lock &tb->tb6_lock rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock 
irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_read_lock &tb->tb6_lock rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 nlk_cb_mutex-GENERIC rtnl_mutex irq_context: 0 nlk_cb_mutex-GENERIC rtnl_mutex rtnl_mutex.wait_lock irq_context: 0 nlk_cb_mutex-GENERIC rtnl_mutex &rq->__lock irq_context: 0 nlk_cb_mutex-GENERIC rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sig->cred_guard_mutex fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_read_lock &net->sctp.local_addr_lock &net->sctp.addr_wq_lock &n->list_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_read_lock &net->sctp.local_addr_lock &net->sctp.addr_wq_lock &n->list_lock &c->lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &idev->mc_lock &vlan_netdev_addr_lock_key/1 &obj_hash[i].lock pool_lock irq_context: 0 sk_lock-AF_ALG fs_reclaim &rq->__lock irq_context: 0 sk_lock-AF_ALG fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &____s->seqcount irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock rcu_read_lock &cfs_rq->removed.lock irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 (console_sem).lock irq_context: 0 &sb->s_type->i_mutex_key#10 base_sockets.lock irq_context: 0 &sb->s_type->i_mutex_key#10 clock-AF_ISDN irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 stock_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &xa->xa_lock#6 rcu_read_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 cb_lock nlk_cb_mutex-GENERIC rtnl_mutex &c->lock irq_context: 0 nlk_cb_mutex-GENERIC rtnl_mutex &rdev->wiphy.mtx &rq->__lock irq_context: 0 nlk_cb_mutex-GENERIC rtnl_mutex &rdev->wiphy.mtx &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#8 remove_cache_srcu irq_context: 0 &sb->s_type->i_mutex_key#8 remove_cache_srcu quarantine_lock irq_context: 0 &sb->s_type->i_mutex_key#8 remove_cache_srcu &c->lock irq_context: 0 &sb->s_type->i_mutex_key#8 remove_cache_srcu &n->list_lock irq_context: 0 &sb->s_type->i_mutex_key#8 remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#8 remove_cache_srcu &obj_hash[i].lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex (inetaddr_chain).rwsem rcu_read_lock &c->lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex (inetaddr_chain).rwsem &net->sctp.local_addr_lock &net->sctp.addr_wq_lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#8 &rcu_state.expedited_wq irq_context: 0 vlan_ioctl_mutex rtnl_mutex remove_cache_srcu irq_context: 0 vlan_ioctl_mutex rtnl_mutex remove_cache_srcu quarantine_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex remove_cache_srcu &c->lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex remove_cache_srcu &n->list_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex remove_cache_srcu &obj_hash[i].lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex remove_cache_srcu pool_lock#2 irq_context: 0 vlan_ioctl_mutex rtnl_mutex remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 rcu_read_lock &dentry->d_lock &p->pi_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 rcu_read_lock &dentry->d_lock &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 rcu_read_lock 
&dentry->d_lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &dentry->d_lock &lru->node[i].lock rcu_read_lock &p->pi_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex (inetaddr_chain).rwsem &net->sctp.local_addr_lock &net->sctp.addr_wq_lock &base->lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex (inetaddr_chain).rwsem &net->sctp.local_addr_lock &net->sctp.addr_wq_lock &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem &ei->i_es_lock &n->list_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem &ei->i_es_lock &n->list_lock &c->lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &root->kernfs_rwsem kernfs_idr_lock &obj_hash[i].lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &root->kernfs_rwsem kernfs_idr_lock pool_lock#2 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &ei->i_data_sem &cfs_rq->removed.lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex kernfs_idr_lock &obj_hash[i].lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex kernfs_idr_lock pool_lock#2 irq_context: 0 &sig->cred_guard_mutex tomoyo_ss fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 &sig->cred_guard_mutex tomoyo_ss fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem remove_cache_srcu irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem remove_cache_srcu quarantine_lock irq_context: 0 &sb->s_type->i_mutex_key#8 rcu_read_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 alg_types_sem &rq->__lock irq_context: 0 alg_types_sem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 &mm->mmap_lock rcu_read_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 sk_lock-AF_ALG &p->pi_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)wg-crypt-wg1#4 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 &iint->mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_read_lock batched_entropy_u8.lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_read_lock kfence_freelist_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &folio_wait_table[i] &p->pi_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &folio_wait_table[i] &p->pi_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &folio_wait_table[i] &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock sb_pagefaults mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock sb_pagefaults mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem batched_entropy_u8.lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem kfence_freelist_lock irq_context: 0 sb_writers#4 
&type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 jbd2_handle &lock->wait_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 jbd2_handle &p->pi_lock irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock sb_pagefaults jbd2_handle &mapping->private_lock irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock sb_pagefaults remove_cache_srcu pool_lock#2 irq_context: 0 sb_writers#5 &sb->s_type->i_lock_key bit_wait_table + i irq_context: 0 (wq_completion)wg-crypt-wg0#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &sb->s_type->i_mutex_key#8 pool_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock rcu_read_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &xt[i].mutex remove_cache_srcu rcu_read_lock &rcu_state.gp_wq irq_context: 0 &xt[i].mutex remove_cache_srcu rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 &xt[i].mutex remove_cache_srcu rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 &xt[i].mutex remove_cache_srcu rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock nlk_cb_mutex-GENERIC &n->list_lock irq_context: 0 cb_lock nlk_cb_mutex-GENERIC &n->list_lock &c->lock irq_context: 0 cb_lock nlk_cb_mutex-GENERIC rtnl_mutex &rdev->wiphy.mtx &rq->__lock irq_context: 0 cb_lock nlk_cb_mutex-GENERIC rtnl_mutex &rdev->wiphy.mtx &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 nlk_cb_mutex-GENERIC &n->list_lock irq_context: 0 nlk_cb_mutex-GENERIC &n->list_lock &c->lock irq_context: 0 &vma->vm_lock->lock &____s->seqcount#2 irq_context: 0 &sb->s_type->i_mutex_key#8 rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#8 rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 nlk_cb_mutex-GENERIC rtnl_mutex.wait_lock irq_context: 0 nlk_cb_mutex-GENERIC &p->pi_lock irq_context: 0 (wq_completion)events (work_completion)(&nlk->work) &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)events (work_completion)(&nlk->work) quarantine_lock irq_context: 0 nlk_cb_mutex-GENERIC &p->pi_lock &rq->__lock irq_context: 0 nlk_cb_mutex-GENERIC &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &sch->q.lock crngs.lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle &ei->i_data_sem &ei->i_es_lock &____s->seqcount#2 irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle &ei->i_data_sem &ei->i_es_lock &____s->seqcount irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &sch->q.lock batched_entropy_u16.lock irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock sb_pagefaults batched_entropy_u8.lock irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock sb_pagefaults kfence_freelist_lock irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock sb_pagefaults &meta->lock irq_context: 0 sk_lock-AF_ALG fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 sk_lock-AF_ALG fs_reclaim 
mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle remove_cache_srcu irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle remove_cache_srcu &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle remove_cache_srcu quarantine_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex pcpu_alloc_mutex pcpu_alloc_mutex.wait_lock irq_context: 0 cb_lock nlk_cb_mutex-GENERIC rtnl_mutex &____s->seqcount#2 irq_context: 0 cb_lock nlk_cb_mutex-GENERIC rtnl_mutex &____s->seqcount irq_context: 0 nlk_cb_mutex-GENERIC &____s->seqcount#2 irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_read_lock &cfs_rq->removed.lock irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_read_lock &obj_hash[i].lock irq_context: 0 cb_lock genl_mutex rtnl_mutex ipvs->sync_mutex fs_reclaim irq_context: 0 cb_lock genl_mutex rtnl_mutex ipvs->sync_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 cb_lock genl_mutex rtnl_mutex ipvs->sync_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 cb_lock genl_mutex rtnl_mutex ipvs->sync_mutex &c->lock irq_context: 0 cb_lock genl_mutex rtnl_mutex ipvs->sync_mutex pool_lock#2 irq_context: 0 cb_lock genl_mutex rtnl_mutex ipvs->sync_mutex stock_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex ipvs->sync_mutex mmu_notifier_invalidate_range_start irq_context: 0 cb_lock genl_mutex rtnl_mutex ipvs->sync_mutex &sb->s_type->i_lock_key#8 irq_context: 0 cb_lock genl_mutex rtnl_mutex ipvs->sync_mutex &dir->lock irq_context: 0 cb_lock genl_mutex rtnl_mutex ipvs->sync_mutex &obj_hash[i].lock irq_context: 0 cb_lock genl_mutex rtnl_mutex ipvs->sync_mutex k-sk_lock-AF_INET6 irq_context: 0 cb_lock genl_mutex rtnl_mutex ipvs->sync_mutex k-sk_lock-AF_INET6 k-slock-AF_INET6 irq_context: 0 cb_lock genl_mutex rtnl_mutex ipvs->sync_mutex k-sk_lock-AF_INET6 &rq->__lock irq_context: 0 cb_lock genl_mutex rtnl_mutex ipvs->sync_mutex k-sk_lock-AF_INET6 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock genl_mutex rtnl_mutex ipvs->sync_mutex k-sk_lock-AF_INET6 &table->hash[i].lock irq_context: 0 cb_lock genl_mutex rtnl_mutex ipvs->sync_mutex k-sk_lock-AF_INET6 &table->hash[i].lock k-clock-AF_INET6 irq_context: 0 cb_lock genl_mutex rtnl_mutex ipvs->sync_mutex k-sk_lock-AF_INET6 &table->hash[i].lock &table->hash2[i].lock irq_context: 0 cb_lock genl_mutex rtnl_mutex ipvs->sync_mutex k-slock-AF_INET6 irq_context: 0 cb_lock genl_mutex rtnl_mutex ipvs->sync_mutex k-sk_lock-AF_INET6 fs_reclaim irq_context: 0 cb_lock genl_mutex rtnl_mutex ipvs->sync_mutex k-sk_lock-AF_INET6 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 cb_lock genl_mutex rtnl_mutex ipvs->sync_mutex k-sk_lock-AF_INET6 pool_lock#2 irq_context: 0 cb_lock genl_mutex rtnl_mutex ipvs->sync_mutex k-sk_lock-AF_INET6 &obj_hash[i].lock irq_context: 0 cb_lock genl_mutex rtnl_mutex ipvs->sync_mutex (console_sem).lock irq_context: 0 cb_lock genl_mutex rtnl_mutex ipvs->sync_mutex console_lock console_srcu console_owner_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex ipvs->sync_mutex console_lock console_srcu console_owner irq_context: 0 cb_lock genl_mutex rtnl_mutex ipvs->sync_mutex console_lock console_srcu console_owner &port_lock_key irq_context: 0 cb_lock genl_mutex rtnl_mutex 
ipvs->sync_mutex console_lock console_srcu console_owner console_owner_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex ipvs->sync_mutex &rq->__lock irq_context: 0 cb_lock genl_mutex rtnl_mutex ipvs->sync_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock genl_mutex rtnl_mutex ipvs->sync_mutex &cfs_rq->removed.lock irq_context: 0 cb_lock genl_mutex &table->hash[i].lock irq_context: 0 cb_lock genl_mutex &table->hash[i].lock &table->hash2[i].lock irq_context: 0 cb_lock genl_mutex k-clock-AF_INET6 irq_context: 0 crypto_default_null_skcipher_lock irq_context: 0 jbd2_handle irq_context: 0 &ep->mtx wakeup_ida.xa_lock irq_context: 0 &ep->mtx &x->wait#9 irq_context: 0 &ep->mtx &k->list_lock irq_context: 0 &ep->mtx gdp_mutex irq_context: 0 &ep->mtx gdp_mutex &k->list_lock irq_context: 0 &ep->mtx gdp_mutex fs_reclaim irq_context: 0 &ep->mtx gdp_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &ep->mtx gdp_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 &ep->mtx gdp_mutex &c->lock irq_context: 0 &ep->mtx gdp_mutex pool_lock#2 irq_context: 0 &ep->mtx gdp_mutex lock irq_context: 0 &ep->mtx gdp_mutex lock kernfs_idr_lock irq_context: 0 &ep->mtx gdp_mutex &root->kernfs_rwsem irq_context: 0 &ep->mtx gdp_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 &ep->mtx gdp_mutex &sem->wait_lock irq_context: 0 &ep->mtx gdp_mutex &p->pi_lock irq_context: 0 &ep->mtx gdp_mutex &p->pi_lock &rq->__lock irq_context: 0 &ep->mtx gdp_mutex &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &ep->mtx gdp_mutex &rq->__lock irq_context: 0 &ep->mtx lock irq_context: 0 &ep->mtx lock kernfs_idr_lock irq_context: 0 &ep->mtx &root->kernfs_rwsem irq_context: 0 &ep->mtx &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 &ep->mtx bus_type_sem irq_context: 0 &ep->mtx sysfs_symlink_target_lock irq_context: 0 &ep->mtx uevent_sock_mutex irq_context: 0 &ep->mtx uevent_sock_mutex fs_reclaim irq_context: 0 &ep->mtx uevent_sock_mutex fs_reclaim &rq->__lock irq_context: 0 &ep->mtx uevent_sock_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &ep->mtx uevent_sock_mutex pool_lock#2 irq_context: 0 &ep->mtx uevent_sock_mutex nl_table_lock irq_context: 0 &ep->mtx uevent_sock_mutex rlock-AF_NETLINK irq_context: 0 &ep->mtx uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait irq_context: 0 &ep->mtx uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock irq_context: 0 &ep->mtx uevent_sock_mutex nl_table_wait.lock irq_context: 0 &ep->mtx uevent_sock_mutex &obj_hash[i].lock irq_context: 0 &ep->mtx subsys mutex#15 irq_context: 0 &ep->mtx subsys mutex#15 &rq->__lock irq_context: 0 &ep->mtx subsys mutex#15 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &journal->j_wait_transaction_locked irq_context: 0 &journal->j_barrier irq_context: 0 &journal->j_barrier rcu_node_0 irq_context: 0 &journal->j_barrier &rq->__lock irq_context: 0 &journal->j_barrier &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &journal->j_barrier &journal->j_state_lock irq_context: 0 &journal->j_barrier &journal->j_state_lock &journal->j_wait_commit irq_context: 0 &journal->j_barrier &journal->j_state_lock &journal->j_wait_commit &p->pi_lock irq_context: 0 &journal->j_barrier &journal->j_state_lock &journal->j_wait_commit &p->pi_lock &rq->__lock irq_context: 0 &journal->j_barrier &journal->j_state_lock &journal->j_wait_commit &p->pi_lock &rq->__lock 
&per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &journal->j_barrier &journal->j_state_lock irq_context: 0 &journal->j_barrier &journal->j_wait_commit irq_context: 0 &journal->j_barrier &journal->j_wait_done_commit irq_context: 0 &journal->j_barrier &journal->j_list_lock irq_context: 0 &journal->j_barrier &journal->j_checkpoint_mutex irq_context: 0 &journal->j_barrier &journal->j_checkpoint_mutex &journal->j_state_lock irq_context: 0 &journal->j_barrier &journal->j_checkpoint_mutex &journal->j_state_lock &journal->j_list_lock irq_context: 0 &journal->j_barrier &journal->j_checkpoint_mutex tk_core.seq.seqcount irq_context: 0 &journal->j_barrier &journal->j_checkpoint_mutex &fq->mq_flush_lock irq_context: 0 &journal->j_barrier &journal->j_checkpoint_mutex &fq->mq_flush_lock tk_core.seq.seqcount irq_context: 0 &journal->j_barrier &journal->j_checkpoint_mutex &fq->mq_flush_lock &q->requeue_lock irq_context: 0 &journal->j_barrier &journal->j_checkpoint_mutex &fq->mq_flush_lock &obj_hash[i].lock irq_context: 0 &journal->j_barrier &journal->j_checkpoint_mutex &fq->mq_flush_lock rcu_read_lock &pool->lock irq_context: 0 &journal->j_barrier &journal->j_checkpoint_mutex &fq->mq_flush_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 &journal->j_barrier &journal->j_checkpoint_mutex &fq->mq_flush_lock rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 &journal->j_barrier &journal->j_checkpoint_mutex &fq->mq_flush_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 &journal->j_barrier &journal->j_checkpoint_mutex &fq->mq_flush_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 &journal->j_barrier &journal->j_checkpoint_mutex &fq->mq_flush_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &journal->j_barrier &journal->j_checkpoint_mutex &rq->__lock irq_context: 0 &journal->j_barrier &journal->j_checkpoint_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &journal->j_barrier &journal->j_checkpoint_mutex &x->wait#26 irq_context: 0 &journal->j_barrier &journal->j_checkpoint_mutex mmu_notifier_invalidate_range_start irq_context: 0 &journal->j_barrier &journal->j_checkpoint_mutex pool_lock#2 irq_context: 0 &journal->j_barrier &journal->j_checkpoint_mutex bit_wait_table + i irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &journal->j_wait_transaction_locked irq_context: 0 &journal->j_barrier &journal->j_checkpoint_mutex &journal->j_state_lock irq_context: 0 &journal->j_barrier &journal->j_checkpoint_mutex &journal->j_list_lock irq_context: 0 &journal->j_barrier &journal->j_checkpoint_mutex &dd->lock irq_context: 0 &journal->j_barrier &journal->j_checkpoint_mutex &c->lock irq_context: 0 &journal->j_barrier &journal->j_checkpoint_mutex rcu_read_lock &dd->lock irq_context: 0 &journal->j_barrier &journal->j_checkpoint_mutex rcu_read_lock &obj_hash[i].lock irq_context: 0 &journal->j_barrier &journal->j_checkpoint_mutex rcu_read_lock tk_core.seq.seqcount irq_context: 0 &journal->j_barrier &journal->j_checkpoint_mutex rcu_read_lock &virtscsi_vq->vq_lock irq_context: 0 &journal->j_barrier &journal->j_checkpoint_mutex rcu_read_lock rcu_node_0 irq_context: 0 &journal->j_barrier &journal->j_checkpoint_mutex rcu_read_lock &rq->__lock irq_context: 0 &journal->j_barrier &journal->j_checkpoint_mutex rcu_read_lock pool_lock#2 irq_context: 0 &ep->mtx subsys mutex#15 &cfs_rq->removed.lock irq_context: 0 &ep->mtx subsys mutex#15 &obj_hash[i].lock irq_context: 0 &ep->mtx subsys mutex#15 pool_lock#2 
irq_context: 0 &ep->mtx subsys mutex#15 &k->k_lock irq_context: 0 &ep->mtx events_lock irq_context: 0 &ep->mtx &dentry->d_lock irq_context: 0 &ep->mtx uevent_sock_mutex &____s->seqcount irq_context: 0 &ep->mtx uevent_sock_mutex rcu_read_lock pool_lock#2 irq_context: 0 &ep->mtx uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq irq_context: 0 &ep->mtx uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock irq_context: 0 &ep->mtx uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock &rq->__lock irq_context: 0 &ep->mtx uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &ep->mtx uevent_sock_mutex rcu_read_lock rcu_node_0 irq_context: 0 &ep->mtx uevent_sock_mutex rcu_read_lock &rq->__lock irq_context: 0 &ep->mtx uevent_sock_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &type->i_mutex_dir_key#3 sb_writers#4 &journal->j_wait_transaction_locked irq_context: 0 &journal->j_barrier &journal->j_checkpoint_mutex &journal->j_list_lock &obj_hash[i].lock irq_context: 0 &journal->j_barrier &journal->j_checkpoint_mutex &journal->j_list_lock pool_lock#2 irq_context: 0 &journal->j_barrier &journal->j_checkpoint_mutex &ei->i_es_lock irq_context: 0 &journal->j_barrier &journal->j_checkpoint_mutex &ei->i_es_lock key#2 irq_context: 0 &mm->mmap_lock sb_pagefaults &journal->j_wait_transaction_locked irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_read_lock &ei->socket.wq.wait &ep->lock rcu_read_lock &ws->lock irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_read_lock &ei->socket.wq.wait &ep->lock rcu_read_lock &ws->lock tk_core.seq.seqcount irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_read_lock &ei->socket.wq.wait &ep->lock rcu_read_lock &ws->lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &ep->mtx &ws->lock irq_context: 0 &ep->mtx &ws->lock tk_core.seq.seqcount irq_context: 0 &ep->mtx &ws->lock &obj_hash[i].lock irq_context: 0 &ep->mtx &u->lock irq_context: 0 &ep->mtx &ep->lock &ws->lock irq_context: 0 &ep->mtx &ep->lock &ws->lock tk_core.seq.seqcount irq_context: 0 &ep->mtx &ep->lock &ws->lock &obj_hash[i].lock irq_context: 0 &journal->j_barrier &journal->j_checkpoint_mutex &mapping->private_lock irq_context: 0 &journal->j_barrier &journal->j_checkpoint_mutex &obj_hash[i].lock irq_context: 0 &journal->j_barrier &journal->j_checkpoint_mutex &meta->lock irq_context: 0 &journal->j_barrier &journal->j_checkpoint_mutex kfence_freelist_lock irq_context: 0 &journal->j_barrier &journal->j_checkpoint_mutex &sb->s_type->i_lock_key#3 irq_context: 0 &journal->j_barrier &journal->j_checkpoint_mutex &sb->s_type->i_lock_key#3 &xa->xa_lock#6 irq_context: 0 &journal->j_barrier &journal->j_checkpoint_mutex lock#4 irq_context: 0 &journal->j_barrier &journal->j_checkpoint_mutex lock#5 irq_context: 0 &journal->j_barrier &journal->j_checkpoint_mutex &lruvec->lru_lock irq_context: 0 &journal->j_barrier &journal->j_checkpoint_mutex &sb->s_type->i_lock_key#3 &xa->xa_lock#6 
&obj_hash[i].lock irq_context: 0 &journal->j_barrier &journal->j_checkpoint_mutex &sb->s_type->i_lock_key#3 &xa->xa_lock#6 pool_lock#2 irq_context: 0 &ep->mtx &ACCESS_PRIVATE(sdp, lock) irq_context: 0 &ep->mtx wakeup_srcu irq_context: 0 &ep->mtx wakeup_srcu_srcu_usage.lock &ACCESS_PRIVATE(sdp, lock) irq_context: 0 &ep->mtx wakeup_srcu_srcu_usage.lock rcu_read_lock &pool->lock irq_context: 0 &ep->mtx wakeup_srcu_srcu_usage.lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 &ep->mtx wakeup_srcu_srcu_usage.lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 &ep->mtx wakeup_srcu_srcu_usage.lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 &ep->mtx wakeup_srcu_srcu_usage.lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &ep->mtx &x->wait#3 irq_context: 0 &journal->j_barrier &journal->j_checkpoint_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &ep->mtx (&ws->timer) irq_context: 0 &ep->mtx &base->lock irq_context: 0 &ep->mtx &root->kernfs_rwsem kernfs_idr_lock irq_context: 0 &ep->mtx &root->kernfs_rwsem &obj_hash[i].lock irq_context: 0 &ep->mtx &root->kernfs_rwsem pool_lock#2 irq_context: 0 &ep->mtx &root->kernfs_rwsem &rq->__lock irq_context: 0 &ep->mtx subsys mutex#15 &k->k_lock klist_remove_lock irq_context: 0 &ep->mtx deferred_probe_mutex irq_context: 0 &ep->mtx device_links_lock irq_context: 0 &ep->mtx mmu_notifier_invalidate_range_start irq_context: 0 &ep->mtx remove_cache_srcu irq_context: 0 &ep->mtx remove_cache_srcu quarantine_lock irq_context: 0 &ep->mtx remove_cache_srcu &c->lock irq_context: 0 &ep->mtx remove_cache_srcu &n->list_lock irq_context: 0 &ep->mtx remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 &ep->mtx remove_cache_srcu &obj_hash[i].lock irq_context: 0 &ep->mtx remove_cache_srcu &rq->__lock irq_context: 0 &ep->mtx uevent_sock_mutex mmu_notifier_invalidate_range_start irq_context: 0 &ep->mtx deleted_ws.lock irq_context: 0 events_lock irq_context: 0 wakeup_srcu irq_context: 0 wakeup_srcu_srcu_usage.lock &ACCESS_PRIVATE(sdp, lock) irq_context: 0 wakeup_srcu_srcu_usage.lock rcu_read_lock &pool->lock irq_context: 0 wakeup_srcu_srcu_usage.lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 wakeup_srcu_srcu_usage.lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 wakeup_srcu_srcu_usage.lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 wakeup_srcu_srcu_usage.lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &journal->j_barrier &journal->j_checkpoint_mutex rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 &journal->j_barrier &journal->j_checkpoint_mutex rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 &journal->j_barrier &journal->j_checkpoint_mutex rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (&ws->timer) irq_context: 0 subsys mutex#15 irq_context: 0 subsys mutex#15 &k->k_lock irq_context: 0 subsys mutex#15 &k->k_lock klist_remove_lock irq_context: 0 uevent_sock_mutex &c->lock irq_context: 0 gdp_mutex sysfs_symlink_target_lock irq_context: 0 gdp_mutex &obj_hash[i].lock irq_context: 0 &ws->lock irq_context: 0 deleted_ws.lock irq_context: 0 wakeup_ida.xa_lock irq_context: 0 &journal->j_barrier &journal->j_checkpoint_mutex rcu_read_lock &rcu_state.gp_wq irq_context: 0 &journal->j_barrier &journal->j_checkpoint_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 &journal->j_barrier 
&journal->j_checkpoint_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 &journal->j_barrier &journal->j_checkpoint_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock rcu_read_lock_bh &sch->q.lock batched_entropy_u16.lock irq_context: 0 &journal->j_barrier &journal->j_checkpoint_mutex &pcp->lock &zone->lock irq_context: 0 &journal->j_barrier &journal->j_checkpoint_mutex &pcp->lock &zone->lock &____s->seqcount irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh &sch->q.lock batched_entropy_u16.lock irq_context: 0 &journal->j_barrier &journal->j_checkpoint_mutex rcu_read_lock &pool->lock irq_context: 0 &journal->j_barrier &journal->j_checkpoint_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 &journal->j_barrier &journal->j_checkpoint_mutex rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 &journal->j_barrier &journal->j_checkpoint_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 &journal->j_barrier &journal->j_checkpoint_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 &journal->j_barrier &journal->j_checkpoint_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &journal->j_barrier &journal->j_checkpoint_mutex &base->lock irq_context: 0 &journal->j_barrier &journal->j_checkpoint_mutex &base->lock &obj_hash[i].lock irq_context: 0 &journal->j_barrier &journal->j_checkpoint_mutex (&timer.timer) irq_context: 0 &journal->j_wait_transaction_locked irq_context: 0 &journal->j_wait_transaction_locked &p->pi_lock irq_context: 0 &journal->j_wait_transaction_locked &p->pi_lock &rq->__lock irq_context: 0 &journal->j_wait_transaction_locked &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &type->i_mutex_dir_key#3 sb_writers#4 jbd2_handle remove_cache_srcu irq_context: 0 &type->i_mutex_dir_key#3 sb_writers#4 jbd2_handle remove_cache_srcu quarantine_lock irq_context: 0 &type->i_mutex_dir_key#3 sb_writers#4 jbd2_handle remove_cache_srcu &c->lock irq_context: 0 &type->i_mutex_dir_key#3 sb_writers#4 jbd2_handle remove_cache_srcu &n->list_lock irq_context: 0 &type->i_mutex_dir_key#3 sb_writers#4 jbd2_handle remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 &type->i_mutex_dir_key#3 sb_writers#4 jbd2_handle remove_cache_srcu &obj_hash[i].lock irq_context: 0 &mm->mmap_lock sb_pagefaults jbd2_handle &obj_hash[i].lock irq_context: 0 &mm->mmap_lock sb_pagefaults jbd2_handle pool_lock#2 irq_context: 0 &type->i_mutex_dir_key#3 sb_writers#4 jbd2_handle &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal rcu_read_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 tomoyo_ss rcu_read_lock pgd_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 tomoyo_ss rcu_read_lock stock_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 tomoyo_ss rcu_read_lock &obj_hash[i].lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 tomoyo_ss rcu_read_lock key irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 tomoyo_ss rcu_read_lock pcpu_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 
tomoyo_ss rcu_read_lock percpu_counters_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 tomoyo_ss rcu_read_lock pcpu_lock stock_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 tomoyo_ss rcu_read_lock &cfs_rq->removed.lock irq_context: 0 rcu_read_lock &dtab->index_lock &c->lock irq_context: 0 &ep->mtx &n->list_lock irq_context: 0 &ep->mtx &n->list_lock &c->lock irq_context: 0 &ep->mtx lock kernfs_idr_lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &rq->__lock &cfs_rq->removed.lock irq_context: 0 &ep->mtx &____s->seqcount#2 irq_context: 0 &u->iolock rcu_read_lock &rcu_state.expedited_wq irq_context: 0 &u->iolock rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 &u->iolock rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 &u->iolock rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 crypto_default_null_skcipher_lock irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_read_lock &ei->socket.wq.wait &p->pi_lock &cfs_rq->removed.lock irq_context: 0 &ep->mtx &u->lock &u->peer_wait irq_context: 0 &ep->mtx uevent_sock_mutex &c->lock irq_context: 0 cb_lock genl_mutex rtnl_mutex ipvs->sync_mutex &n->list_lock irq_context: 0 tomoyo_ss rcu_read_lock stock_lock irq_context: 0 (wq_completion)events (work_completion)(&pwq->unbound_release_work) &cfs_rq->removed.lock irq_context: 0 sk_lock-AF_ALG &rcu_state.expedited_wq &p->pi_lock irq_context: 0 sk_lock-AF_ALG &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 sk_lock-AF_ALG &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &root->kernfs_rwsem rcu_node_0 irq_context: 0 &root->kernfs_rwsem &sem->wait_lock irq_context: 0 tomoyo_ss rcu_read_lock pcpu_lock stock_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex ipvs->sync_mutex &n->list_lock &c->lock irq_context: 0 sk_lock-AF_ALG &cfs_rq->removed.lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&map->work) rcu_state.barrier_mutex.wait_lock irq_context: 0 &ep->mtx gdp_mutex lock kernfs_idr_lock pool_lock#2 irq_context: 0 &ep->mtx &root->kernfs_rwsem &sem->wait_lock irq_context: 0 cb_lock genl_mutex ipvs->sync_mutex rtnl_mutex.wait_lock irq_context: 0 cb_lock genl_mutex ipvs->sync_mutex &p->pi_lock irq_context: 0 cb_lock genl_mutex ipvs->sync_mutex &p->pi_lock &rq->__lock irq_context: 0 cb_lock genl_mutex ipvs->sync_mutex &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock genl_mutex ipvs->sync_mutex &rq->__lock irq_context: 0 cb_lock genl_mutex ipvs->sync_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock rcu_read_lock &rcu_state.expedited_wq irq_context: 0 cb_lock rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 cb_lock rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 cb_lock rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &ep->mtx uevent_sock_mutex rcu_read_lock &rcu_state.expedited_wq irq_context: 0 &ep->mtx uevent_sock_mutex 
rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 &ep->mtx uevent_sock_mutex rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 &ep->mtx uevent_sock_mutex rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 uevent_sock_mutex &n->list_lock irq_context: 0 uevent_sock_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_state.exp_mutex &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &sch->q.lock batched_entropy_u16.lock irq_context: 0 &journal->j_barrier jbd2_handle irq_context: 0 &journal->j_barrier jbd2_handle &rq->__lock irq_context: 0 &journal->j_barrier jbd2_handle &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &ep->mtx uevent_sock_mutex uevent_sock_mutex.wait_lock irq_context: 0 &ep->mtx uevent_sock_mutex &rq->__lock irq_context: 0 &ep->mtx uevent_sock_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &journal->j_barrier &journal->j_checkpoint_mutex &____s->seqcount#2 irq_context: 0 &journal->j_barrier &journal->j_checkpoint_mutex &____s->seqcount irq_context: 0 &journal->j_barrier &journal->j_checkpoint_mutex rcu_read_lock &c->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 &journal->j_wait_transaction_locked irq_context: 0 &journal->j_barrier &journal->j_checkpoint_mutex &rq->__lock &cfs_rq->removed.lock irq_context: 0 &journal->j_barrier &journal->j_checkpoint_mutex lock#4 &lruvec->lru_lock irq_context: 0 &journal->j_barrier &journal->j_checkpoint_mutex rcu_node_0 irq_context: 0 &journal->j_barrier &lock->wait_lock irq_context: 0 &journal->j_barrier &journal->j_checkpoint_mutex rcu_read_lock &rcu_state.expedited_wq irq_context: 0 &journal->j_barrier &journal->j_checkpoint_mutex rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 &journal->j_barrier &journal->j_checkpoint_mutex rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 &journal->j_barrier &journal->j_checkpoint_mutex rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock &n->list_lock irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock &n->list_lock &c->lock irq_context: 0 &journal->j_wait_transaction_locked &p->pi_lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle &bgl->locks[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem &bgl->locks[i].lock irq_context: 0 &ep->mtx fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 &ep->mtx fill_pool_map-wait-type-override &c->lock irq_context: 0 &ep->mtx fill_pool_map-wait-type-override pool_lock irq_context: softirq (t) irq_context: softirq (t) &obj_hash[i].lock irq_context: softirq (t) &base->lock irq_context: softirq (t) &base->lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &sch->q.lock batched_entropy_u16.lock irq_context: 0 pernet_ops_rwsem fill_pool_map-wait-type-override batched_entropy_u8.lock irq_context: 0 pernet_ops_rwsem fill_pool_map-wait-type-override 
kfence_freelist_lock irq_context: 0 sb_writers#8 kn->active#5 fs_reclaim &rq->__lock irq_context: 0 sb_writers#8 kn->active#5 fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &ep->mtx rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &ep->mtx batched_entropy_u8.lock irq_context: 0 &ep->mtx kfence_freelist_lock irq_context: 0 &ep->mtx &meta->lock irq_context: 0 cb_lock genl_mutex rtnl_mutex ipvs->sync_mutex &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rlock-AF_PACKET irq_context: 0 &ep->mtx uevent_sock_mutex &____s->seqcount#2 irq_context: 0 &ep->mtx uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock &cfs_rq->removed.lock irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock genl_mutex rtnl_mutex ipvs->sync_mutex k-sk_lock-AF_INET6 rcu_read_lock rcu_node_0 irq_context: 0 cb_lock genl_mutex rtnl_mutex ipvs->sync_mutex k-sk_lock-AF_INET6 rcu_read_lock &rq->__lock irq_context: 0 cb_lock genl_mutex rtnl_mutex ipvs->sync_mutex k-sk_lock-AF_INET6 rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock sb_writers#4 &journal->j_wait_transaction_locked irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_state.exp_mutex &cfs_rq->removed.lock irq_context: 0 &mm->mmap_lock sb_writers#4 jbd2_handle mmu_notifier_invalidate_range_start irq_context: 0 &mm->mmap_lock sb_writers#4 jbd2_handle pool_lock#2 irq_context: 0 sb_writers#8 kn->active#5 remove_cache_srcu &rq->__lock irq_context: 0 sb_writers#8 kn->active#5 remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &journal->j_barrier &journal->j_checkpoint_mutex batched_entropy_u8.lock irq_context: 0 &pipe->mutex/1 &sighand->siglock batched_entropy_u8.lock irq_context: 0 &pipe->mutex/1 &sighand->siglock kfence_freelist_lock irq_context: 0 tasklist_lock &sighand->siglock &meta->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rlock-AF_PACKET irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rlock-AF_PACKET irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_CAN rcu_state.exp_mutex rcu_read_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_CAN rcu_state.exp_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 &rnp->exp_wq[1] irq_context: 0 cb_lock genl_mutex rtnl_mutex ipvs->sync_mutex k-sk_lock-AF_INET6 &c->lock irq_context: 0 &ep->mtx &sem->wait_lock irq_context: 0 &ep->mtx &p->pi_lock irq_context: 0 &ep->mtx &p->pi_lock &rq->__lock irq_context: 0 &ep->mtx &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &ep->mtx &root->kernfs_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &ep->mtx lock kernfs_idr_lock &c->lock irq_context: 0 rtnl_mutex (switchdev_blocking_notif_chain).rwsem &rq->__lock irq_context: 0 rtnl_mutex (switchdev_blocking_notif_chain).rwsem &rq->__lock 
&per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock &im->lock &____s->seqcount#2 irq_context: 0 &mm->mmap_lock sb_writers#4 jbd2_handle &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 jbd2_handle &c->lock irq_context: 0 &journal->j_barrier &journal->j_checkpoint_mutex rcu_read_lock &obj_hash[i].lock pool_lock irq_context: 0 sk_lock-AF_RDS batched_entropy_u16.lock irq_context: 0 (wq_completion)krdsd (work_completion)(&(&cp->cp_send_w)->work) rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)krdsd (work_completion)(&(&cp->cp_send_w)->work) rcu_read_lock &base->lock irq_context: 0 (wq_completion)krdsd (work_completion)(&(&cp->cp_send_w)->work) rcu_read_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)krdsd (work_completion)(&(&cp->cp_send_w)->work) &rs->rs_recv_lock &ei->socket.wq.wait &p->pi_lock irq_context: 0 (wq_completion)krdsd (work_completion)(&(&cp->cp_send_w)->work) &rs->rs_recv_lock &ei->socket.wq.wait &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)krdsd (work_completion)(&(&cp->cp_send_w)->work) &rs->rs_recv_lock &ei->socket.wq.wait &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)krdsd (work_completion)(&(&cp->cp_send_w)->work) rcu_read_lock pool_lock#2 irq_context: 0 &journal->j_barrier &journal->j_checkpoint_mutex &journal->j_list_lock key#14 irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ret->b_state_lock bit_wait_table + i &p->pi_lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ret->b_state_lock bit_wait_table + i &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ret->b_state_lock bit_wait_table + i &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &sbi->s_orphan_lock &mapping->private_lock irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem rcu_read_lock &pool->lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem rcu_read_lock &pool->lock fill_pool_map-wait-type-override &c->lock irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem rcu_read_lock &pool->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock sb_pagefaults &rq->__lock &cfs_rq->removed.lock irq_context: 0 sb_writers#8 kn->active#5 rcu_node_0 irq_context: 0 sb_writers#8 kn->active#5 rcu_read_lock rcu_node_0 irq_context: 0 br_ioctl_mutex rtnl_mutex irq_context: 0 br_ioctl_mutex rtnl_mutex rtnl_mutex.wait_lock irq_context: 0 br_ioctl_mutex rtnl_mutex &rq->__lock irq_context: 0 br_ioctl_mutex rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 br_ioctl_mutex rtnl_mutex &mm->mmap_lock irq_context: 0 br_ioctl_mutex rtnl_mutex.wait_lock irq_context: 0 br_ioctl_mutex &p->pi_lock irq_context: 0 br_ioctl_mutex &p->pi_lock &rq->__lock irq_context: 0 br_ioctl_mutex &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 br_ioctl_mutex &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_lock_key#22 &xa->xa_lock#6 fill_pool_map-wait-type-override &c->lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle 
&ei->i_data_sem rcu_node_0 irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex defrag6_mutex nf_hook_mutex rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex defrag6_mutex nf_hook_mutex rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex defrag6_mutex nf_hook_mutex rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex defrag6_mutex nf_hook_mutex rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#5 fill_pool_map-wait-type-override rcu_read_lock &rq->__lock irq_context: 0 sb_writers#5 fill_pool_map-wait-type-override rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#5 fill_pool_map-wait-type-override &rq->__lock irq_context: 0 sb_writers#5 fill_pool_map-wait-type-override &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &pipe->mutex/1 &sighand->siglock &c->lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 remove_cache_srcu pool_lock#2 irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh &sch->q.lock batched_entropy_u16.lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_NETLINK rcu_read_lock &rcu_state.gp_wq irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_NETLINK rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_NETLINK rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_NETLINK rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_lock_key &xa->xa_lock#6 fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 &sb->s_type->i_lock_key &xa->xa_lock#6 fill_pool_map-wait-type-override &c->lock irq_context: 0 &sb->s_type->i_lock_key &xa->xa_lock#6 fill_pool_map-wait-type-override &n->list_lock irq_context: 0 &sb->s_type->i_lock_key &xa->xa_lock#6 fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 &sb->s_type->i_lock_key &xa->xa_lock#6 fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &xt[i].mutex &lock->wait_lock irq_context: 0 rtnl_mutex &block->cb_lock flow_indr_block_lock &n->list_lock irq_context: 0 rtnl_mutex &block->cb_lock flow_indr_block_lock &n->list_lock &c->lock irq_context: 0 rtnl_mutex &block->cb_lock flow_indr_block_lock &rq->__lock irq_context: 0 rtnl_mutex &block->cb_lock flow_indr_block_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &bridge_netdev_addr_lock_key &n->list_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &bridge_netdev_addr_lock_key &n->list_lock &c->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &____s->seqcount#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem uevent_sock_mutex quarantine_lock irq_context: 0 rtnl_mutex fill_pool_map-wait-type-override &rq->__lock irq_context: 0 rtnl_mutex fill_pool_map-wait-type-override &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &sb->s_type->i_lock_key &xa->xa_lock#6 
&obj_hash[i].lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &sb->s_type->i_lock_key &xa->xa_lock#6 pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ei->i_es_lock key#7 irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle &ei->i_data_sem &ei->i_es_lock key#7 irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem rcu_read_lock &cfs_rq->removed.lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem rcu_read_lock &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem remove_cache_srcu &cfs_rq->removed.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex rcu_read_lock &rcu_state.gp_wq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem ovs_mutex quarantine_lock irq_context: 0 br_ioctl_mutex &p->pi_lock &cfs_rq->removed.lock irq_context: 0 br_ioctl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem rcu_node_0 irq_context: 0 sb_writers#4 fill_pool_map-wait-type-override &n->list_lock irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex nf_hook_mutex &____s->seqcount#2 irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex nf_hook_mutex &____s->seqcount irq_context: softirq rcu_read_lock_bh rcu_read_lock &pool->lock/1 irq_context: softirq rcu_read_lock_bh rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: softirq rcu_read_lock_bh rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: softirq rcu_read_lock_bh rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)wg-kex-wg0#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock &c->lock irq_context: 0 (wq_completion)wg-kex-wg0#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock &n->list_lock irq_context: 0 (wq_completion)wg-kex-wg0#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; 
__asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock &n->list_lock &c->lock irq_context: 0 (wq_completion)wg-kex-wg1#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &c->lock irq_context: 0 (wq_completion)wg-kex-wg1#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &____s->seqcount#2 irq_context: 0 (wq_completion)wg-kex-wg1#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &____s->seqcount irq_context: 0 (wq_completion)wg-kex-wg1#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) pool_lock#2 irq_context: 0 (wq_completion)wg-kex-wg1#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 (wq_completion)wg-kex-wg1#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)wg-kex-wg1#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex defrag4_mutex nf_hook_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &xa->xa_lock#6 &pcp->lock &zone->lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &xa->xa_lock#6 &pcp->lock &zone->lock &____s->seqcount irq_context: 0 sb_writers#4 fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: softirq security/integrity/ima/ima_queue_keys.c:35 irq_context: softirq 
security/integrity/ima/ima_queue_keys.c:35 rcu_read_lock &pool->lock irq_context: softirq security/integrity/ima/ima_queue_keys.c:35 rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: softirq security/integrity/ima/ima_queue_keys.c:35 rcu_read_lock &pool->lock &obj_hash[i].lock pool_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle batched_entropy_u32.lock crngs.lock base_crng.lock irq_context: softirq security/integrity/ima/ima_queue_keys.c:35 rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 (wq_completion)events (ima_keys_delayed_work).work irq_context: 0 (wq_completion)events (ima_keys_delayed_work).work ima_keys_lock irq_context: 0 (wq_completion)events (ima_keys_delayed_work).work &obj_hash[i].lock irq_context: 0 (wq_completion)events (ima_keys_delayed_work).work pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex sysctl_lock fill_pool_map-wait-type-override &n->list_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex sysctl_lock fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ei->i_data_sem rcu_read_lock &rcu_state.gp_wq irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ei->i_data_sem rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ei->i_data_sem rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ei->i_data_sem rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &sch->q.lock batched_entropy_u16.lock crngs.lock irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_state.exp_mutex &rq->__lock &cfs_rq->removed.lock irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_state.exp_mutex pool_lock#2 irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&gc_work->dwork)->work) rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&gc_work->dwork)->work) rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex batched_entropy_u8.lock crngs.lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh &sch->q.lock batched_entropy_u16.lock crngs.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_state.exp_mutex pool_lock#2 irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&gc_work->dwork)->work) rcu_read_lock &rcu_state.expedited_wq irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&gc_work->dwork)->work) rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&gc_work->dwork)->work) rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&gc_work->dwork)->work) rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_unbound connector_reaper_work rcu_node_0 irq_context: softirq (&app->join_timer) &app->lock fill_pool_map-wait-type-override pool_lock#2 irq_context: softirq (&app->join_timer) &app->lock fill_pool_map-wait-type-override &c->lock irq_context: softirq 
(&app->join_timer) &app->lock fill_pool_map-wait-type-override &n->list_lock irq_context: softirq (&app->join_timer) &app->lock fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: softirq (&app->join_timer) &app->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 sb_writers#8 &____s->seqcount#2 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &mm->mmap_lock pool_lock#2 irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&gc_work->dwork)->work) &rq->__lock &cfs_rq->removed.lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock &____s->seqcount#2 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock &____s->seqcount irq_context: 0 tomoyo_ss remove_cache_srcu rcu_node_0 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 remove_cache_srcu rcu_read_lock &rcu_state.gp_wq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 remove_cache_srcu rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 remove_cache_srcu rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 remove_cache_srcu rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_unbound (reaper_work).work rcu_node_0 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &mapping->private_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &mm->mmap_lock &dd->lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &mm->mmap_lock rcu_read_lock &pool->lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &mm->mmap_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &mm->mmap_lock rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &mm->mmap_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &mm->mmap_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &mm->mmap_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &dev_addr_list_lock_key/1 rcu_read_lock _xmit_ETHER &n->list_lock irq_context: 0 rtnl_mutex &dev_addr_list_lock_key/1 rcu_read_lock _xmit_ETHER &n->list_lock &c->lock irq_context: 0 rtnl_mutex &idev->mc_lock &dev_addr_list_lock_key/1 rcu_read_lock _xmit_ETHER irq_context: 0 rtnl_mutex &dev_addr_list_lock_key#2/1 rcu_read_lock _xmit_ETHER pool_lock#2 irq_context: 0 (wq_completion)gid-cache-wq (work_completion)(&ndev_work->work) devices_rwsem rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)gid-cache-wq (work_completion)(&ndev_work->work) devices_rwsem rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)gid-cache-wq (work_completion)(&ndev_work->work) devices_rwsem rcu_read_lock &rcu_state.expedited_wq irq_context: 0 (wq_completion)gid-cache-wq (work_completion)(&ndev_work->work) devices_rwsem rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)gid-cache-wq (work_completion)(&ndev_work->work) devices_rwsem rcu_read_lock &rcu_state.expedited_wq &p->pi_lock 
&rq->__lock irq_context: 0 (wq_completion)gid-cache-wq (work_completion)(&ndev_work->work) devices_rwsem rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_ALG remove_cache_srcu rcu_read_lock &rcu_state.expedited_wq irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_TUNNEL6#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_TUNNEL6#2 rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_TUNNEL6#2 &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_TUNNEL6#2 pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-slock-AF_INET6 &meta->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-slock-AF_INET6 kfence_freelist_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_TUNNEL6#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_TUNNEL6#2 rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_TUNNEL6#2 &obj_hash[i].lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_TUNNEL6#2 pool_lock#2 irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock &im->lock batched_entropy_u8.lock irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock &im->lock kfence_freelist_lock irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock rcu_read_lock_bh &meta->lock irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock rcu_read_lock_bh kfence_freelist_lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &meta->lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock kfence_freelist_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock remove_cache_srcu pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work rcu_state.barrier_mutex.wait_lock irq_context: 0 (wq_completion)netns net_cleanup_work &p->pi_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex defrag6_mutex nf_hook_mutex cpu_hotplug_lock &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem ovs_mutex &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex nf_hook_mutex cpu_hotplug_lock &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex defrag4_mutex &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex defrag6_mutex &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock 
&folio_wait_table[i] irq_context: 0 &sb->s_type->i_mutex_key#9 rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&pwq->unbound_release_work) pool_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle &ei->i_data_sem mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock rcu_read_lock &rcu_state.gp_wq irq_context: 0 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 sk_lock-AF_BLUETOOTH-BTPROTO_HCI hci_dev_list_lock irq_context: 0 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock rcu_read_lock &rcu_state.expedited_wq irq_context: 0 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#8 &of->mutex kn->active#5 uevent_sock_mutex &cfs_rq->removed.lock irq_context: 0 &type->i_mutex_dir_key#4 &root->kernfs_rwsem &rq->__lock &cfs_rq->removed.lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &ret->b_state_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &ret->b_state_lock &journal->j_list_lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) batched_entropy_u8.lock irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 &pcp->lock &zone->lock irq_context: softirq &(&nsim_dev->trap_data->trap_report_dw)->timer rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &obj_hash[i].lock irq_context: softirq &(&nsim_dev->trap_data->trap_report_dw)->timer rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &base->lock irq_context: softirq &(&nsim_dev->trap_data->trap_report_dw)->timer rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle rcu_read_lock &rcu_state.expedited_wq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_read_lock &n->list_lock &c->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &fsnotify_mark_srcu &rq->__lock &cfs_rq->removed.lock irq_context: 0 &mm->mmap_lock sb_writers#4 mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 &mm->mmap_lock sb_writers#4 
mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_power_efficient (reg_check_chans).work rtnl_mutex rtnl_mutex.wait_lock irq_context: 0 (wq_completion)events_power_efficient (reg_check_chans).work rtnl_mutex &pool->lock irq_context: 0 (wq_completion)events_power_efficient (reg_check_chans).work rtnl_mutex &pool->lock &p->pi_lock irq_context: 0 (wq_completion)events_power_efficient (reg_check_chans).work rtnl_mutex &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events_power_efficient (reg_check_chans).work rtnl_mutex &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_power_efficient (reg_check_chans).work rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)events_power_efficient (reg_check_chans).work rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_power_efficient (reg_check_chans).work rtnl_mutex.wait_lock irq_context: 0 (wq_completion)events_power_efficient (reg_check_chans).work &p->pi_lock irq_context: 0 (wq_completion)events_power_efficient (reg_check_chans).work &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events_power_efficient (reg_check_chans).work &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock quarantine_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &rcu_state.gp_wq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &rcu_state.expedited_wq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&aux->work) rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)events (work_completion)(&aux->work) rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&aux->work) rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 lock link_idr_lock &n->list_lock irq_context: 0 lock link_idr_lock &n->list_lock &c->lock irq_context: softirq (&q->timer) irq_context: softirq (&q->timer) rcu_read_lock &q->lock#2 irq_context: softirq (&q->timer) rcu_read_lock &q->lock#2 &obj_hash[i].lock irq_context: softirq (&q->timer) rcu_read_lock &q->lock#2 
rcu_read_lock rcu_read_lock rhashtable_bucket irq_context: softirq (&q->timer) rcu_read_lock &q->lock#2 rcu_read_lock rcu_read_lock rcu_read_lock &pool->lock irq_context: softirq (&q->timer) rcu_read_lock &q->lock#2 rcu_read_lock rcu_read_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: softirq (&q->timer) rcu_read_lock &q->lock#2 rcu_read_lock rcu_read_lock rcu_read_lock &pool->lock pool_lock#2 irq_context: softirq (&q->timer) rcu_read_lock &q->lock#2 rcu_read_lock rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: softirq (&q->timer) rcu_read_lock &q->lock#2 rcu_read_lock rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: softirq (&q->timer) rcu_read_lock &q->lock#2 rcu_read_lock rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq (&q->timer) rcu_read_lock pool_lock#2 irq_context: softirq (&q->timer) &obj_hash[i].lock irq_context: softirq (&q->timer) pool_lock#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ei->i_es_lock key#8 irq_context: 0 &sig->cred_guard_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem quarantine_lock irq_context: 0 mapping.invalidate_lock &n->list_lock irq_context: 0 mapping.invalidate_lock &n->list_lock &c->lock irq_context: 0 &mm->mmap_lock sb_writers#4 jbd2_handle &____s->seqcount irq_context: 0 &mm->mmap_lock sb_writers#4 jbd2_handle rcu_read_lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#8 &mm->mmap_lock rcu_node_0 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &journal->j_state_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &journal->j_state_lock tk_core.seq.seqcount irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &journal->j_state_lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &journal->j_state_lock &base->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &journal->j_state_lock &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 kauditd_wait.lock &p->pi_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 kauditd_wait.lock &p->pi_lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 kauditd_wait.lock &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 kauditd_wait.lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 mapping.invalidate_lock rcu_read_lock &n->list_lock irq_context: 0 mapping.invalidate_lock rcu_read_lock &n->list_lock &c->lock irq_context: 0 mapping.invalidate_lock &pcp->lock &zone->lock irq_context: 0 mapping.invalidate_lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &sb->s_type->i_mutex_key#8 &mm->mmap_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 &sb->s_type->i_mutex_key#8 &mm->mmap_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#8 &mm->mmap_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock bit_wait_table + i irq_context: 0 &iint->mutex fs_reclaim &rq->__lock irq_context: 0 &iint->mutex fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq 
irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &lruvec->lru_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &sbi->s_orphan_lock bit_wait_table + i irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle &ei->i_data_sem &____s->seqcount irq_context: 0 &journal->j_list_lock bit_wait_table + i irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle &ei->i_data_sem rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock sb_pagefaults &base->lock irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock sb_pagefaults &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &journal->j_wait_updates &p->pi_lock &cfs_rq->removed.lock irq_context: 0 &sb->s_type->i_mutex_key#8 &mm->mmap_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#8 &mm->mmap_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&(&group->avgs_work)->work) &group->avgs_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#8 &mm->mmap_lock ptlock_ptr(page) irq_context: 0 (wq_completion)kblockd (work_completion)(&(&q->requeue_work)->work) rcu_read_lock &c->lock irq_context: 0 (wq_completion)kblockd (work_completion)(&(&q->requeue_work)->work) rcu_read_lock &____s->seqcount#2 irq_context: 0 (wq_completion)kblockd (work_completion)(&(&q->requeue_work)->work) rcu_read_lock &____s->seqcount irq_context: 0 &sb->s_type->i_mutex_key#8 &mm->mmap_lock &xa->xa_lock#6 &n->list_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &mm->mmap_lock &xa->xa_lock#6 &n->list_lock &c->lock irq_context: 0 lock#3 &rq->__lock &obj_hash[i].lock irq_context: 0 lock#3 &rq->__lock &base->lock irq_context: 0 lock#3 &rq->__lock &base->lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#8 rcu_read_lock fill_pool_map-wait-type-override rcu_node_0 irq_context: 0 &sb->s_type->i_mutex_key#8 rcu_read_lock fill_pool_map-wait-type-override &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#8 rcu_read_lock fill_pool_map-wait-type-override &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ei->i_es_lock key#8 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &rq->__lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 ima_extend_list_mutex &c->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 ima_extend_list_mutex &n->list_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 ima_extend_list_mutex &n->list_lock &c->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 ima_extend_list_mutex &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 ima_extend_list_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle 
&ei->i_data_sem &____s->seqcount#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &____s->seqcount irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem bit_wait_table + i irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &lock->wait_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &p->pi_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock sb_pagefaults jbd2_handle &journal->j_wait_updates &p->pi_lock irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock sb_pagefaults jbd2_handle &journal->j_wait_updates &p->pi_lock &rq->__lock irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock sb_pagefaults jbd2_handle &journal->j_wait_updates &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &ei->i_es_lock key#2 irq_context: 0 &sb->s_type->i_mutex_key#8 &mm->mmap_lock &cfs_rq->removed.lock irq_context: 0 sk_lock-AF_INET slock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock &n->list_lock irq_context: 0 sk_lock-AF_INET slock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock &n->list_lock &c->lock irq_context: 0 &tsk->futex_exit_mutex &mm->mmap_lock &cfs_rq->removed.lock irq_context: 0 &tsk->futex_exit_mutex &mm->mmap_lock &obj_hash[i].lock irq_context: 0 tomoyo_ss remove_cache_srcu &meta->lock irq_context: 0 tomoyo_ss remove_cache_srcu kfence_freelist_lock irq_context: 0 &nft_net->commit_mutex nl_table_lock irq_context: 0 &nft_net->commit_mutex nl_table_wait.lock irq_context: 0 &nft_net->commit_mutex rcu_read_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 &nft_net->commit_mutex rcu_read_lock rcu_read_lock_bh pool_lock#2 irq_context: 0 &nft_net->commit_mutex rlock-AF_NETLINK irq_context: 0 &nft_net->commit_mutex remove_cache_srcu irq_context: 0 &nft_net->commit_mutex remove_cache_srcu quarantine_lock irq_context: 0 &nft_net->commit_mutex remove_cache_srcu &c->lock irq_context: 0 &nft_net->commit_mutex remove_cache_srcu &n->list_lock irq_context: 0 &nft_net->commit_mutex remove_cache_srcu &obj_hash[i].lock irq_context: 0 &nft_net->commit_mutex remove_cache_srcu pool_lock#2 irq_context: 0 &nft_net->commit_mutex remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 &nft_net->commit_mutex &p->alloc_lock irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem &nft_net->commit_mutex rcu_state.exp_mutex &rnp->exp_wq[3] irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem nfnl_subsys_ipset &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem nfnl_subsys_ipset &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock pool_lock irq_context: 0 cb_lock &dir->lock#2 irq_context: 0 namespace_sem rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 namespace_sem rcu_read_lock &rcu_state.gp_wq irq_context: 0 namespace_sem rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 
namespace_sem rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 namespace_sem rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 bpf_stats_enabled_mutex irq_context: 0 bpf_stats_enabled_mutex &newf->file_lock irq_context: 0 bpf_stats_enabled_mutex fs_reclaim irq_context: 0 bpf_stats_enabled_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 bpf_stats_enabled_mutex stock_lock irq_context: 0 bpf_stats_enabled_mutex &rq->__lock irq_context: 0 bpf_stats_enabled_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 bpf_stats_enabled_mutex pool_lock#2 irq_context: 0 bpf_stats_enabled_mutex &sb->s_type->i_lock_key#15 irq_context: 0 bpf_stats_enabled_mutex &sb->s_type->i_lock_key#15 &dentry->d_lock irq_context: 0 bpf_stats_enabled_mutex cpu_hotplug_lock irq_context: 0 bpf_stats_enabled_mutex cpu_hotplug_lock jump_label_mutex irq_context: 0 bpf_stats_enabled_mutex cpu_hotplug_lock jump_label_mutex text_mutex irq_context: 0 bpf_stats_enabled_mutex cpu_hotplug_lock jump_label_mutex text_mutex ptlock_ptr(page)#2 irq_context: 0 bpf_stats_enabled_mutex cpu_hotplug_lock jump_label_mutex text_mutex &rq->__lock irq_context: 0 bpf_stats_enabled_mutex cpu_hotplug_lock jump_label_mutex text_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 bpf_stats_enabled_mutex cpu_hotplug_lock jump_label_mutex.wait_lock irq_context: 0 bpf_stats_enabled_mutex cpu_hotplug_lock &p->pi_lock irq_context: 0 bpf_stats_enabled_mutex cpu_hotplug_lock &p->pi_lock &rq->__lock irq_context: 0 bpf_stats_enabled_mutex cpu_hotplug_lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 bpf_stats_enabled_mutex cpu_hotplug_lock &rq->__lock irq_context: 0 bpf_stats_enabled_mutex cpu_hotplug_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_PACKET stock_lock irq_context: 0 rtnl_mutex &mm->mmap_lock fs_reclaim irq_context: 0 rtnl_mutex &mm->mmap_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 rtnl_mutex &mm->mmap_lock &____s->seqcount irq_context: 0 rtnl_mutex &mm->mmap_lock stock_lock irq_context: 0 rtnl_mutex &mm->mmap_lock ptlock_ptr(page)#2 irq_context: 0 rtnl_mutex &mm->mmap_lock ptlock_ptr(page)#2 lock#4 irq_context: 0 &xt[i].mutex &rcu_state.expedited_wq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_lock_key#22 bit_wait_table + i irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_read_lock &cfs_rq->removed.lock irq_context: 0 &net->xfrm.xfrm_cfg_mutex &net->xfrm.xfrm_policy_lock pool_lock#2 irq_context: 0 &net->xfrm.xfrm_cfg_mutex &net->xfrm.xfrm_policy_lock rcu_read_lock rhashtable_bucket irq_context: 0 sk_lock-AF_X25 irq_context: 0 sk_lock-AF_X25 slock-AF_X25 irq_context: 0 slock-AF_X25 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_X25 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_X25 slock-AF_X25 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_X25 wlock-AF_X25 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_X25 &list->lock#35 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_X25 &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_X25 x25_list_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_X25 rlock-AF_X25 irq_context: 0 &sb->s_type->i_mutex_key#10 slock-AF_X25 irq_context: 0 pernet_ops_rwsem __ip_vs_app_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 bt_proto_lock rfcomm_sk_list.lock irq_context: 0 
&net->xfrm.xfrm_cfg_mutex &net->xfrm.xfrm_policy_lock &obj_hash[i].lock irq_context: 0 &net->xfrm.xfrm_cfg_mutex &net->xfrm.xfrm_policy_lock &base->lock irq_context: 0 &net->xfrm.xfrm_cfg_mutex &net->xfrm.xfrm_policy_lock &base->lock &obj_hash[i].lock irq_context: 0 &net->xfrm.xfrm_cfg_mutex (console_sem).lock irq_context: 0 &net->xfrm.xfrm_cfg_mutex console_lock console_srcu console_owner_lock irq_context: 0 &net->xfrm.xfrm_cfg_mutex console_lock console_srcu console_owner irq_context: 0 &net->xfrm.xfrm_cfg_mutex console_lock console_srcu console_owner &port_lock_key irq_context: 0 &net->xfrm.xfrm_cfg_mutex console_lock console_srcu console_owner console_owner_lock irq_context: 0 sk_lock-AF_BLUETOOTH-BTPROTO_RFCOMM irq_context: 0 &net->xfrm.xfrm_cfg_mutex &net->xfrm.xfrm_policy_lock &____s->seqcount#14 irq_context: 0 &net->xfrm.xfrm_cfg_mutex &policy->lock irq_context: 0 &net->xfrm.xfrm_cfg_mutex &list->lock#33 irq_context: 0 &net->xfrm.xfrm_cfg_mutex &net->xfrm.xfrm_policy_lock &____s->seqcount#14 irq_context: 0 &net->xfrm.xfrm_cfg_mutex &net->xfrm.xfrm_policy_lock rcu_read_lock rcu_read_lock &pool->lock irq_context: 0 &net->xfrm.xfrm_cfg_mutex &net->xfrm.xfrm_policy_lock rcu_read_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 &net->xfrm.xfrm_cfg_mutex &net->xfrm.xfrm_policy_lock krc.lock irq_context: 0 sk_lock-AF_BLUETOOTH-BTPROTO_RFCOMM slock-AF_BLUETOOTH-BTPROTO_RFCOMM irq_context: 0 slock-AF_BLUETOOTH-BTPROTO_RFCOMM irq_context: 0 (wq_completion)events_unbound (work_completion)(&map->work) &htab->buckets[i].lock irq_context: 0 &u->iolock &rcu_state.expedited_wq irq_context: 0 sk_lock-AF_BLUETOOTH-BTPROTO_RFCOMM rlock-AF_BLUETOOTH irq_context: 0 sk_lock-AF_BLUETOOTH-BTPROTO_RFCOMM &rq->__lock irq_context: 0 sk_lock-AF_BLUETOOTH-BTPROTO_RFCOMM &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &type->i_mutex_dir_key#6 &xa->xa_lock#12 irq_context: 0 &type->i_mutex_dir_key#6 &xa->xa_lock#12 pool_lock#2 irq_context: 0 &type->i_mutex_dir_key#6 &obj_hash[i].lock irq_context: 0 &type->i_mutex_dir_key#6 stock_lock irq_context: 0 &type->i_mutex_dir_key#6 &dentry->d_lock &wq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 remove_cache_srcu &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cpuset_rwsem rcu_state.exp_mutex &rnp->exp_wq[0] irq_context: 0 rtnl_mutex &bridge_netdev_addr_lock_key &n->list_lock irq_context: 0 rtnl_mutex &bridge_netdev_addr_lock_key &n->list_lock &c->lock irq_context: 0 sk_lock-AF_BLUETOOTH-BTPROTO_RFCOMM rcu_node_0 irq_context: 0 rtnl_mutex &br->lock &meta->lock irq_context: 0 rtnl_mutex (&pmctx->ip6_mc_router_timer) irq_context: 0 rtnl_mutex (&pmctx->ip4_mc_router_timer) irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) fill_pool_map-wait-type-override &n->list_lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_BLUETOOTH-BTPROTO_RFCOMM irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_BLUETOOTH-BTPROTO_RFCOMM slock-AF_BLUETOOTH-BTPROTO_RFCOMM irq_context: 0 
&sb->s_type->i_mutex_key#10 slock-AF_BLUETOOTH-BTPROTO_RFCOMM irq_context: 0 &sb->s_type->i_mutex_key#10 clock-AF_BLUETOOTH irq_context: 0 &sb->s_type->i_mutex_key#10 rfcomm_sk_list.lock irq_context: 0 &sb->s_type->i_mutex_key#10 &d->lock irq_context: 0 &sb->s_type->i_mutex_key#10 &list->lock#36 irq_context: 0 rtnl_mutex &br->hash_lock _xmit_ETHER irq_context: 0 rtnl_mutex &br->hash_lock _xmit_ETHER pool_lock#2 irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock &im->lock &obj_hash[i].lock irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock &im->lock &zone->lock &____s->seqcount irq_context: 0 &smc->clcsock_release_lock rtnl_mutex.wait_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_state.exp_mutex &obj_hash[i].lock pool_lock irq_context: 0 pernet_ops_rwsem cpu_hotplug_lock wq_pool_mutex &rq->__lock &cfs_rq->removed.lock irq_context: 0 pernet_ops_rwsem rtnl_mutex smc_ib_devices.mutex &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_state.exp_mutex &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh noop_qdisc.q.lock irq_context: 0 (wq_completion)inet_frag_wq (work_completion)(&fqdir->destroy_work) &ht->mutex quarantine_lock irq_context: 0 (wq_completion)events fqdir_free_work rcu_state.barrier_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)events fqdir_free_work rcu_state.barrier_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem remove_cache_srcu rcu_read_lock &rcu_state.expedited_wq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem remove_cache_srcu rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 &sig->cred_guard_mutex tomoyo_ss rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &br->hash_lock &obj_hash[i].lock pool_lock irq_context: 0 rtnl_mutex rcu_read_lock _xmit_ETHER &obj_hash[i].lock irq_context: 0 rtnl_mutex rcu_read_lock _xmit_ETHER pool_lock#2 irq_context: 0 rtnl_mutex rcu_read_lock _xmit_ETHER krc.lock irq_context: 0 sk_lock-AF_ALG rcu_read_lock &cfs_rq->removed.lock irq_context: 0 sk_lock-AF_ALG rcu_read_lock &obj_hash[i].lock irq_context: 0 &mm->mmap_lock sb_pagefaults jbd2_handle rcu_read_lock rcu_node_0 irq_context: 0 &mm->mmap_lock sb_pagefaults jbd2_handle rcu_read_lock &rq->__lock irq_context: 0 &mm->mmap_lock sb_pagefaults jbd2_handle rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex uevent_sock_mutex fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex uevent_sock_mutex remove_cache_srcu irq_context: 0 rtnl_mutex uevent_sock_mutex remove_cache_srcu quarantine_lock irq_context: 0 rtnl_mutex uevent_sock_mutex remove_cache_srcu &c->lock irq_context: 0 rtnl_mutex uevent_sock_mutex remove_cache_srcu &n->list_lock irq_context: 0 rtnl_mutex uevent_sock_mutex remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 rtnl_mutex uevent_sock_mutex remove_cache_srcu &obj_hash[i].lock irq_context: 0 rtnl_mutex uevent_sock_mutex remove_cache_srcu rcu_read_lock rcu_node_0 irq_context: 0 rtnl_mutex uevent_sock_mutex remove_cache_srcu rcu_read_lock &rq->__lock irq_context: 0 rtnl_mutex uevent_sock_mutex remove_cache_srcu rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC 
&rxnet->local_mutex fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex fill_pool_map-wait-type-override pool_lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock pool_lock irq_context: 0 tasklist_lock &sighand->siglock &(&sig->stats_lock)->lock &____s->seqcount#5 fill_pool_map-wait-type-override &n->list_lock irq_context: 0 tasklist_lock &sighand->siglock &(&sig->stats_lock)->lock &____s->seqcount#5 fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 &vma->vm_lock->lock ptlock_ptr(page)#2 rcu_read_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 (wq_completion)wg-crypt-wg1#4 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh batched_entropy_u32.lock crngs.lock irq_context: 0 sk_lock-AF_BLUETOOTH-BTPROTO_RFCOMM &rcu_state.expedited_wq irq_context: 0 pernet_ops_rwsem cpu_hotplug_lock wq_pool_mutex rcu_read_lock rcu_node_0 irq_context: 0 pernet_ops_rwsem cpu_hotplug_lock wq_pool_mutex rcu_read_lock &rq->__lock irq_context: 0 pernet_ops_rwsem cpu_hotplug_lock wq_pool_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem rcu_state.barrier_mutex stock_lock irq_context: 0 pernet_ops_rwsem rcu_state.barrier_mutex pcpu_lock stock_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &lock->wait_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &p->pi_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 fs_reclaim mmu_notifier_invalidate_range_start rcu_node_0 irq_context: 0 sk_lock-AF_INET &rcu_state.expedited_wq irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &tbl->lock batched_entropy_u8.lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &tbl->lock kfence_freelist_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &tbl->lock &meta->lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &tbl->lock &____s->seqcount#2 irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh noop_qdisc.q.lock irq_context: 0 (wq_completion)netns net_cleanup_work rcu_state.barrier_mutex rcu_read_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 namespace_sem rcu_node_0 irq_context: 0 &smc->clcsock_release_lock nf_sockopt_mutex &rq->__lock irq_context: 0 &smc->clcsock_release_lock nf_sockopt_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex uevent_sock_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 rtnl_mutex uevent_sock_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &br->lock &n->list_lock irq_context: 0 rtnl_mutex &br->lock &n->list_lock &c->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount irq_context: 0 &sb->s_type->i_mutex_key#8 mmu_notifier_invalidate_range_start 
&rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#8 mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#9 fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#9 fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->dat.work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex nf_hook_mutex rcu_node_0 irq_context: 0 pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex nf_hook_mutex &rq->__lock irq_context: 0 pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex nf_hook_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &smc->clcsock_release_lock &p->pi_lock &cfs_rq->removed.lock irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock &obj_hash[i].lock pool_lock irq_context: 0 pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex &obj_hash[i].lock pool_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &base->lock &obj_hash[i].lock irq_context: 0 nfnl_subsys_ulog &log->instances_lock irq_context: 0 (wq_completion)events (work_completion)(&pwq->unbound_release_work) rcu_node_0 irq_context: 0 nfnl_subsys_ulog &log->instances_lock pool_lock#2 irq_context: 0 nfnl_subsys_ulog &log->instances_lock &obj_hash[i].lock irq_context: 0 nfnl_subsys_ulog &log->instances_lock &dir->lock irq_context: 0 nfnl_subsys_ulog &inst->lock irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem &log->instances_lock &inst->lock irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem &log->instances_lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem &log->instances_lock pool_lock#2 irq_context: 0 (wq_completion)gid-cache-wq (work_completion)(&ndev_work->work) devices_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_INET6 ip6_sk_fl_lock irq_context: 0 &mm->mmap_lock sb_pagefaults jbd2_handle mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#4 sb_internal jbd2_handle &c->lock irq_context: 0 nfnl_subsys_ulog &log->instances_lock &c->lock irq_context: 0 &lo->lo_mutex &rq->__lock irq_context: 0 k-sk_lock-AF_INET6 &rq->__lock irq_context: 0 sb_writers#8 tomoyo_ss &cfs_rq->removed.lock irq_context: 0 sk_lock-AF_INET mmu_notifier_invalidate_range_start irq_context: 0 sk_lock-AF_INET &sb->s_type->i_lock_key#8 irq_context: 0 sk_lock-AF_INET k-sk_lock-AF_INET/1 irq_context: 0 sk_lock-AF_INET k-sk_lock-AF_INET/1 k-slock-AF_INET irq_context: 0 sk_lock-AF_INET k-sk_lock-AF_INET/1 &dir->lock irq_context: 0 sk_lock-AF_INET k-sk_lock-AF_INET/1 fs_reclaim irq_context: 0 sk_lock-AF_INET k-sk_lock-AF_INET/1 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sk_lock-AF_INET k-slock-AF_INET irq_context: 0 sk_lock-AF_INET k-clock-AF_INET irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET k-sk_lock-AF_INET/1 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET k-sk_lock-AF_INET/1 k-slock-AF_INET irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET k-slock-AF_INET irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET &sb->s_type->i_lock_key#8 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET &xa->xa_lock#6 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET &fsnotify_mark_srcu irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET k-sk_lock-AF_INET/1 k-clock-AF_INET irq_context: 0 &sb->s_type->i_mutex_key#10 
sk_lock-AF_INET k-sk_lock-AF_INET/1 k-slock-AF_INET &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET k-sk_lock-AF_INET/1 k-slock-AF_INET pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET k-sk_lock-AF_INET/1 k-slock-AF_INET krc.lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET k-sk_lock-AF_INET/1 k-slock-AF_INET elock-AF_INET irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET &dir->lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET stock_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET &msk->pm.lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&(&ssp->srcu_sup->work)->work) &cfs_rq->removed.lock irq_context: 0 tomoyo_ss rcu_read_lock rcu_read_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 tomoyo_ss batched_entropy_u8.lock crngs.lock irq_context: 0 sk_lock-AF_INET k-sk_lock-AF_INET/1 &c->lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET k-sk_lock-AF_INET/1 k-slock-AF_INET &obj_hash[i].lock pool_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET k-sk_lock-AF_INET/1 &rq->__lock irq_context: 0 cb_lock &devlink->lock_key#4 irq_context: 0 cb_lock &devlink->lock_key#4 &rq->__lock irq_context: 0 cb_lock &devlink->lock_key#4 rcu_read_lock &p->alloc_lock irq_context: 0 cb_lock &devlink->lock_key#4 pin_fs_lock irq_context: 0 cb_lock &devlink->lock_key#4 &dentry->d_lock irq_context: 0 cb_lock &devlink->lock_key#4 &sb->s_type->i_mutex_key#3 irq_context: 0 cb_lock &devlink->lock_key#4 &sb->s_type->i_mutex_key#3 &dentry->d_lock irq_context: 0 cb_lock &devlink->lock_key#4 &sb->s_type->i_mutex_key#3 tk_core.seq.seqcount irq_context: 0 cb_lock &devlink->lock_key#4 &sb->s_type->i_mutex_key#3 rename_lock.seqcount irq_context: 0 cb_lock &devlink->lock_key#4 &sb->s_type->i_mutex_key#3 pin_fs_lock irq_context: 0 cb_lock &devlink->lock_key#4 &sb->s_type->i_mutex_key#3 rcu_read_lock mount_lock irq_context: 0 cb_lock &devlink->lock_key#4 &sb->s_type->i_mutex_key#3 rcu_read_lock mount_lock mount_lock.seqcount irq_context: 0 cb_lock &devlink->lock_key#4 &sb->s_type->i_mutex_key#3 mount_lock irq_context: 0 cb_lock &devlink->lock_key#4 &sb->s_type->i_mutex_key#3 mount_lock mount_lock.seqcount irq_context: 0 cb_lock &devlink->lock_key#4 &sb->s_type->i_mutex_key#3 rcu_read_lock &dentry->d_lock irq_context: 0 cb_lock &devlink->lock_key#4 rcu_read_lock &dentry->d_lock irq_context: 0 cb_lock &devlink->lock_key#4 &fsnotify_mark_srcu irq_context: 0 cb_lock &devlink->lock_key#4 &sb->s_type->i_lock_key#7 irq_context: 0 cb_lock &devlink->lock_key#4 &s->s_inode_list_lock irq_context: 0 cb_lock &devlink->lock_key#4 &xa->xa_lock#6 irq_context: 0 cb_lock &devlink->lock_key#4 &obj_hash[i].lock irq_context: 0 cb_lock &devlink->lock_key#4 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock &devlink->lock_key#4 rcu_read_lock mount_lock irq_context: 0 cb_lock &devlink->lock_key#4 rcu_read_lock mount_lock mount_lock.seqcount irq_context: 0 cb_lock &devlink->lock_key#4 mount_lock irq_context: 0 cb_lock &devlink->lock_key#4 mount_lock mount_lock.seqcount irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex rtnl_mutex.wait_lock irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex &rq->__lock irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 bpf_stats_enabled_mutex &c->lock irq_context: 0 rtnl_mutex noop_qdisc.q.lock crngs.lock irq_context: 0 cb_lock 
&devlink->lock_key#4 rtnl_mutex netpoll_srcu irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex net_rwsem irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex &pn->hash_lock irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex &tn->lock irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex &dev->tx_global_lock irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex &dev->tx_global_lock _xmit_ETHER#2 irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex &dev->tx_global_lock &obj_hash[i].lock irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex rcu_state.exp_mutex rcu_node_0 irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex rcu_state.exp_mutex &obj_hash[i].lock irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex rcu_state.exp_mutex &rq->__lock irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex rcu_state.exp_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex &obj_hash[i].lock irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex &sch->q.lock irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex fs_reclaim irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex nl_table_lock irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex nl_table_wait.lock irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex __ip_vs_mutex irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex __ip_vs_mutex &rq->__lock irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex __ip_vs_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex __ip_vs_mutex &ipvs->dest_trash_lock irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex _xmit_ETHER irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex _xmit_ETHER &obj_hash[i].lock irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex _xmit_ETHER krc.lock irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex &im->lock irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex krc.lock irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex &tbl->lock irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex class irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex (&tbl->proxy_timer) irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex &base->lock irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex &net->xfrm.xfrm_state_lock irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex &net->xfrm.xfrm_policy_lock irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex flowtable_lock irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex &dir->lock irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex rcu_read_lock &pool->lock irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex 
rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 bpf_stats_enabled_mutex cpu_hotplug_lock jump_label_mutex &rq->__lock irq_context: 0 bpf_stats_enabled_mutex cpu_hotplug_lock jump_label_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex rcu_read_lock &tb->tb6_lock irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex rcu_read_lock &tb->tb6_lock &net->ipv6.fib6_walker_lock irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex rcu_read_lock &tb->tb6_lock &net->ipv6.fib6_walker_lock irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex rcu_read_lock &tb->tb6_lock rt6_exception_lock irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex rcu_read_lock &tb->tb6_lock &c->lock irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex rcu_read_lock &tb->tb6_lock nl_table_lock irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex rcu_read_lock &tb->tb6_lock &obj_hash[i].lock irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex rcu_read_lock &tb->tb6_lock nl_table_wait.lock irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex rcu_read_lock rcu_node_0 irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex rcu_read_lock &tb->tb6_lock rcu_read_lock &c->lock irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex rcu_read_lock &tb->tb6_lock rcu_read_lock &data->fib_event_queue_lock irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex rcu_read_lock &tb->tb6_lock rcu_read_lock rcu_read_lock &pool->lock irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex rcu_read_lock &tb->tb6_lock rcu_read_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex &tbl->lock &n->lock irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex &tbl->lock &c->lock irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex &tbl->lock &n->lock irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex &tbl->lock &n->lock &____s->seqcount#9 irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex &tbl->lock nl_table_lock irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex &tbl->lock &obj_hash[i].lock irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex &tbl->lock nl_table_wait.lock irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex &tbl->lock rcu_read_lock lock#8 irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex &tbl->lock rcu_read_lock id_table_lock irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex &tbl->lock &dir->lock#2 irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex &tbl->lock krc.lock irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex &net->ipv6.addrconf_hash_lock irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex &net->ipv6.addrconf_hash_lock &obj_hash[i].lock irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex &ndev->lock irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex &ndev->lock &obj_hash[i].lock irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex &ndev->lock &base->lock irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex &ndev->lock &base->lock &obj_hash[i].lock irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex &ifa->lock irq_context: 0 cb_lock 
&devlink->lock_key#4 rtnl_mutex &tb->tb6_lock irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex rcu_read_lock &obj_hash[i].lock irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex rcu_read_lock rcu_read_lock &pool->lock/1 irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex rcu_read_lock rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex rcu_read_lock rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex rcu_read_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex rcu_read_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex rcu_read_lock &dir->lock irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex rcu_read_lock rcu_read_lock &pool->lock irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex rcu_read_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex rcu_read_lock &net->sctp.local_addr_lock irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex rcu_read_lock &net->sctp.local_addr_lock &net->sctp.addr_wq_lock irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex rcu_read_lock &net->sctp.local_addr_lock &net->sctp.addr_wq_lock &obj_hash[i].lock irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex rcu_read_lock &net->sctp.local_addr_lock &net->sctp.addr_wq_lock &base->lock irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex rcu_read_lock &net->sctp.local_addr_lock &net->sctp.addr_wq_lock &base->lock &obj_hash[i].lock irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex rcu_read_lock krc.lock irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex &idev->mc_lock irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex &idev->mc_lock _xmit_ETHER irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex &idev->mc_lock _xmit_ETHER &obj_hash[i].lock irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex &idev->mc_lock _xmit_ETHER krc.lock irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex rcu_state.exp_mutex &rnp->exp_wq[1] irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex &idev->mc_query_lock irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex &idev->mc_query_lock &obj_hash[i].lock irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex (work_completion)(&(&idev->mc_report_work)->work) irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex &net->ipv6.fib6_gc_lock rcu_read_lock &tb->tb6_lock irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex &net->ipv6.fib6_gc_lock rcu_read_lock &tb->tb6_lock &net->ipv6.fib6_walker_lock irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex &net->ipv6.fib6_gc_lock &obj_hash[i].lock irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex dev_base_lock irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex cpu_hotplug_lock irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex cpu_hotplug_lock &list->lock#5 irq_context: 0 cb_lock &devlink->lock_key#4 
rtnl_mutex rcu_state.exp_mutex &rnp->exp_wq[2] irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex &dir->lock#2 irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex bpf_devs_lock irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex &hwstats->hwsdev_list_lock irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex (work_completion)(&(&devlink_port->type_warn_dw)->work) irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex &devlink_port->type_lock irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex &c->lock irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex rcu_read_lock &pool->lock/1 irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex &in_dev->mc_tomb_lock irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex sysctl_lock irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex sysctl_lock &obj_hash[i].lock irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex sysctl_lock krc.lock irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex &ul->lock irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex &net->xdp.lock irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex mirred_list_lock irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex &nft_net->commit_mutex irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex proc_subdir_lock irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex &ent->pde_unload_lock irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex proc_inum_ida.xa_lock irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex rcu_state.exp_mutex &rnp->exp_wq[3] irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex &idev->mc_report_lock irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex &idev->mc_lock &obj_hash[i].lock irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex &idev->mc_lock krc.lock irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex &pnn->pndevs.lock irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex &pnn->routes.lock irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex &pnettable->lock irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex smc_ib_devices.mutex irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex target_list_lock irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex (console_sem).lock irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex console_lock console_srcu console_owner_lock irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex console_lock console_srcu console_owner irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex console_lock console_srcu console_owner &port_lock_key irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex console_lock console_srcu console_owner console_owner_lock irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex &base->lock &obj_hash[i].lock irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex &root->kernfs_rwsem irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex &root->kernfs_rwsem &rq->__lock irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex &root->kernfs_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 cb_lock 
&devlink->lock_key#4 rtnl_mutex &root->kernfs_rwsem kernfs_idr_lock irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex &root->kernfs_rwsem &obj_hash[i].lock irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex uevent_sock_mutex irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex uevent_sock_mutex fs_reclaim irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex uevent_sock_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex uevent_sock_mutex nl_table_lock irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex uevent_sock_mutex &obj_hash[i].lock irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex uevent_sock_mutex nl_table_wait.lock irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex sysfs_symlink_target_lock irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex kernfs_idr_lock irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex &k->list_lock irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex &root->kernfs_rwsem irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex dev_hotplug_mutex irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex dev_hotplug_mutex &dev->power.lock irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex dev_hotplug_mutex &k->k_lock irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex dev_hotplug_mutex &rq->__lock irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex dev_hotplug_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex dev_pm_qos_sysfs_mtx irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex dev_pm_qos_sysfs_mtx &root->kernfs_rwsem irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex dev_pm_qos_sysfs_mtx &root->kernfs_rwsem irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex dev_pm_qos_sysfs_mtx dev_pm_qos_mtx irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex &k->k_lock irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex &k->k_lock klist_remove_lock irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex subsys mutex#17 irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex subsys mutex#17 &k->k_lock irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex subsys mutex#17 &k->k_lock klist_remove_lock irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex &sem->wait_lock irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex &p->pi_lock irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex &p->pi_lock &rq->__lock irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex &root->kernfs_rwsem &sem->wait_lock irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem &sem->wait_lock irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem &rq->__lock irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex &root->kernfs_rwsem kernfs_idr_lock &obj_hash[i].lock irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex &x->wait#9 irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex dpm_list_mtx irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex &dev->power.lock irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex deferred_probe_mutex irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex device_links_lock irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex 
mmu_notifier_invalidate_range_start irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex uevent_sock_mutex mmu_notifier_invalidate_range_start irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex gdp_mutex irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex cpu_hotplug_lock xps_map_mutex irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex rcu_state.exp_mutex &rnp->exp_wq[0] irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex pin_fs_lock irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex &dentry->d_lock irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex &sb->s_type->i_mutex_key#3 irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex &sb->s_type->i_mutex_key#3 &dentry->d_lock irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex &sb->s_type->i_mutex_key#3 tk_core.seq.seqcount irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex &sb->s_type->i_mutex_key#3 rename_lock.seqcount irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex &sb->s_type->i_mutex_key#3 pin_fs_lock irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex &sb->s_type->i_mutex_key#3 rcu_read_lock mount_lock irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex &sb->s_type->i_mutex_key#3 rcu_read_lock mount_lock mount_lock.seqcount irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex &sb->s_type->i_mutex_key#3 mount_lock irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex &sb->s_type->i_mutex_key#3 mount_lock mount_lock.seqcount irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex &sb->s_type->i_mutex_key#3 rcu_read_lock &dentry->d_lock irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex rcu_read_lock &dentry->d_lock irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex &fsnotify_mark_srcu irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex &sb->s_type->i_lock_key#7 irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex &s->s_inode_list_lock irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex &xa->xa_lock#6 irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex rcu_read_lock mount_lock irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex rcu_read_lock mount_lock mount_lock.seqcount irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex mount_lock irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex mount_lock mount_lock.seqcount irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex bpf_devs_lock rcu_read_lock rhashtable_bucket irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex bpf_devs_lock rcu_read_lock rcu_node_0 irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex bpf_devs_lock rcu_read_lock &rq->__lock irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex bpf_devs_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex bpf_devs_lock &obj_hash[i].lock irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex.wait_lock irq_context: 0 cb_lock &devlink->lock_key#4 &p->pi_lock irq_context: 0 cb_lock &devlink->lock_key#4 &p->pi_lock &rq->__lock irq_context: 0 cb_lock &devlink->lock_key#4 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock &devlink->lock_key#4 rcu_state.barrier_mutex irq_context: 0 cb_lock &devlink->lock_key#4 rcu_state.barrier_mutex &rq->__lock irq_context: 0 cb_lock &devlink->lock_key#4 rcu_state.barrier_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock &devlink->lock_key#4 rcu_state.barrier_mutex rcu_state.barrier_lock irq_context: 0 cb_lock &devlink->lock_key#4 rcu_state.barrier_mutex rcu_state.barrier_lock &obj_hash[i].lock irq_context: 
0 cb_lock &devlink->lock_key#4 rcu_state.barrier_mutex &x->wait#24 irq_context: 0 cb_lock &devlink->lock_key#4 dev_base_lock irq_context: 0 cb_lock &devlink->lock_key#4 lweventlist_lock irq_context: 0 cb_lock &devlink->lock_key#4 netdev_unregistering_wq.lock irq_context: 0 cb_lock &devlink->lock_key#4 krc.lock irq_context: 0 cb_lock &devlink->lock_key#4 &dir->lock#2 irq_context: 0 cb_lock &devlink->lock_key#4 &dir->lock#2 &obj_hash[i].lock irq_context: 0 cb_lock &devlink->lock_key#4 &obj_hash[i].lock pool_lock irq_context: 0 cb_lock &devlink->lock_key#4 &sb->s_type->i_mutex_key#3 &dentry->d_lock &dentry->d_lock/1 irq_context: 0 cb_lock &devlink->lock_key#4 &sb->s_type->i_mutex_key#3 &fsnotify_mark_srcu irq_context: 0 cb_lock &devlink->lock_key#4 &sb->s_type->i_mutex_key#3 &sb->s_type->i_lock_key#7 irq_context: 0 cb_lock &devlink->lock_key#4 &sb->s_type->i_mutex_key#3 &s->s_inode_list_lock irq_context: 0 cb_lock &devlink->lock_key#4 &sb->s_type->i_mutex_key#3 &rq->__lock irq_context: 0 cb_lock &devlink->lock_key#4 &sb->s_type->i_mutex_key#3 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock &devlink->lock_key#4 &sb->s_type->i_mutex_key#3 &xa->xa_lock#6 irq_context: 0 cb_lock &devlink->lock_key#4 &sb->s_type->i_mutex_key#3 &obj_hash[i].lock irq_context: 0 cb_lock &devlink->lock_key#4 &sb->s_type->i_mutex_key#3 rcu_read_lock &rq->__lock irq_context: 0 cb_lock &devlink->lock_key#4 &sb->s_type->i_mutex_key#3 rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock &devlink->lock_key#4 &sb->s_type->i_mutex_key#3 &obj_hash[i].lock pool_lock irq_context: 0 cb_lock &devlink->lock_key#4 &base->lock irq_context: 0 cb_lock &devlink->lock_key#4 &base->lock &obj_hash[i].lock irq_context: 0 cb_lock &devlink->lock_key#4 (work_completion)(&(&devlink_port->type_warn_dw)->work) irq_context: 0 cb_lock &devlink->lock_key#4 fs_reclaim irq_context: 0 cb_lock &devlink->lock_key#4 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 cb_lock &devlink->lock_key#4 &devlink_port->type_lock irq_context: 0 cb_lock &devlink->lock_key#4 nl_table_lock irq_context: 0 cb_lock &devlink->lock_key#4 nl_table_wait.lock irq_context: 0 cb_lock &devlink->lock_key#4 &xa->xa_lock#14 irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex fib_info_lock irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex fib_info_lock &obj_hash[i].lock irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex rcu_read_lock &tb->tb6_lock &obj_hash[i].lock pool_lock irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex rcu_read_lock &tb->tb6_lock rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex rcu_read_lock &tb->tb6_lock &n->list_lock irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex rcu_read_lock &tb->tb6_lock &n->list_lock &c->lock irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex rcu_read_lock rcu_read_lock &pool->lock/1 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex rcu_read_lock &c->lock irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex rcu_read_lock &net->sctp.local_addr_lock &net->sctp.addr_wq_lock &c->lock irq_context: 0 &u->iolock &u->peer_wait &ei->socket.wq.wait irq_context: 0 &u->iolock &u->peer_wait &ei->socket.wq.wait &ep->lock irq_context: 0 &u->iolock &u->peer_wait &ei->socket.wq.wait &ep->lock rcu_read_lock &ws->lock irq_context: 0 &u->iolock &u->peer_wait &ei->socket.wq.wait &ep->lock rcu_read_lock &ws->lock tk_core.seq.seqcount irq_context: 0 &u->iolock 
&u->peer_wait &ei->socket.wq.wait &ep->lock rcu_read_lock &ws->lock &obj_hash[i].lock irq_context: 0 &ep->poll_wait irq_context: 0 &u->iolock &u->peer_wait &ei->socket.wq.wait &ep->poll_wait/1 irq_context: 0 &u->iolock &u->peer_wait &ei->socket.wq.wait &ep->poll_wait/1 &p->pi_lock irq_context: 0 &u->iolock &u->peer_wait &ei->socket.wq.wait &ep->poll_wait/1 &p->pi_lock &rq->__lock irq_context: 0 &u->iolock &u->peer_wait &ei->socket.wq.wait &ep->poll_wait/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_lock_key#22 irq_context: 0 &f->f_pos_lock sb_writers#4 &wb->list_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem &xa->xa_lock#6 irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem mmu_notifier_invalidate_range_start irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem pool_lock#2 irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem lock#4 irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem lock#4 &lruvec->lru_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem lock#5 irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem &journal->j_state_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle lock#4 irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle lock#5 irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle mmu_notifier_invalidate_range_start irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle pool_lock#2 irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_es_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem mmu_notifier_invalidate_range_start irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem pool_lock#2 irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lg->lg_mutex irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lg->lg_mutex &ei->i_prealloc_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lg->lg_mutex rcu_read_lock &pa->pa_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lg->lg_mutex &mapping->private_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lg->lg_mutex &pa->pa_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lg->lg_mutex &lg->lg_prealloc_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ei->i_raw_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &(ei->i_block_reservation_lock) irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &sb->s_type->i_lock_key#22 irq_context: 0 
&f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ei->i_es_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ei->i_es_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ei->i_es_lock &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ei->i_es_lock pool_lock#2 irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ei->i_es_lock &____s->seqcount irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ei->i_es_lock rcu_read_lock pool_lock#2 irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &memcg->move_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &xa->xa_lock#6 irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &xa->xa_lock#6 &s->s_inode_wblist_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_raw_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem tk_core.seq.seqcount irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem &dd->lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem rcu_read_lock &dd->lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem rcu_read_lock &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem rcu_read_lock tk_core.seq.seqcount irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem rcu_read_lock &virtscsi_vq->vq_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &folio_wait_table[i] irq_context: 0 &f->f_pos_lock sb_writers#4 lock#4 irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex (inetaddr_chain).rwsem irq_context: 0 &f->f_pos_lock sb_writers#4 lock#4 &lruvec->lru_lock irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex (inetaddr_chain).rwsem &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 lock#5 irq_context: 0 &f->f_pos_lock sb_writers#4 &journal->j_state_lock irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex (inetaddr_chain).rwsem &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#4 &journal->j_state_lock irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex (inetaddr_chain).rwsem rcu_read_lock &pool->lock/1 irq_context: 0 &f->f_pos_lock sb_writers#4 &journal->j_state_lock &journal->j_wait_commit irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex (inetaddr_chain).rwsem rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#4 &journal->j_state_lock &journal->j_wait_commit &p->pi_lock irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex (inetaddr_chain).rwsem rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &journal->j_state_lock &journal->j_wait_commit &p->pi_lock &rq->__lock irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex (inetaddr_chain).rwsem rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &journal->j_state_lock &journal->j_wait_commit &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex (inetaddr_chain).rwsem rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 jbd2_handle irq_context: 0 &f->f_pos_lock 
sb_writers#4 jbd2_handle &rq->__lock irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex (inetaddr_chain).rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 jbd2_handle &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex (inetaddr_chain).rwsem rcu_read_lock &data->fib_event_queue_lock irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex (inetaddr_chain).rwsem rcu_read_lock rcu_read_lock &pool->lock irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex (inetaddr_chain).rwsem rcu_read_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex (inetaddr_chain).rwsem rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex (inetaddr_chain).rwsem rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex (inetaddr_chain).rwsem rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex (inetaddr_chain).rwsem rcu_read_lock rcu_node_0 irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex (inetaddr_chain).rwsem rcu_read_lock &rq->__lock irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex (inetaddr_chain).rwsem rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 &journal->j_wait_commit irq_context: 0 &f->f_pos_lock sb_writers#4 &journal->j_wait_done_commit irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex (inetaddr_chain).rwsem fs_reclaim irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex (inetaddr_chain).rwsem fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex (inetaddr_chain).rwsem &c->lock irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex (inetaddr_chain).rwsem nl_table_lock irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex (inetaddr_chain).rwsem nl_table_wait.lock irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex (inetaddr_chain).rwsem fib_info_lock irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex (inetaddr_chain).rwsem fib_info_lock &obj_hash[i].lock irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex (inetaddr_chain).rwsem fib_info_lock pool_lock#2 irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex (inetaddr_chain).rwsem &tbl->lock irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex (inetaddr_chain).rwsem class irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex (inetaddr_chain).rwsem (&tbl->proxy_timer) irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex (inetaddr_chain).rwsem &base->lock irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex (inetaddr_chain).rwsem &net->sctp.local_addr_lock irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex (inetaddr_chain).rwsem &net->sctp.local_addr_lock &net->sctp.addr_wq_lock irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex (inetaddr_chain).rwsem krc.lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &ei->i_data_sem irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem &xa->xa_lock#6 irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem pool_lock#2 irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem rcu_read_lock &memcg->move_lock irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem rcu_read_lock 
&xa->xa_lock#6 irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem rcu_read_lock &xa->xa_lock#6 &s->s_inode_wblist_lock irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem lock#4 irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem lock#4 &lruvec->lru_lock irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem lock#5 irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem tk_core.seq.seqcount irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem &dd->lock irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem rcu_read_lock &dd->lock irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem rcu_read_lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem rcu_read_lock tk_core.seq.seqcount irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem rcu_read_lock &virtscsi_vq->vq_lock irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem rcu_read_lock &rcu_state.expedited_wq irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &journal->j_state_lock &journal->j_wait_commit irq_context: 0 sb_writers#4 &journal->j_state_lock &journal->j_wait_commit &p->pi_lock irq_context: 0 sb_writers#4 &journal->j_state_lock &journal->j_wait_commit &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &journal->j_state_lock &journal->j_wait_commit &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 jbd2_handle irq_context: 0 sb_writers#4 &journal->j_wait_commit irq_context: 0 sb_writers#4 &journal->j_wait_done_commit irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex &n->list_lock irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex &n->list_lock &c->lock irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex &____s->seqcount#2 irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex &____s->seqcount irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex uevent_sock_mutex &c->lock irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex uevent_sock_mutex &rq->__lock irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex uevent_sock_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem &folio_wait_table[i] irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem &rq->__lock irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem &c->lock irq_context: 0 sb_writers#4 &journal->j_state_lock &journal->j_wait_commit &p->pi_lock &cfs_rq->removed.lock irq_context: 0 &fsnotify_mark_srcu rcu_read_lock &rq->__lock irq_context: 0 &fsnotify_mark_srcu rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 sb_internal jbd2_handle &sbi->s_orphan_lock mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#4 sb_internal jbd2_handle &sbi->s_orphan_lock &____s->seqcount irq_context: 0 sb_writers#4 sb_internal jbd2_handle &sbi->s_orphan_lock pool_lock#2 irq_context: 0 sb_writers#4 sb_internal jbd2_handle &sbi->s_orphan_lock bit_wait_table + i irq_context: 0 sb_writers#4 sb_internal jbd2_handle &sbi->s_orphan_lock rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#4 sb_internal jbd2_handle 
&sbi->s_orphan_lock &obj_hash[i].lock irq_context: 0 sb_writers#4 sb_writers#4 jbd2_handle &ret->b_state_lock bit_wait_table + i &p->pi_lock irq_context: 0 sb_writers#4 sb_writers#4 jbd2_handle &ret->b_state_lock bit_wait_table + i &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 sb_writers#4 jbd2_handle &ret->b_state_lock bit_wait_table + i &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem &folio_wait_table[i] &p->pi_lock irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem &folio_wait_table[i] &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem &folio_wait_table[i] &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem &folio_wait_table[i] &p->pi_lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem &obj_hash[i].lock irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem &n->list_lock irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem &n->list_lock &c->lock irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex rcu_read_lock &rcu_state.gp_wq irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq rcu_read_lock &xa->xa_lock#6 key#13 irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex rcu_node_0 irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem &____s->seqcount#2 irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem &____s->seqcount irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex &root->kernfs_rwsem &rq->__lock irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex rcu_state.exp_mutex rcu_read_lock &rq->__lock irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex rcu_state.exp_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &u->iolock &cfs_rq->removed.lock irq_context: 0 &u->iolock &u->peer_wait &ei->socket.wq.wait &ep->poll_wait/1 &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 cb_lock &devlink->lock_key#4 &xa->xa_lock#14 &obj_hash[i].lock irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex &obj_hash[i].lock pool_lock irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex rcu_read_lock &tb->tb6_lock rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex rcu_read_lock &tb->tb6_lock rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#8 kn->active#5 rcu_read_lock &rq->__lock irq_context: 0 sb_writers#8 kn->active#5 rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#8 kn->active#5 rcu_read_lock &rcu_state.expedited_wq irq_context: 0 sb_writers#8 kn->active#5 rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 sb_writers#8 kn->active#5 rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#8 kn->active#5 rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex cbs_list_lock irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex pool_lock#2 irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex kernfs_idr_lock &obj_hash[i].lock 
irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex kernfs_idr_lock pool_lock#2 irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex gdp_mutex sysfs_symlink_target_lock irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex gdp_mutex &root->kernfs_rwsem irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex gdp_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex gdp_mutex kernfs_idr_lock irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex gdp_mutex &obj_hash[i].lock irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex gdp_mutex &k->list_lock irq_context: 0 cb_lock &devlink->lock_key#4 rcu_state.barrier_mutex rcu_read_lock &rq->__lock irq_context: 0 cb_lock &devlink->lock_key#4 rcu_state.barrier_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem rcu_read_lock &xa->xa_lock#6 key#10 irq_context: 0 cb_lock &devlink->lock_key#4 &c->lock irq_context: 0 cb_lock &devlink->lock_key#4 (work_completion)(&(&hwstats->traffic_dw)->work) irq_context: 0 cb_lock &devlink->lock_key#4 rcu_read_lock &pool->lock irq_context: 0 cb_lock &devlink->lock_key#4 &hwstats->hwsdev_list_lock irq_context: 0 cb_lock &devlink->lock_key#4 &(&fn_net->fib_chain)->lock irq_context: 0 cb_lock &devlink->lock_key#4 rcu_state.exp_mutex rcu_node_0 irq_context: 0 cb_lock &devlink->lock_key#4 rcu_state.exp_mutex &obj_hash[i].lock irq_context: 0 cb_lock &devlink->lock_key#4 rcu_state.exp_mutex rcu_read_lock &pool->lock irq_context: 0 cb_lock &devlink->lock_key#4 rcu_state.exp_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 cb_lock &devlink->lock_key#4 rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 cb_lock &devlink->lock_key#4 rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 cb_lock &devlink->lock_key#4 rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock &devlink->lock_key#4 rcu_state.exp_mutex &rq->__lock irq_context: 0 cb_lock &devlink->lock_key#4 rcu_state.exp_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex &(&net->nexthop.notifier_chain)->rwsem irq_context: 0 cb_lock &devlink->lock_key#4 (work_completion)(&data->fib_flush_work) irq_context: 0 cb_lock &devlink->lock_key#4 (work_completion)(&data->fib_event_work) irq_context: 0 cb_lock &devlink->lock_key#4 (work_completion)(&ht->run_work) irq_context: 0 cb_lock &devlink->lock_key#4 &ht->mutex irq_context: 0 cb_lock &devlink->lock_key#4 &ht->mutex &obj_hash[i].lock irq_context: 0 cb_lock &devlink->lock_key#4 &ht->mutex &rq->__lock irq_context: 0 cb_lock &devlink->lock_key#4 &ht->mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock &devlink->lock_key#4 &ht->mutex &meta->lock irq_context: 0 cb_lock &devlink->lock_key#4 &ht->mutex kfence_freelist_lock irq_context: 0 cb_lock &devlink->lock_key#4 rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 cb_lock &devlink->lock_key#4 (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) irq_context: 0 cb_lock &devlink->lock_key#4 &nsim_trap_data->trap_lock irq_context: 0 &u->iolock &u->peer_wait &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 cb_lock &devlink->lock_key#4 rcu_state.exp_mutex &rnp->exp_wq[1] irq_context: 0 &u->iolock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 &u->iolock &rcu_state.expedited_wq &p->pi_lock &rq->__lock 
irq_context: 0 &u->iolock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock &devlink->lock_key#4 pcpu_lock irq_context: 0 cb_lock &devlink->lock_key#4 &n->list_lock irq_context: 0 cb_lock &devlink->lock_key#4 &n->list_lock &c->lock irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem rcu_read_lock &rq->__lock irq_context: 0 cb_lock &devlink->lock_key#4 &region->snapshot_lock irq_context: 0 cb_lock &devlink->lock_key#4 pcpu_alloc_mutex irq_context: 0 cb_lock &devlink->lock_key#4 pcpu_alloc_mutex pcpu_lock irq_context: 0 cb_lock &devlink->lock_key#4 &____s->seqcount#2 irq_context: 0 cb_lock &devlink->lock_key#4 &____s->seqcount irq_context: 0 cb_lock &devlink->lock_key#4 &sb->s_type->i_mutex_key#3 fs_reclaim irq_context: 0 cb_lock &devlink->lock_key#4 &sb->s_type->i_mutex_key#3 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 cb_lock &devlink->lock_key#4 &sb->s_type->i_mutex_key#3 &c->lock irq_context: 0 cb_lock &devlink->lock_key#4 &sb->s_type->i_mutex_key#3 &xa->xa_lock#12 irq_context: 0 cb_lock &devlink->lock_key#4 &sb->s_type->i_mutex_key#3 stock_lock irq_context: 0 cb_lock &devlink->lock_key#4 &sb->s_type->i_mutex_key#3 rcu_read_lock rename_lock.seqcount irq_context: 0 cb_lock &devlink->lock_key#4 &sb->s_type->i_mutex_key#3 &dentry->d_lock &wq irq_context: 0 cb_lock &devlink->lock_key#4 &sb->s_type->i_mutex_key#3 rcu_read_lock rcu_node_0 irq_context: 0 cb_lock &devlink->lock_key#4 &sb->s_type->i_mutex_key#3 mmu_notifier_invalidate_range_start irq_context: 0 cb_lock &devlink->lock_key#4 &sb->s_type->i_mutex_key#3 &sb->s_type->i_lock_key#7 &dentry->d_lock irq_context: 0 cb_lock &devlink->lock_key#4 batched_entropy_u32.lock irq_context: 0 cb_lock &devlink->lock_key#4 fill_pool_map-wait-type-override &rq->__lock irq_context: 0 cb_lock &devlink->lock_key#4 fill_pool_map-wait-type-override &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock &devlink->lock_key#4 batched_entropy_u32.lock crngs.lock irq_context: 0 cb_lock &devlink->lock_key#4 rcu_read_lock &data->fib_event_queue_lock irq_context: 0 cb_lock &devlink->lock_key#4 rcu_read_lock rcu_read_lock &pool->lock irq_context: 0 cb_lock &devlink->lock_key#4 rcu_read_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 cb_lock &devlink->lock_key#4 rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 cb_lock &devlink->lock_key#4 rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 cb_lock &devlink->lock_key#4 rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock &devlink->lock_key#4 rcu_read_lock rcu_node_0 irq_context: 0 cb_lock &devlink->lock_key#4 rcu_read_lock &rq->__lock irq_context: 0 cb_lock &devlink->lock_key#4 rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock &devlink->lock_key#4 rcu_read_lock &c->lock irq_context: 0 cb_lock &devlink->lock_key#4 rcu_read_lock &n->list_lock irq_context: 0 cb_lock &devlink->lock_key#4 rcu_read_lock &n->list_lock &c->lock irq_context: 0 cb_lock &devlink->lock_key#4 rcu_read_lock &tb->tb6_lock irq_context: 0 cb_lock &devlink->lock_key#4 rcu_read_lock &tb->tb6_lock &net->ipv6.fib6_walker_lock irq_context: 0 cb_lock &devlink->lock_key#4 rcu_read_lock &tb->tb6_lock &data->fib_event_queue_lock irq_context: 0 cb_lock &devlink->lock_key#4 rcu_read_lock &tb->tb6_lock &c->lock irq_context: 0 cb_lock &devlink->lock_key#4 rcu_read_lock &tb->tb6_lock &n->list_lock irq_context: 0
cb_lock &devlink->lock_key#4 rcu_read_lock &tb->tb6_lock &n->list_lock &c->lock irq_context: 0 cb_lock &devlink->lock_key#4 rcu_read_lock &tb->tb6_lock &____s->seqcount#2 irq_context: 0 cb_lock &devlink->lock_key#4 rcu_read_lock &obj_hash[i].lock irq_context: 0 cb_lock &devlink->lock_key#4 &sb->s_type->i_mutex_key#3 &n->list_lock irq_context: 0 cb_lock &devlink->lock_key#4 &sb->s_type->i_mutex_key#3 &n->list_lock &c->lock irq_context: 0 cb_lock &devlink->lock_key#4 stock_lock irq_context: 0 cb_lock &devlink->lock_key#4 stack_depot_init_mutex irq_context: 0 cb_lock &devlink->lock_key#4 crngs.lock irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex bpf_devs_lock fs_reclaim irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex bpf_devs_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex &sb->s_type->i_mutex_key#3 fs_reclaim irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex &sb->s_type->i_mutex_key#3 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex &sb->s_type->i_mutex_key#3 stock_lock irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex &sb->s_type->i_mutex_key#3 rcu_read_lock rename_lock.seqcount irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex &sb->s_type->i_mutex_key#3 &dentry->d_lock &wq irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex &sb->s_type->i_mutex_key#3 mmu_notifier_invalidate_range_start irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex &sb->s_type->i_mutex_key#3 &c->lock irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex &sb->s_type->i_mutex_key#3 &sb->s_type->i_lock_key#7 irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex &sb->s_type->i_mutex_key#3 &s->s_inode_list_lock irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex &sb->s_type->i_mutex_key#3 &sb->s_type->i_lock_key#7 &dentry->d_lock irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex &sb->s_type->i_mutex_key#3 &rq->__lock irq_context: 0 sb_writers#4 &fq->mq_flush_lock irq_context: 0 sb_writers#4 &fq->mq_flush_lock tk_core.seq.seqcount irq_context: 0 sb_writers#4 &fq->mq_flush_lock &q->requeue_lock irq_context: 0 sb_writers#4 &fq->mq_flush_lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &fq->mq_flush_lock rcu_read_lock &pool->lock irq_context: 0 sb_writers#4 &fq->mq_flush_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &fq->mq_flush_lock rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 sb_writers#4 &fq->mq_flush_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 sb_writers#4 &fq->mq_flush_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &fq->mq_flush_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex gdp_mutex fs_reclaim irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex gdp_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex gdp_mutex lock irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex gdp_mutex lock kernfs_idr_lock irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex gdp_mutex kobj_ns_type_lock irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex lock irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex lock kernfs_idr_lock irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex bus_type_sem irq_context: 0 sb_writers#4 &x->wait#26 irq_context: 0 &sb->s_type->i_mutex_key#10 &u->peer_wait &ei->socket.wq.wait irq_context: 0 
&sb->s_type->i_mutex_key#10 &u->peer_wait &ei->socket.wq.wait &ep->lock irq_context: 0 &sb->s_type->i_mutex_key#10 &u->peer_wait &ei->socket.wq.wait &ep->lock rcu_read_lock &ws->lock irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex input_pool.lock irq_context: 0 &sb->s_type->i_mutex_key#10 &u->peer_wait &ei->socket.wq.wait &ep->lock rcu_read_lock &ws->lock tk_core.seq.seqcount irq_context: 0 &sb->s_type->i_mutex_key#10 &u->peer_wait &ei->socket.wq.wait &ep->lock rcu_read_lock &ws->lock &obj_hash[i].lock irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex batched_entropy_u32.lock irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex stock_lock irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex failover_lock irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex pcpu_alloc_mutex irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex pcpu_alloc_mutex pcpu_lock irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex pcpu_alloc_mutex &rq->__lock irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex proc_subdir_lock irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex &idev->mc_lock &obj_hash[i].lock pool_lock irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex &idev->mc_lock &rq->__lock irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex &idev->mc_lock &n->list_lock irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex &idev->mc_lock &n->list_lock &c->lock irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex &vn->sock_lock irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex rcu_read_lock (console_sem).lock irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex rcu_read_lock console_lock console_srcu console_owner_lock irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex rcu_read_lock console_lock console_srcu console_owner irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex rcu_read_lock console_lock console_srcu console_owner &port_lock_key irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex rcu_read_lock console_lock console_srcu console_owner console_owner_lock irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &journal->j_state_lock &journal->j_wait_commit &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem rcu_read_lock &obj_hash[i].lock pool_lock irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex &sb->s_type->i_mutex_key#3 &n->list_lock irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex &sb->s_type->i_mutex_key#3 &n->list_lock &c->lock irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex &sb->s_type->i_mutex_key#3 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex fs_reclaim &rq->__lock irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex lock kernfs_idr_lock &c->lock irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex lock kernfs_idr_lock pool_lock#2 irq_context: 0 sb_writers#4 (&timer.timer) irq_context: 0 cb_lock &devlink->lock_key#4 quarantine_lock irq_context: 0 cb_lock &devlink->lock_key#4 &sb->s_type->i_mutex_key#3 remove_cache_srcu irq_context: 0 cb_lock 
&devlink->lock_key#4 &sb->s_type->i_mutex_key#3 remove_cache_srcu quarantine_lock irq_context: 0 cb_lock &devlink->lock_key#4 &sb->s_type->i_mutex_key#3 remove_cache_srcu &c->lock irq_context: 0 cb_lock &devlink->lock_key#4 &sb->s_type->i_mutex_key#3 remove_cache_srcu &rq->__lock irq_context: 0 cb_lock &devlink->lock_key#4 &sb->s_type->i_mutex_key#3 remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock &devlink->lock_key#4 &sb->s_type->i_mutex_key#3 remove_cache_srcu &n->list_lock irq_context: 0 cb_lock &devlink->lock_key#4 &sb->s_type->i_mutex_key#3 remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 cb_lock &devlink->lock_key#4 &sb->s_type->i_mutex_key#3 remove_cache_srcu &obj_hash[i].lock irq_context: 0 cb_lock &devlink->lock_key#4 &sb->s_type->i_mutex_key#3 pool_lock#2 irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex &sb->s_type->i_mutex_key#3 pool_lock#2 irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex rcu_read_lock &cfs_rq->removed.lock irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex uevent_sock_mutex &____s->seqcount#2 irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex uevent_sock_mutex &____s->seqcount irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem &folio_wait_table[i] &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 remove_cache_srcu rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 remove_cache_srcu rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 remove_cache_srcu rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 fs_reclaim &rq->__lock irq_context: softirq drivers/net/wireguard/ratelimiter.c:20 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &ep->mtx gdp_mutex &____s->seqcount#2 irq_context: 0 &ep->mtx gdp_mutex &____s->seqcount irq_context: 0 &u->iolock &u->peer_wait &ei->socket.wq.wait &ep->poll_wait/1 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 &iint->mutex rcu_node_0 irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &block->lock fs_reclaim irq_context: 0 rtnl_mutex &block->lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 rtnl_mutex &block->lock pool_lock#2 irq_context: 0 rtnl_mutex &chain->filter_chain_lock irq_context: 0 rtnl_mutex &chain->filter_chain_lock &rq->__lock irq_context: 0 rtnl_mutex &chain->filter_chain_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex cls_mod_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &journal->j_wait_updates irq_context: 0 &type->i_mutex_dir_key#3 sb_writers#4 jbd2_handle &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem rcu_read_lock fill_pool_map-wait-type-override rcu_node_0 irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem rcu_read_lock fill_pool_map-wait-type-override &rq->__lock irq_context: 0 rtnl_mutex &chain->filter_chain_lock &block->proto_destroy_lock irq_context: 0 rtnl_mutex &block->proto_destroy_lock irq_context: 0 rtnl_mutex &block->proto_destroy_lock &rq->__lock irq_context: 0 rtnl_mutex &block->proto_destroy_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &block->lock 
&c->lock irq_context: 0 rtnl_mutex &block->lock &n->list_lock irq_context: 0 rtnl_mutex &block->lock &n->list_lock &c->lock irq_context: 0 rtnl_mutex &block->lock &rq->__lock irq_context: 0 rtnl_mutex &block->lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &block->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex &block->lock nl_table_lock irq_context: 0 rtnl_mutex &block->lock nl_table_wait.lock irq_context: 0 &ep->mtx quarantine_lock irq_context: 0 &ep->mtx remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &ep->mtx remove_cache_srcu pool_lock#2 irq_context: 0 sb_writers#8 &mm->mmap_lock &rq->__lock irq_context: 0 sb_writers#8 &mm->mmap_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &fq->mq_flush_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &obj_hash[i].lock irq_context: 0 &fq->mq_flush_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &base->lock irq_context: 0 &fq->mq_flush_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &base->lock &obj_hash[i].lock irq_context: 0 &ep->mtx fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock &xa->xa_lock#6 rcu_read_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 sb_writers#4 sb_writers#4 batched_entropy_u8.lock crngs.lock irq_context: 0 &u->iolock &rq->__lock &obj_hash[i].lock irq_context: 0 &u->iolock &rq->__lock &base->lock irq_context: 0 &u->iolock &rq->__lock &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &xa->xa_lock#6 stock_lock irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem &journal->j_state_lock irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle lock#4 irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle lock#5 irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle pool_lock#2 irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_es_lock irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem pool_lock#2 irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ei->i_es_lock irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lg->lg_mutex irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lg->lg_mutex &ei->i_prealloc_lock irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lg->lg_mutex rcu_read_lock &pa->pa_lock irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lg->lg_mutex &mapping->private_lock irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lg->lg_mutex &ret->b_state_lock irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lg->lg_mutex &ret->b_state_lock &journal->j_list_lock irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lg->lg_mutex &pa->pa_lock irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lg->lg_mutex &lg->lg_prealloc_lock irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle 
&ei->i_data_sem &obj_hash[i].lock irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ei->i_raw_lock irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &(ei->i_block_reservation_lock) irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &sb->s_type->i_lock_key#22 irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ei->i_es_lock irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ei->i_es_lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ei->i_es_lock pool_lock#2 irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &memcg->move_lock irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &xa->xa_lock#6 irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_raw_lock irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &journal->j_wait_updates irq_context: 0 &mm->mmap_lock sb_pagefaults jbd2_handle &ret->b_state_lock bit_wait_table + i irq_context: 0 rcu_read_lock &ei->socket.wq.wait &ep->lock rcu_read_lock &ws->lock irq_context: 0 rcu_read_lock &ei->socket.wq.wait &ep->lock rcu_read_lock &ws->lock tk_core.seq.seqcount irq_context: 0 rcu_read_lock &ei->socket.wq.wait &ep->lock rcu_read_lock &ws->lock &obj_hash[i].lock irq_context: 0 rcu_read_lock &ei->socket.wq.wait &ep->poll_wait/1 irq_context: 0 rcu_read_lock &ei->socket.wq.wait &ep->poll_wait/1 &p->pi_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle lock#4 &lruvec->lru_lock irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem rcu_read_lock key#10 irq_context: 0 rtnl_mutex &block->lock &____s->seqcount#2 irq_context: 0 rtnl_mutex &block->lock &____s->seqcount irq_context: 0 &sb->s_type->i_mutex_key#10 fill_pool_map-wait-type-override &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 fill_pool_map-wait-type-override &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &type->i_mutex_dir_key#3 sb_writers#4 &pcp->lock &zone->lock irq_context: 0 (wq_completion)events (work_completion)(&data->fib_event_work) &data->fib_lock remove_cache_srcu &rq->__lock irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem &pcp->lock &zone->lock irq_context: 0 &ep->mtx rcu_node_0 irq_context: 0 &ep->mtx uevent_sock_mutex &n->list_lock irq_context: 0 &ep->mtx uevent_sock_mutex &n->list_lock &c->lock irq_context: 0 &ep->mtx fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem batched_entropy_u8.lock irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem kfence_freelist_lock irq_context: 0 &mm->mmap_lock sb_pagefaults jbd2_handle &____s->seqcount irq_context: 0 &mm->mmap_lock sb_pagefaults jbd2_handle rcu_read_lock pool_lock#2 irq_context: 0 &ep->mtx &pcp->lock &zone->lock irq_context: 0 &ep->mtx &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &u->iolock &rq->__lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &c->lock irq_context: 0 &ep->mtx &rq->__lock &cfs_rq->removed.lock irq_context: 0 uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock 
&cfs_rq->removed.lock irq_context: 0 &ep->mtx &cfs_rq->removed.lock irq_context: 0 &ep->mtx &root->kernfs_rwsem rcu_read_lock rcu_node_0 irq_context: 0 &ep->mtx &root->kernfs_rwsem rcu_read_lock &rq->__lock irq_context: 0 &ep->mtx &root->kernfs_rwsem rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &ep->mtx &root->kernfs_rwsem &cfs_rq->removed.lock irq_context: 0 &mm->mmap_lock sb_pagefaults rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &bgl->locks[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 remove_cache_srcu rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 remove_cache_srcu rcu_read_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 remove_cache_srcu rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &ep->mtx fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &ep->mtx uevent_sock_mutex rcu_read_lock &rcu_state.gp_wq irq_context: 0 &ep->mtx uevent_sock_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 &ep->mtx uevent_sock_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 &ep->mtx uevent_sock_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &ep->mtx &root->kernfs_rwsem &root->kernfs_iattr_rwsem &sem->wait_lock irq_context: 0 &ep->mtx &root->kernfs_rwsem &root->kernfs_iattr_rwsem &rq->__lock irq_context: 0 &ep->mtx &root->kernfs_rwsem &root->kernfs_iattr_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &ep->mtx lock kernfs_idr_lock &n->list_lock irq_context: 0 &ep->mtx lock kernfs_idr_lock &n->list_lock &c->lock irq_context: 0 &ep->mtx &p->pi_lock &cfs_rq->removed.lock irq_context: 0 sb_writers#5 rcu_read_lock &rcu_state.gp_wq irq_context: 0 sb_writers#5 rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 sb_writers#5 rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#5 rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem &journal->j_state_lock irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem &journal->j_state_lock tk_core.seq.seqcount irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem &journal->j_state_lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem &journal->j_state_lock &base->lock irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem &journal->j_state_lock &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ret->b_state_lock irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ei->i_es_lock key#7 irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &rq->__lock irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_UNIX &mm->mmap_lock irq_context: 0 sk_lock-AF_UNIX fs_reclaim irq_context: 0 sk_lock-AF_UNIX fs_reclaim &rq->__lock irq_context: 0 sk_lock-AF_UNIX fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sk_lock-AF_UNIX &c->lock irq_context: 0 sk_lock-AF_UNIX free_vmap_area_lock irq_context: 0 sk_lock-AF_UNIX vmap_area_lock irq_context: 0 sk_lock-AF_UNIX &____s->seqcount irq_context: 0 sk_lock-AF_UNIX stock_lock irq_context: 0 
sk_lock-AF_UNIX pcpu_alloc_mutex irq_context: 0 sk_lock-AF_UNIX pcpu_alloc_mutex pcpu_lock irq_context: 0 sk_lock-AF_UNIX &obj_hash[i].lock irq_context: 0 sk_lock-AF_UNIX pack_mutex irq_context: 0 sk_lock-AF_UNIX batched_entropy_u32.lock irq_context: 0 sk_lock-AF_UNIX text_mutex irq_context: 0 sk_lock-AF_UNIX text_mutex ptlock_ptr(page)#2 irq_context: 0 sk_lock-AF_UNIX &fp->aux->used_maps_mutex irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &meta->lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_QIPCRTR irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_QIPCRTR slock-AF_QIPCRTR irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_QIPCRTR clock-AF_QIPCRTR irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_QIPCRTR rlock-AF_QIPCRTR irq_context: 0 &sb->s_type->i_mutex_key#10 slock-AF_QIPCRTR irq_context: 0 sb_writers#8 &type->i_mutex_dir_key#4 rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#8 &type->i_mutex_dir_key#4 rcu_read_lock &rq->__lock irq_context: 0 sb_writers#8 &type->i_mutex_dir_key#4 rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_PPPOX irq_context: 0 sk_lock-AF_PPPOX slock-AF_PPPOX irq_context: 0 slock-AF_PPPOX irq_context: 0 &sb->s_type->i_mutex_key#10 rlock-AF_PPPOX irq_context: 0 &sb->s_type->i_mutex_key#10 wlock-AF_PPPOX irq_context: 0 sb_writers#8 iattr_mutex &rq->__lock irq_context: 0 sb_writers#8 iattr_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events drain_vmap_work vmap_purge_lock free_vmap_area_lock init_mm.page_table_lock &pcp->lock &zone->lock irq_context: 0 (wq_completion)events drain_vmap_work vmap_purge_lock free_vmap_area_lock init_mm.page_table_lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 (wq_completion)events (work_completion)(&w->w) nfc_devlist_mutex &genl_data->genl_data_mutex &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&w->w) nfc_devlist_mutex &genl_data->genl_data_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#8 &root->kernfs_iattr_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 rcu_read_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 pernet_ops_rwsem rtnl_mutex remove_cache_srcu &pcp->lock &zone->lock irq_context: 0 pernet_ops_rwsem rtnl_mutex remove_cache_srcu &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &sb->s_type->i_mutex_key#10 slock-AF_INET &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 slock-AF_INET pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 slock-AF_INET elock-AF_INET irq_context: 0 pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem &rq->__lock &cfs_rq->removed.lock irq_context: 0 &pipe->mutex/1 &rcu_state.expedited_wq irq_context: 0 sb_writers#4 sb_internal rcu_node_0 irq_context: 0 sk_lock-AF_INET6 crypto_alg_sem irq_context: 0 sb_writers#4 sb_internal &rcu_state.expedited_wq irq_context: 0 sb_writers#4 sb_internal &rcu_state.expedited_wq &p->pi_lock irq_context: 0 sb_writers#4 sb_internal &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 sb_internal &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_INET6 &sctp_ep_hashtable[i].lock irq_context: 0 sock_diag_mutex sock_diag_table_mutex nlk_cb_mutex-SOCK_DIAG inet_diag_table_mutex &sctp_ep_hashtable[i].lock irq_context: 0 sock_diag_mutex sock_diag_table_mutex nlk_cb_mutex-SOCK_DIAG inet_diag_table_mutex &ht->lock irq_context: 0 sock_diag_mutex sock_diag_table_mutex 
nlk_cb_mutex-SOCK_DIAG inet_diag_table_mutex rcu_read_lock &ht->lock irq_context: 0 elock-AF_INET6 irq_context: 0 &sb->s_type->i_mutex_key#10 &net->ipv4.ra_mutex rcu_node_0 irq_context: 0 &sb->s_type->i_mutex_key#10 &net->ipv4.ra_mutex &rcu_state.expedited_wq irq_context: 0 &sb->s_type->i_mutex_key#10 &net->ipv4.ra_mutex &rcu_state.expedited_wq &p->pi_lock irq_context: 0 &sb->s_type->i_mutex_key#10 &net->ipv4.ra_mutex &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 &net->ipv4.ra_mutex &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 &net->ipv4.ra_mutex &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 &net->ipv4.ra_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &group->notification_waitq &p->pi_lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) &rcu_state.expedited_wq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle remove_cache_srcu irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle remove_cache_srcu quarantine_lock irq_context: 0 sk_lock-AF_INET6 crypto_alg_sem &rq->__lock irq_context: 0 sk_lock-AF_INET6 crypto_alg_sem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock &rdev->wiphy.mtx &local->mtx &n->list_lock irq_context: 0 cb_lock &rdev->wiphy.mtx &local->mtx &n->list_lock &c->lock irq_context: 0 cb_lock &rdev->wiphy.mtx &local->mtx &rq->__lock irq_context: 0 cb_lock &rdev->wiphy.mtx &local->mtx &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock &rdev->wiphy.mtx &local->mtx &local->ack_status_lock &c->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_data_sem remove_cache_srcu pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &pcp->lock &zone->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle rcu_read_lock &rcu_state.expedited_wq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem remove_cache_srcu irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem remove_cache_srcu quarantine_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem remove_cache_srcu &c->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem remove_cache_srcu &n->list_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem remove_cache_srcu &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem remove_cache_srcu pool_lock#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem remove_cache_srcu &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq (&icsk->icsk_delack_timer) slock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock &n->list_lock irq_context: softirq (&icsk->icsk_delack_timer) slock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock &n->list_lock &c->lock irq_context: 0 sk_lock-AF_KCM clock-AF_KCM irq_context: 0 
(work_completion)(&kcm->tx_work) irq_context: 0 &mux->rx_lock rlock-AF_KCM irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 remove_cache_srcu rcu_node_0 irq_context: 0 nfnl_subsys_ipset &c->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss &rcu_state.expedited_wq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 &rcu_state.expedited_wq irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 batched_entropy_u8.lock irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 batched_entropy_u8.lock crngs.lock irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 kfence_freelist_lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 batched_entropy_u8.lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &nf_conntrack_locks[i]/1 irq_context: softirq rcu_read_lock rcu_read_lock &sctp_ep_hashtable[i].lock irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6 irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6 &c->lock irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6 pool_lock#2 irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6 &obj_hash[i].lock irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6 crngs.lock irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6 &____s->seqcount irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6 rcu_read_lock rcu_read_lock &____s->seqcount#7 irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6 rcu_read_lock rcu_read_lock &ct->lock irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock &n->lock irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh noop_qdisc.q.lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET &tcp_hashinfo.bhash[i].lock stock_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET &tcp_hashinfo.bhash[i].lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET &tcp_hashinfo.bhash[i].lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock stock_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock pool_lock#2 irq_context: 0 sock_diag_mutex fs_reclaim irq_context: 0 sock_diag_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sock_diag_mutex pool_lock#2 irq_context: 0 sock_diag_mutex rcu_read_lock pool_lock#2 irq_context: 0 sock_diag_mutex rcu_read_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 sock_diag_mutex rcu_read_lock rcu_read_lock_bh pool_lock#2 irq_context: 0 sock_diag_mutex rlock-AF_NETLINK irq_context: 0 sk_lock-AF_INET6 clock-AF_INET6 irq_context: 0 slock-AF_INET6 &obj_hash[i].lock irq_context: 0 slock-AF_INET6 pool_lock#2 irq_context: 0 slock-AF_INET6 elock-AF_INET6 irq_context: 0 (wq_completion)rcu_gp (work_completion)(&(&ssp->srcu_sup->work)->work) pool_lock#2 irq_context: 0 nfnl_subsys_ipset &obj_hash[i].lock irq_context: 0 nfnl_subsys_ipset &base->lock 
irq_context: 0 nfnl_subsys_ipset &base->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex &p->alloc_lock irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock &nf_conntrack_locks[i]/1 irq_context: 0 rtnl_mutex &list->lock#2 irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 rcu_read_lock rcu_node_0 irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 rcu_read_lock &rq->__lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 &xa->xa_lock#12 irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 stock_lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 rcu_read_lock pool_lock#2 irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rcu_read_lock rcu_read_lock_bh &base->lock irq_context: 0 rcu_read_lock rcu_read_lock_bh &base->lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 &rnp->exp_wq[3] irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 event_mutex irq_context: 0 event_mutex sched_register_mutex irq_context: 0 event_mutex sched_register_mutex tracepoints_mutex irq_context: 0 event_mutex sched_register_mutex tracepoints_mutex fs_reclaim irq_context: 0 event_mutex sched_register_mutex tracepoints_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 event_mutex sched_register_mutex tracepoints_mutex &c->lock irq_context: 0 event_mutex sched_register_mutex tracepoints_mutex pool_lock#2 irq_context: 0 event_mutex sched_register_mutex tracepoints_mutex cpu_hotplug_lock irq_context: 0 event_mutex sched_register_mutex tracepoints_mutex cpu_hotplug_lock static_call_mutex irq_context: 0 event_mutex sched_register_mutex tracepoints_mutex cpu_hotplug_lock static_call_mutex text_mutex irq_context: 0 event_mutex sched_register_mutex tracepoints_mutex cpu_hotplug_lock static_call_mutex text_mutex ptlock_ptr(page)#2 irq_context: 0 event_mutex sched_register_mutex tracepoints_mutex cpu_hotplug_lock jump_label_mutex irq_context: 0 event_mutex sched_register_mutex tracepoints_mutex cpu_hotplug_lock jump_label_mutex text_mutex irq_context: 0 event_mutex sched_register_mutex tracepoints_mutex cpu_hotplug_lock jump_label_mutex text_mutex ptlock_ptr(page)#2 irq_context: 0 event_mutex sched_register_mutex tracepoints_mutex cpu_hotplug_lock static_call_mutex text_mutex &rq->__lock irq_context: 0 event_mutex sched_register_mutex tracepoints_mutex cpu_hotplug_lock static_call_mutex text_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 event_mutex tracepoints_mutex irq_context: 0 event_mutex tracepoints_mutex fs_reclaim irq_context: 0 event_mutex tracepoints_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 event_mutex tracepoints_mutex pool_lock#2 irq_context: 0 event_mutex tracepoints_mutex cpu_hotplug_lock irq_context: 0 event_mutex tracepoints_mutex cpu_hotplug_lock static_call_mutex irq_context: 0 event_mutex tracepoints_mutex cpu_hotplug_lock static_call_mutex &rq->__lock irq_context: 0 event_mutex tracepoints_mutex cpu_hotplug_lock static_call_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq (&map->gc) irq_context: softirq (&map->gc) &set->lock irq_context: softirq (&map->gc) &obj_hash[i].lock irq_context: softirq (&map->gc) &base->lock irq_context: softirq (&map->gc) &base->lock 
&obj_hash[i].lock irq_context: 0 event_mutex tracepoints_mutex cpu_hotplug_lock static_call_mutex text_mutex irq_context: 0 event_mutex tracepoints_mutex cpu_hotplug_lock static_call_mutex text_mutex &rq->__lock irq_context: 0 event_mutex tracepoints_mutex cpu_hotplug_lock static_call_mutex text_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 event_mutex tracepoints_mutex cpu_hotplug_lock static_call_mutex text_mutex ptlock_ptr(page)#2 irq_context: 0 event_mutex tracepoints_mutex cpu_hotplug_lock jump_label_mutex irq_context: 0 event_mutex tracepoints_mutex cpu_hotplug_lock jump_label_mutex text_mutex irq_context: 0 event_mutex tracepoints_mutex cpu_hotplug_lock jump_label_mutex text_mutex ptlock_ptr(page)#2 irq_context: 0 event_mutex &rq->__lock irq_context: 0 event_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 tracepoints_mutex tasklist_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem quarantine_lock irq_context: 0 tracepoints_mutex &n->list_lock irq_context: 0 tracepoints_mutex &n->list_lock &c->lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 fill_pool_map-wait-type-override &rq->__lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 rcu_node_0 irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 &rcu_state.gp_wq irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 &rcu_state.gp_wq &p->pi_lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#8 &of->mutex kn->active#5 uevent_sock_mutex &pcp->lock &zone->lock irq_context: 0 defrag4_mutex irq_context: 0 rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock rcu_read_lock &meta->lock irq_context: 0 rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock rcu_read_lock kfence_freelist_lock irq_context: hardirq rcu_read_lock &p->pi_lock &cfs_rq->removed.lock irq_context: 0 rcu_tasks_trace.tasks_gp_mutex rcu_state.exp_mutex &rnp->exp_wq[2] irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6/1 &c->lock irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6/1 &xa->xa_lock#12 irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6/1 &obj_hash[i].lock irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6/1 stock_lock irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6/1 cgroup_mutex &n->list_lock irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6/1 cgroup_mutex &n->list_lock &c->lock irq_context: 0 &type->i_mutex_dir_key#6 &root->kernfs_rwsem pool_lock#2 irq_context: 0 &type->i_mutex_dir_key#6 &root->kernfs_rwsem &xa->xa_lock#12 irq_context: 0 &type->i_mutex_dir_key#6 &root->kernfs_rwsem &xa->xa_lock#12 pool_lock#2 irq_context: 0 &type->i_mutex_dir_key#6 &root->kernfs_rwsem &obj_hash[i].lock irq_context: 0 &type->i_mutex_dir_key#6 &root->kernfs_rwsem stock_lock irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6 stock_lock irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6 &root->kernfs_rwsem stock_lock irq_context: 0 kn->active#59 fs_reclaim irq_context: 0 kn->active#59 fs_reclaim 
mmu_notifier_invalidate_range_start irq_context: 0 kn->active#59 stock_lock irq_context: 0 kn->active#59 &kernfs_locks->open_file_mutex[count] irq_context: 0 kn->active#59 &kernfs_locks->open_file_mutex[count] fs_reclaim irq_context: 0 kn->active#59 &kernfs_locks->open_file_mutex[count] fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &f->f_pos_lock sb_writers#10 irq_context: 0 &f->f_pos_lock sb_writers#10 &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#10 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#10 fs_reclaim irq_context: 0 &f->f_pos_lock sb_writers#10 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &f->f_pos_lock sb_writers#10 &c->lock irq_context: 0 &f->f_pos_lock sb_writers#10 &mm->mmap_lock irq_context: 0 &f->f_pos_lock sb_writers#10 &mm->mmap_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#10 &mm->mmap_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#10 &of->mutex irq_context: 0 &f->f_pos_lock sb_writers#10 &of->mutex cgroup_mutex irq_context: 0 &f->f_pos_lock sb_writers#10 &of->mutex kn->active#59 fs_reclaim irq_context: 0 &f->f_pos_lock sb_writers#10 &of->mutex kn->active#59 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &f->f_pos_lock sb_writers#10 &of->mutex kn->active#59 &c->lock irq_context: 0 &f->f_pos_lock sb_writers#10 &of->mutex kn->active#59 pool_lock#2 irq_context: 0 &f->f_pos_lock sb_writers#10 &of->mutex kn->active#59 &group->rtpoll_trigger_lock irq_context: 0 &f->f_pos_lock sb_writers#10 &of->mutex kn->active#59 &group->rtpoll_trigger_lock fs_reclaim irq_context: 0 &f->f_pos_lock sb_writers#10 &of->mutex kn->active#59 &group->rtpoll_trigger_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &f->f_pos_lock sb_writers#10 &of->mutex kn->active#59 &group->rtpoll_trigger_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#10 &of->mutex kn->active#59 &group->rtpoll_trigger_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#5 &fsnotify_mark_srcu &rq->__lock irq_context: 0 kn->active#54 stock_lock irq_context: 0 kn->active#54 &n->list_lock irq_context: 0 kn->active#54 &n->list_lock &c->lock irq_context: 0 &f->f_pos_lock sb_writers#10 &of->mutex kn->active#59 &group->rtpoll_trigger_lock &c->lock irq_context: 0 &f->f_pos_lock sb_writers#10 &of->mutex kn->active#59 &group->rtpoll_trigger_lock &n->list_lock irq_context: 0 &f->f_pos_lock sb_writers#10 &of->mutex kn->active#59 &group->rtpoll_trigger_lock &n->list_lock &c->lock irq_context: 0 &f->f_pos_lock sb_writers#10 &of->mutex kn->active#59 &group->rtpoll_trigger_lock pool_lock#2 irq_context: 0 &f->f_pos_lock sb_writers#10 &of->mutex kn->active#59 &group->rtpoll_trigger_lock kthread_create_lock irq_context: 0 &f->f_pos_lock sb_writers#10 &of->mutex kn->active#59 &group->rtpoll_trigger_lock &p->pi_lock irq_context: 0 &f->f_pos_lock sb_writers#10 &of->mutex kn->active#59 &group->rtpoll_trigger_lock &p->pi_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#10 &of->mutex kn->active#59 &group->rtpoll_trigger_lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#10 &of->mutex kn->active#59 &group->rtpoll_trigger_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#10 &of->mutex kn->active#59 &group->rtpoll_trigger_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 
&f->f_pos_lock sb_writers#10 &of->mutex cgroup_mutex cpu_hotplug_lock irq_context: 0 &f->f_pos_lock sb_writers#10 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem irq_context: 0 &f->f_pos_lock sb_writers#10 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem cgroup_threadgroup_rwsem.rss.gp_wait.lock irq_context: 0 &f->f_pos_lock sb_writers#10 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem rcu_state.exp_mutex rcu_node_0 irq_context: 0 &f->f_pos_lock sb_writers#10 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem rcu_state.exp_mutex &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#10 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem rcu_state.exp_mutex rcu_read_lock &pool->lock irq_context: 0 &f->f_pos_lock sb_writers#10 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem rcu_state.exp_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#10 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 &f->f_pos_lock sb_writers#10 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#10 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#10 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem rcu_state.exp_mutex &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#10 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem rcu_state.exp_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#10 &of->mutex kn->active#59 &group->rtpoll_trigger_lock &x->wait irq_context: 0 &f->f_pos_lock sb_writers#10 &of->mutex kn->active#59 &group->rtpoll_trigger_lock &obj_hash[i].lock irq_context: 0 &rq->__lock &rt_rq->rt_runtime_lock irq_context: 0 &group->rtpoll_wait irq_context: 0 &f->f_pos_lock sb_writers#10 &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#10 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem rcu_state.exp_mutex &rnp->exp_wq[0] irq_context: 0 &f->f_pos_lock sb_writers#10 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#10 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem css_set_lock irq_context: 0 &f->f_pos_lock sb_writers#10 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem inode_hash_lock irq_context: 0 &f->f_pos_lock sb_writers#10 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem inode_hash_lock &sb->s_type->i_lock_key#30 irq_context: 0 &f->f_pos_lock sb_writers#10 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem &root->kernfs_iattr_rwsem irq_context: 0 &f->f_pos_lock sb_writers#10 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem fs_reclaim irq_context: 0 &f->f_pos_lock sb_writers#10 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &f->f_pos_lock sb_writers#10 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem &c->lock irq_context: 0 &f->f_pos_lock sb_writers#10 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem css_set_lock 
cgroup_file_kn_lock irq_context: 0 &f->f_pos_lock sb_writers#10 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem css_set_lock cgroup_file_kn_lock kernfs_notify_lock irq_context: 0 &f->f_pos_lock sb_writers#10 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem css_set_lock cgroup_file_kn_lock kernfs_notify_lock rcu_read_lock &pool->lock irq_context: 0 &f->f_pos_lock sb_writers#10 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem css_set_lock cgroup_file_kn_lock kernfs_notify_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#10 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem css_set_lock cgroup_file_kn_lock kernfs_notify_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 &f->f_pos_lock sb_writers#10 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem css_set_lock cgroup_file_kn_lock kernfs_notify_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#10 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem css_set_lock cgroup_file_kn_lock kernfs_notify_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#10 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem css_set_lock &p->pi_lock irq_context: 0 &f->f_pos_lock sb_writers#10 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem css_set_lock &p->pi_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#10 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem css_set_lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#10 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem css_set_lock &p->pi_lock &rq->__lock &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#10 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem css_set_lock &p->pi_lock &rq->__lock &base->lock irq_context: 0 &f->f_pos_lock sb_writers#10 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem css_set_lock &p->pi_lock &rq->__lock &base->lock &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#10 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#10 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem &c->lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lg->lg_mutex &ret->b_state_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lg->lg_mutex &ret->b_state_lock &journal->j_list_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &rq->__lock rcu_read_lock &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#4 &rq->__lock rcu_read_lock &base->lock irq_context: 0 &f->f_pos_lock sb_writers#4 &rq->__lock rcu_read_lock &base->lock &obj_hash[i].lock irq_context: 0 cgroup_threadgroup_rwsem cgroup_threadgroup_rwsem.waiters.lock irq_context: softirq (&group->rtpoll_timer) irq_context: softirq (&group->rtpoll_timer) &group->rtpoll_wait irq_context: softirq (&group->rtpoll_timer) &group->rtpoll_wait &p->pi_lock irq_context: softirq (&group->rtpoll_timer) &group->rtpoll_wait &p->pi_lock &rq->__lock irq_context: softirq (&group->rtpoll_timer) &group->rtpoll_wait &p->pi_lock &rq->__lock 
&per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq (&group->rtpoll_timer) &group->rtpoll_wait &p->pi_lock &rq->__lock &rt_b->rt_runtime_lock irq_context: 0 &group->rtpoll_trigger_lock irq_context: 0 &group->rtpoll_trigger_lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &group->rtpoll_trigger_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 &group->rtpoll_trigger_lock rcu_read_lock &base->lock irq_context: 0 &group->rtpoll_trigger_lock rcu_read_lock &base->lock &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#4 &journal->j_state_lock &journal->j_wait_commit &p->pi_lock &cfs_rq->removed.lock irq_context: 0 &f->f_pos_lock sb_writers#10 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem.waiters.lock irq_context: 0 &f->f_pos_lock sb_writers#10 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem.waiters.lock &p->pi_lock irq_context: 0 &f->f_pos_lock sb_writers#10 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem.waiters.lock &p->pi_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#10 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem.waiters.lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#10 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem.waiters.lock &p->pi_lock &cfs_rq->removed.lock irq_context: 0 &f->f_pos_lock sb_writers#10 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem.waiters.lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &f->f_pos_lock sb_writers#10 &of->mutex cgroup_mutex cpu_hotplug_lock &rq->__lock irq_context: 0 cgroup_threadgroup_rwsem pool_lock#2 irq_context: 0 &f->f_pos_lock sb_writers#10 &of->mutex cgroup_mutex cpu_hotplug_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work stock_lock rcu_read_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 (wq_completion)netns net_cleanup_work per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 &f->f_pos_lock sb_writers#10 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem.rss.gp_wait.lock irq_context: 0 &f->f_pos_lock sb_writers#10 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem.rss.gp_wait.lock &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#10 &of->mutex cgroup_mutex (wq_completion)cpuset_migrate_mm irq_context: 0 &f->f_pos_lock sb_writers#10 &of->mutex cgroup_mutex &wq->mutex irq_context: 0 &f->f_pos_lock sb_writers#10 &of->mutex cgroup_mutex &wq->mutex &pool->lock/1 irq_context: 0 &f->f_pos_lock sb_writers#10 &of->mutex cgroup_mutex &wq->mutex &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#10 &of->mutex cgroup_mutex &wq->mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#10 &of->mutex cgroup_mutex &wq->mutex &x->wait#10 irq_context: 0 &kernfs_locks->open_file_mutex[count] &group->rtpoll_trigger_lock irq_context: 0 &kernfs_locks->open_file_mutex[count] &group->rtpoll_trigger_lock &obj_hash[i].lock irq_context: 0 &kernfs_locks->open_file_mutex[count] &group->rtpoll_trigger_lock &base->lock irq_context: 0 &kernfs_locks->open_file_mutex[count] &group->rtpoll_trigger_lock &base->lock &obj_hash[i].lock irq_context: 0 &kernfs_locks->open_file_mutex[count] &group->rtpoll_trigger_lock &rq->__lock irq_context: 0 &kernfs_locks->open_file_mutex[count] &group->rtpoll_trigger_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &kernfs_locks->open_file_mutex[count] 
rcu_state.exp_mutex rcu_node_0 irq_context: 0 &kernfs_locks->open_file_mutex[count] rcu_state.exp_mutex &obj_hash[i].lock irq_context: 0 &kernfs_locks->open_file_mutex[count] rcu_state.exp_mutex rcu_read_lock &pool->lock irq_context: 0 &kernfs_locks->open_file_mutex[count] rcu_state.exp_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 &kernfs_locks->open_file_mutex[count] rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 &kernfs_locks->open_file_mutex[count] rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 &kernfs_locks->open_file_mutex[count] rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &kernfs_locks->open_file_mutex[count] rcu_state.exp_mutex &rnp->exp_wq[0] irq_context: 0 &kernfs_locks->open_file_mutex[count] rcu_state.exp_mutex &rq->__lock irq_context: 0 &kernfs_locks->open_file_mutex[count] rcu_state.exp_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &kernfs_locks->open_file_mutex[count] &p->pi_lock irq_context: 0 &kernfs_locks->open_file_mutex[count] &p->pi_lock &rq->__lock irq_context: 0 &kernfs_locks->open_file_mutex[count] &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &kernfs_locks->open_file_mutex[count] &p->pi_lock &rq->__lock &rt_b->rt_runtime_lock irq_context: 0 &kernfs_locks->open_file_mutex[count] &x->wait irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &cfs_rq->removed.lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem rcu_read_lock &rcu_state.gp_wq irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#10 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem &rq->__lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 &kernfs_locks->open_file_mutex[count] rcu_state.exp_mutex &rnp->exp_wq[2] irq_context: 0 rtnl_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem &obj_hash[i].lock irq_context: 0 rtnl_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem pool_lock#2 irq_context: 0 rtnl_mutex &sch->q.lock batched_entropy_u32.lock irq_context: 0 rcu_read_lock_bh &sch->q.lock irq_context: 0 rcu_read_lock_bh &sch->q.lock batched_entropy_u32.lock irq_context: 0 rcu_read_lock_bh &sch->q.lock pool_lock#2 irq_context: 0 rcu_read_lock_bh &sch->q.lock tk_core.seq.seqcount irq_context: 0 rcu_read_lock_bh &sch->q.lock hrtimer_bases.lock irq_context: 0 rcu_read_lock_bh &sch->q.lock hrtimer_bases.lock &obj_hash[i].lock irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock &im->lock rcu_read_lock &____s->seqcount#2 irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock &im->lock rcu_read_lock &____s->seqcount irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock rcu_read_lock_bh &sch->q.lock batched_entropy_u32.lock irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock rcu_read_lock_bh &sch->q.lock 
batched_entropy_u32.lock crngs.lock irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock rcu_read_lock_bh &sch->q.lock pool_lock#2 irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock rcu_read_lock_bh &sch->q.lock &obj_hash[i].lock irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock rcu_read_lock_bh &sch->q.lock batched_entropy_u8.lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &sch->q.lock batched_entropy_u32.lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &sch->q.lock &c->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &sch->q.lock pool_lock#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &sch->q.lock &obj_hash[i].lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &sch->q.lock batched_entropy_u8.lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &sb->s_type->i_mutex_key#12 &sb->s_type->i_mutex_key#12/4 rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &sb->s_type->i_mutex_key#12 &sb->s_type->i_mutex_key#12/4 rcu_read_lock &rq->__lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &sb->s_type->i_mutex_key#12 &sb->s_type->i_mutex_key#12/4 rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6/1 cgroup_mutex pcpu_alloc_mutex &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 integrity_iint_lock irq_context: 0 sb_writers#4 &iint->mutex &ei->xattr_sem irq_context: 0 sb_writers#4 &iint->mutex mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#4 &iint->mutex fs_reclaim irq_context: 0 sb_writers#4 &iint->mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#4 &iint->mutex rcu_read_lock mount_lock.seqcount irq_context: 0 sb_writers#4 &iint->mutex rcu_read_lock rcu_read_lock rename_lock.seqcount irq_context: 0 sb_writers#4 &iint->mutex ima_extend_list_mutex irq_context: 0 sb_writers#4 &iint->mutex ima_extend_list_mutex fs_reclaim irq_context: 0 sb_writers#4 &iint->mutex ima_extend_list_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#4 &iint->mutex ima_extend_list_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 sb_writers#4 &iint->mutex ima_extend_list_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &type->i_mutex_dir_key#6 rcu_read_lock rcu_node_0 irq_context: 0 &type->i_mutex_dir_key#6 rcu_read_lock &rq->__lock irq_context: 0 &type->i_mutex_dir_key#6 rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 kn->active#59 &c->lock irq_context: 0 kn->active#59 &rq->__lock irq_context: 0 kn->active#59 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 kn->active#54 &kernfs_locks->open_file_mutex[count] &c->lock irq_context: 0 &type->i_mutex_dir_key#6 rcu_read_lock &rcu_state.gp_wq irq_context: 0 &type->i_mutex_dir_key#6 rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 &type->i_mutex_dir_key#6 rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 
&type->i_mutex_dir_key#6 rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 rcu_read_lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 rcu_read_lock &obj_hash[i].lock irq_context: 0 &type->i_mutex_dir_key#6 &root->kernfs_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6 &n->list_lock irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6 &n->list_lock &c->lock irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6 &rq->__lock irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 kn->active#59 &kernfs_locks->open_file_mutex[count] &c->lock irq_context: 0 rtnl_mutex team->team_lock_key#7 pool_lock#2 irq_context: 0 rtnl_mutex team->team_lock_key#7 &pn->hash_lock irq_context: 0 rtnl_mutex team->team_lock_key#7 rcu_read_lock &tb->tb6_lock irq_context: 0 &f->f_pos_lock sb_writers#10 &of->mutex cgroup_mutex cgroup_mutex.wait_lock irq_context: 0 rtnl_mutex team->team_lock_key#7 rcu_read_lock &tb->tb6_lock &net->ipv6.fib6_walker_lock irq_context: 0 &f->f_pos_lock sb_writers#10 &of->mutex cgroup_mutex &rq->__lock irq_context: 0 rtnl_mutex team->team_lock_key#7 &ul->lock#2 irq_context: 0 &f->f_pos_lock sb_writers#10 &of->mutex cgroup_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex team->team_lock_key#7 &tbl->lock irq_context: 0 rtnl_mutex team->team_lock_key#7 class irq_context: 0 rtnl_mutex team->team_lock_key#7 (&tbl->proxy_timer) irq_context: 0 rtnl_mutex team->team_lock_key#7 &base->lock irq_context: 0 rtnl_mutex team->team_lock_key#7 proc_subdir_lock irq_context: 0 rtnl_mutex team->team_lock_key#7 &ent->pde_unload_lock irq_context: 0 rtnl_mutex team->team_lock_key#7 proc_inum_ida.xa_lock irq_context: 0 rtnl_mutex team->team_lock_key#7 &net->ipv6.addrconf_hash_lock irq_context: 0 rtnl_mutex team->team_lock_key#7 &ndev->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex team->team_lock_key#7 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#10 &of->mutex cgroup_mutex.wait_lock irq_context: 0 &f->f_pos_lock sb_writers#10 &of->mutex &p->pi_lock irq_context: 0 &f->f_pos_lock sb_writers#10 &of->mutex &p->pi_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#10 &of->mutex &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex team->team_lock_key#7 &idev->mc_lock irq_context: 0 rtnl_mutex team->team_lock_key#7 rcu_state.exp_mutex rcu_node_0 irq_context: 0 rtnl_mutex team->team_lock_key#7 rcu_state.exp_mutex &obj_hash[i].lock irq_context: 0 rtnl_mutex team->team_lock_key#7 rcu_state.exp_mutex rcu_read_lock &pool->lock irq_context: 0 rtnl_mutex team->team_lock_key#7 rcu_state.exp_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &c->lock irq_context: 0 rtnl_mutex team->team_lock_key#7 rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 rtnl_mutex team->team_lock_key#7 rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 rtnl_mutex team->team_lock_key#7 rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex team->team_lock_key#7 rcu_state.exp_mutex &rq->__lock irq_context: 0 rtnl_mutex 
team->team_lock_key#7 rcu_state.exp_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex team->team_lock_key#7 &idev->mc_query_lock irq_context: 0 rtnl_mutex team->team_lock_key#7 &idev->mc_query_lock &obj_hash[i].lock irq_context: 0 rtnl_mutex team->team_lock_key#7 (work_completion)(&(&idev->mc_report_work)->work) irq_context: 0 rtnl_mutex team->team_lock_key#7 &idev->mc_lock &obj_hash[i].lock irq_context: 0 rtnl_mutex team->team_lock_key#7 &idev->mc_lock pool_lock#2 irq_context: 0 rtnl_mutex team->team_lock_key#7 &idev->mc_lock krc.lock irq_context: 0 rtnl_mutex team->team_lock_key#7 &idev->mc_report_lock irq_context: 0 rtnl_mutex team->team_lock_key#7 krc.lock irq_context: 0 rtnl_mutex team->team_lock_key#7 sysctl_lock irq_context: 0 rtnl_mutex team->team_lock_key#7 sysctl_lock &obj_hash[i].lock irq_context: 0 rtnl_mutex team->team_lock_key#7 sysctl_lock &obj_hash[i].lock pool_lock irq_context: 0 rtnl_mutex team->team_lock_key#7 sysctl_lock pool_lock#2 irq_context: 0 rtnl_mutex team->team_lock_key#7 sysctl_lock krc.lock irq_context: 0 rtnl_mutex team->team_lock_key#7 rcu_state.exp_mutex &rnp->exp_wq[2] irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock rcu_read_lock_bh &sch->q.lock &c->lock irq_context: 0 rtnl_mutex team->team_lock_key#7 quarantine_lock irq_context: 0 &kernfs_locks->open_file_mutex[count] &rnp->exp_lock irq_context: 0 &kernfs_locks->open_file_mutex[count] rcu_state.exp_mutex irq_context: 0 &kernfs_locks->open_file_mutex[count] rcu_state.exp_mutex rcu_state.exp_mutex.wait_lock irq_context: 0 &kernfs_locks->open_file_mutex[count] rcu_state.exp_mutex.wait_lock irq_context: 0 &kernfs_locks->open_file_mutex[count] &p->pi_lock &cfs_rq->removed.lock irq_context: 0 &kernfs_locks->open_file_mutex[count] rcu_state.exp_mutex pool_lock#2 irq_context: 0 &kernfs_locks->open_file_mutex[count] rcu_state.exp_mutex &rnp->exp_wq[1] irq_context: 0 &kernfs_locks->open_file_mutex[count] rcu_state.exp_mutex &rq->__lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 (wq_completion)rcu_gp (work_completion)(&(&ssp->srcu_sup->work)->work) &ssp->srcu_sup->srcu_gp_mutex pool_lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh &sch->q.lock batched_entropy_u32.lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh &sch->q.lock pool_lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh &sch->q.lock &c->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh &sch->q.lock &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh &sch->q.lock batched_entropy_u8.lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &sch->q.lock batched_entropy_u32.lock crngs.lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &sch->q.lock hrtimer_bases.lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &sch->q.lock 
hrtimer_bases.lock &obj_hash[i].lock irq_context: 0 sb_writers#8 &of->mutex kn->active#5 uevent_sock_mutex nl_table_wait.lock &p->pi_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#5 uevent_sock_mutex nl_table_wait.lock &p->pi_lock &rq->__lock irq_context: 0 sb_writers#8 &of->mutex kn->active#5 uevent_sock_mutex nl_table_wait.lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#10 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem rcu_state.exp_mutex &rnp->exp_wq[3] irq_context: 0 sb_writers#4 &iint->mutex &c->lock irq_context: 0 &iint->mutex mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 &iint->mutex mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem remove_cache_srcu pool_lock#2 irq_context: 0 rtnl_mutex &br->hash_lock &n->list_lock irq_context: 0 rtnl_mutex &br->hash_lock &n->list_lock &c->lock irq_context: 0 &f->f_pos_lock sb_writers#10 &n->list_lock irq_context: 0 &f->f_pos_lock sb_writers#10 &n->list_lock &c->lock irq_context: 0 &f->f_pos_lock sb_writers#10 rcu_node_0 irq_context: 0 kn->active#54 &rq->__lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->tt.work)->work) &rcu_state.expedited_wq &p->pi_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lg->lg_mutex rcu_read_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lg->lg_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_INET k-sk_lock-AF_INET/1 pool_lock#2 irq_context: 0 sk_lock-AF_INET k-sk_lock-AF_INET irq_context: 0 sk_lock-AF_INET k-sk_lock-AF_INET k-slock-AF_INET irq_context: 0 sk_lock-AF_INET k-sk_lock-AF_INET (kmod_concurrent_max).lock irq_context: 0 sk_lock-AF_INET k-sk_lock-AF_INET fs_reclaim irq_context: 0 sk_lock-AF_INET k-sk_lock-AF_INET fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sk_lock-AF_INET k-sk_lock-AF_INET fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 sk_lock-AF_INET k-sk_lock-AF_INET fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_INET k-sk_lock-AF_INET &c->lock irq_context: 0 sk_lock-AF_INET k-sk_lock-AF_INET pool_lock#2 irq_context: 0 sk_lock-AF_INET k-sk_lock-AF_INET &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET k-sk_lock-AF_INET rcu_read_lock &pool->lock/1 irq_context: 0 sk_lock-AF_INET k-sk_lock-AF_INET rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET k-sk_lock-AF_INET rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 sk_lock-AF_INET k-sk_lock-AF_INET rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 sk_lock-AF_INET k-sk_lock-AF_INET &x->wait#17 irq_context: 0 sk_lock-AF_INET k-sk_lock-AF_INET &rq->__lock irq_context: 0 sk_lock-AF_INET k-sk_lock-AF_INET &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 tracepoints_mutex cpu_hotplug_lock static_call_mutex text_mutex text_mutex.wait_lock irq_context: 0 rtnl_mutex &idev->mc_lock fs_reclaim &rq->__lock irq_context: 0 rtnl_mutex &idev->mc_lock fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq 
irq_context: 0 sb_writers#4 &iint->mutex ima_extend_list_mutex &c->lock irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock pool_lock irq_context: 0 &mm->mmap_lock sb_writers#4 jbd2_handle rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)events_long (work_completion)(&(&ipvs->defense_work)->work) rcu_node_0 irq_context: 0 bt_proto_lock sco_sk_list.lock irq_context: 0 sk_lock-AF_PACKET &po->bind_lock rcu_read_lock &n->list_lock irq_context: 0 sk_lock-AF_PACKET &po->bind_lock rcu_read_lock &n->list_lock &c->lock irq_context: 0 sk_lock-AF_PACKET &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock &tbl->lock &c->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &n->list_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &n->list_lock &c->lock irq_context: 0 sk_lock-AF_INET6 &hashinfo->ehash_locks[i] irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &n->lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6/1 irq_context: 0 sk_lock-AF_INET6 rcu_read_lock tcp_metrics_lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock tcp_metrics_lock pool_lock#2 irq_context: 0 sk_lock-AF_INET6 rcu_read_lock &ei->socket.wq.wait irq_context: 0 sk_lock-AF_INET6 &mm->mmap_lock rcu_read_lock rcu_read_lock ptlock_ptr(page)#2 irq_context: 0 sk_lock-AF_INET6 &mm->mmap_lock sb_pagefaults mapping.invalidate_lock &rq->__lock irq_context: 0 sk_lock-AF_INET6 &mm->mmap_lock sb_pagefaults mapping.invalidate_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &xt[i].mutex rcu_read_lock rcu_read_lock &cfs_rq->removed.lock irq_context: 0 &xt[i].mutex rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_BLUETOOTH-BTPROTO_SCO irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_BLUETOOTH-BTPROTO_SCO slock-AF_BLUETOOTH-BTPROTO_SCO irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_BLUETOOTH-BTPROTO_SCO &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_BLUETOOTH-BTPROTO_SCO &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 slock-AF_BLUETOOTH-BTPROTO_SCO irq_context: 0 &sb->s_type->i_mutex_key#10 sco_sk_list.lock irq_context: 0 sk_lock-AF_INET k-sk_lock-AF_INET running_helpers_waitq.lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &sch->q.lock batched_entropy_u32.lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &sch->q.lock pool_lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &sch->q.lock &c->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &sch->q.lock &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &sch->q.lock &____s->seqcount irq_context: 0 
(wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &sch->q.lock rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &sch->q.lock batched_entropy_u8.lock irq_context: 0 &f->f_owner.lock irq_context: 0 &xt[i].mutex fs_reclaim &cfs_rq->removed.lock irq_context: 0 &xt[i].mutex fs_reclaim &obj_hash[i].lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &sch->q.lock batched_entropy_u32.lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &sch->q.lock pool_lock#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &sch->q.lock &obj_hash[i].lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &sch->q.lock batched_entropy_u8.lock irq_context: 0 &iint->mutex rcu_read_lock &rcu_state.gp_wq irq_context: 0 &iint->mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 &iint->mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 &iint->mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex rcu_read_lock &rq->__lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex (&q->adapt_timer) irq_context: 0 sk_lock-AF_INET6 &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock batched_entropy_u8.lock irq_context: 0 sk_lock-AF_INET6 &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock &hashinfo->ehash_locks[i] irq_context: 0 sk_lock-AF_INET6 rcu_read_lock fastopen_seqlock.seqcount irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &nf_conntrack_locks[i]/1 irq_context: 0 sk_lock-AF_INET6 &tcp_hashinfo.bhash[i].lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET6 &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock &obj_hash[i].lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 &root->kernfs_rwsem rcu_node_0 irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 &root->kernfs_rwsem rcu_read_lock rcu_node_0 irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6/1 &obj_hash[i].lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6/1 pool_lock#2 irq_context: 0 &type->i_mutex_dir_key#3 &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 jbd2_handle &lock->wait_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 jbd2_handle &p->pi_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &rsp->gp_wait irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_state.exp_mutex rcu_node_0 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_state.exp_mutex &obj_hash[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_state.exp_mutex rcu_read_lock 
&pool->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_state.exp_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_state.exp_mutex &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_state.exp_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_state.exp_mutex &rnp->exp_wq[0] irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_state.exp_mutex rcu_read_lock &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_state.exp_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &obj_hash[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem pool_lock#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &journal->j_state_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ei->i_raw_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &journal->j_wait_updates irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &ei->i_data_sem irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &ei->i_data_sem &obj_hash[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &ei->i_data_sem pool_lock#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sem->waiters irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &rsp->gp_wait irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &rsp->gp_wait &obj_hash[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &rsp->gp_wait pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#8 &ei->i_data_sem irq_context: 0 &sb->s_type->i_mutex_key#8 &ei->i_data_sem (console_sem).lock irq_context: 0 &sb->s_type->i_mutex_key#8 &ei->i_data_sem console_lock console_srcu console_owner_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &ei->i_data_sem console_lock console_srcu console_owner irq_context: 0 &sb->s_type->i_mutex_key#8 &ei->i_data_sem console_lock console_srcu console_owner &port_lock_key irq_context: 0 &sb->s_type->i_mutex_key#8 &ei->i_data_sem console_lock console_srcu console_owner console_owner_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &ei->i_data_sem &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#8 &ei->i_data_sem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq 
rcu_callback &rsp->gp_wait irq_context: 0 rtnl_mutex team->team_lock_key#7 rcu_read_lock rcu_node_0 irq_context: 0 rtnl_mutex team->team_lock_key#7 rcu_read_lock &rq->__lock irq_context: 0 rtnl_mutex team->team_lock_key#7 rcu_read_lock &bond->stats_lock irq_context: 0 rtnl_mutex team->team_lock_key#7 &idev->mc_lock &dev_addr_list_lock_key irq_context: 0 rtnl_mutex team->team_lock_key#7 &idev->mc_lock &dev_addr_list_lock_key &obj_hash[i].lock irq_context: 0 rtnl_mutex team->team_lock_key#7 &idev->mc_lock &dev_addr_list_lock_key pool_lock#2 irq_context: 0 rtnl_mutex team->team_lock_key#7 &idev->mc_lock &dev_addr_list_lock_key krc.lock irq_context: 0 rtnl_mutex team->team_lock_key#7 &idev->mc_lock &rq->__lock irq_context: 0 rtnl_mutex team->team_lock_key#7 rcu_state.exp_mutex &rnp->exp_wq[1] irq_context: 0 rtnl_mutex team->team_lock_key#7 stock_lock irq_context: 0 rtnl_mutex team->team_lock_key#7 rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 (wq_completion)bond1 irq_context: 0 (wq_completion)bond1 (work_completion)(&(&bond->mii_work)->work) irq_context: 0 (wq_completion)bond1 (work_completion)(&(&bond->mii_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond1 (work_completion)(&(&bond->mii_work)->work) &base->lock irq_context: 0 (wq_completion)bond1 (work_completion)(&(&bond->mii_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond1 (work_completion)(&(&bond->ad_work)->work) irq_context: 0 (wq_completion)bond1 (work_completion)(&(&bond->ad_work)->work) &bond->mode_lock irq_context: 0 (wq_completion)bond1 (work_completion)(&(&bond->ad_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond1 (work_completion)(&(&bond->ad_work)->work) &base->lock irq_context: 0 (wq_completion)bond1 (work_completion)(&(&bond->ad_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex team->team_lock_key#7 &bond->mode_lock irq_context: 0 rtnl_mutex team->team_lock_key#7 &dev_addr_list_lock_key irq_context: 0 rtnl_mutex team->team_lock_key#7 &dev_addr_list_lock_key &c->lock irq_context: 0 rtnl_mutex team->team_lock_key#7 &dev_addr_list_lock_key pool_lock#2 irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 rtnl_mutex team->team_lock_key#7 &rq->__lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 rcu_read_lock &rcu_state.gp_wq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &journal->j_barrier &journal->j_state_lock &journal->j_wait_commit &p->pi_lock &cfs_rq->removed.lock irq_context: softirq &(&bond->ad_work)->timer irq_context: softirq &(&bond->ad_work)->timer rcu_read_lock &pool->lock/1 irq_context: softirq &(&bond->ad_work)->timer rcu_read_lock 
&pool->lock/1 &obj_hash[i].lock irq_context: softirq &(&bond->ad_work)->timer rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: softirq &(&bond->mii_work)->timer irq_context: softirq &(&bond->mii_work)->timer rcu_read_lock &pool->lock/1 irq_context: softirq &(&bond->mii_work)->timer rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: softirq &(&bond->mii_work)->timer rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 rtnl_mutex rcu_read_lock &ul->lock irq_context: 0 rtnl_mutex &idev->mc_lock _xmit_IPGRE irq_context: 0 rtnl_mutex &idev->mc_lock _xmit_IPGRE pool_lock#2 irq_context: 0 rtnl_mutex &dev_addr_list_lock_key &obj_hash[i].lock irq_context: 0 rtnl_mutex &dev_addr_list_lock_key krc.lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &sch->q.lock &c->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &sch->q.lock &n->list_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &sch->q.lock &n->list_lock &c->lock irq_context: 0 rtnl_mutex &idev->mc_lock _xmit_IPGRE &obj_hash[i].lock irq_context: 0 rtnl_mutex &idev->mc_lock _xmit_IPGRE krc.lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh &sch->q.lock batched_entropy_u32.lock crngs.lock irq_context: softirq &(&bond->mii_work)->timer rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: softirq &(&bond->mii_work)->timer rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &n->list_lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &n->list_lock &c->lock irq_context: 0 &sb->s_type->i_mutex_key#10 pool_lock irq_context: softirq (&tbl->periodic_timer) irq_context: softirq (&tbl->periodic_timer) &obj_hash[i].lock irq_context: softirq (&tbl->periodic_timer) &base->lock irq_context: softirq (&tbl->periodic_timer) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->bla.work)->work) rcu_node_0 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &base->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock pool_lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock rcu_read_lock rhashtable_bucket irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock nl_table_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock nl_table_wait.lock irq_context: 0 sb_writers#4 
fill_pool_map-wait-type-override &cfs_rq->removed.lock irq_context: 0 sb_writers#4 fill_pool_map-wait-type-override &obj_hash[i].lock irq_context: 0 sb_writers#4 fill_pool_map-wait-type-override &rq->__lock &cfs_rq->removed.lock irq_context: 0 rtnl_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond2 irq_context: 0 (wq_completion)bond2 (work_completion)(&(&bond->mii_work)->work) irq_context: 0 (wq_completion)bond2 (work_completion)(&(&bond->mii_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond2 (work_completion)(&(&bond->mii_work)->work) &base->lock irq_context: 0 (wq_completion)bond2 (work_completion)(&(&bond->mii_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond2 (work_completion)(&(&bond->ad_work)->work) irq_context: 0 (wq_completion)bond2 (work_completion)(&(&bond->ad_work)->work) &bond->mode_lock irq_context: 0 (wq_completion)bond2 (work_completion)(&(&bond->ad_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond2 (work_completion)(&(&bond->ad_work)->work) &base->lock irq_context: 0 (wq_completion)bond2 (work_completion)(&(&bond->ad_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex team->team_lock_key#7 &root->kernfs_rwsem &sem->wait_lock irq_context: 0 rtnl_mutex team->team_lock_key#7 &root->kernfs_rwsem &rq->__lock irq_context: 0 rtnl_mutex team->team_lock_key#7 &root->kernfs_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq &(&bond->ad_work)->timer rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: softirq &(&bond->ad_work)->timer rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &journal->j_barrier &cfs_rq->removed.lock irq_context: 0 &journal->j_barrier &obj_hash[i].lock irq_context: 0 &journal->j_barrier pool_lock#2 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 remove_cache_srcu &rcu_state.expedited_wq irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &c->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &n->list_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &n->list_lock &c->lock irq_context: 0 sb_writers#4 &sb->s_type->i_lock_key#22 &xa->xa_lock#6 rcu_read_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock &____s->seqcount#2 irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock &____s->seqcount irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock 
rcu_read_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock fs_reclaim irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock &____s->seqcount irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock pool_lock#2 irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock stock_lock irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock ptlock_ptr(page) irq_context: 0 &sb->s_type->i_mutex_key#8 &ei->i_data_sem rcu_node_0 irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&port->bc_work) rcu_read_lock &n->list_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&port->bc_work) rcu_read_lock &n->list_lock &c->lock irq_context: 0 cb_lock genl_mutex &pn->l2tp_tunnel_idr_lock &c->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)wg-crypt-wg1#8 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg1#8 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock pool_lock irq_context: 0 rcu_read_lock rcu_read_lock &nf_nat_locks[i] irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle rcu_node_0 irq_context: 0 (wq_completion)wg-crypt-wg2#8 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->mcast.work)->work) &cfs_rq->removed.lock irq_context: 0 &type->i_mutex_dir_key#3 sb_writers#4 jbd2_handle &journal->j_wait_updates &p->pi_lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 &dentry->d_lock rcu_read_lock &dentry->d_lock &p->pi_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 &dentry->d_lock rcu_read_lock &dentry->d_lock &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 &dentry->d_lock rcu_read_lock &dentry->d_lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 &rcu_state.expedited_wq &p->pi_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &p->pi_lock &cfs_rq->removed.lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 ptlock_ptr(page)#2 ptlock_ptr(page)#2/1 rcu_read_lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 &rq->__lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 
mmu_notifier_invalidate_range_start &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 mmu_notifier_invalidate_range_start &obj_hash[i].lock irq_context: softirq (&tsc_sync_check_timer) irq_context: softirq (&tsc_sync_check_timer) &obj_hash[i].lock irq_context: softirq (&tsc_sync_check_timer) &base->lock irq_context: softirq (&tsc_sync_check_timer) &base->lock &obj_hash[i].lock irq_context: 0 &type->i_mutex_dir_key#3 sb_writers#4 remove_cache_srcu rcu_read_lock &rq->__lock irq_context: 0 &type->i_mutex_dir_key#3 sb_writers#4 remove_cache_srcu rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 remove_cache_srcu &rcu_state.expedited_wq irq_context: 0 (wq_completion)events (work_completion)(&p->wq) &rcu_state.expedited_wq irq_context: 0 (wq_completion)events (work_completion)(&p->wq) &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)events (work_completion)(&p->wq) &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&p->wq) &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &vma->vm_lock->lock rcu_read_lock rcu_read_lock &cfs_rq->removed.lock irq_context: 0 &vma->vm_lock->lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 elock-AF_INET irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_AX25 &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &ei->i_prealloc_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &cfs_rq->removed.lock irq_context: 0 &f->f_pos_lock sb_writers#4 &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#4 pool_lock#2 irq_context: 0 (wq_completion)wg-crypt-wg0#8 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond3 irq_context: 0 (wq_completion)bond3 (work_completion)(&(&bond->mii_work)->work) irq_context: 0 (wq_completion)bond3 (work_completion)(&(&bond->mii_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond3 (work_completion)(&(&bond->mii_work)->work) &base->lock irq_context: 0 (wq_completion)bond3 (work_completion)(&(&bond->mii_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond3 (work_completion)(&(&bond->ad_work)->work) irq_context: 0 (wq_completion)bond3 (work_completion)(&(&bond->ad_work)->work) &bond->mode_lock irq_context: 0 (wq_completion)bond3 (work_completion)(&(&bond->ad_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond3 (work_completion)(&(&bond->ad_work)->work) &base->lock irq_context: 0 (wq_completion)bond3 (work_completion)(&(&bond->ad_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 cb_lock genl_mutex nbd_index_mutex irq_context: 0 cb_lock genl_mutex &nbd->config_lock irq_context: 0 cb_lock genl_mutex &nbd->config_lock mmu_notifier_invalidate_range_start irq_context: 0 cb_lock genl_mutex &nbd->config_lock pool_lock#2 irq_context: 0 cb_lock genl_mutex &nbd->config_lock &bdev->bd_size_lock irq_context: 0 cb_lock genl_mutex &nbd->config_lock fs_reclaim irq_context: 0 cb_lock genl_mutex &nbd->config_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 cb_lock genl_mutex &nbd->config_lock uevent_sock_mutex irq_context: 0 cb_lock genl_mutex &nbd->config_lock uevent_sock_mutex fs_reclaim irq_context: 0 cb_lock genl_mutex &nbd->config_lock uevent_sock_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 
cb_lock genl_mutex &nbd->config_lock uevent_sock_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 cb_lock genl_mutex &nbd->config_lock uevent_sock_mutex pool_lock#2 irq_context: 0 cb_lock genl_mutex &nbd->config_lock uevent_sock_mutex nl_table_lock irq_context: 0 cb_lock genl_mutex &nbd->config_lock uevent_sock_mutex rlock-AF_NETLINK irq_context: 0 cb_lock genl_mutex &nbd->config_lock uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait irq_context: 0 cb_lock genl_mutex &nbd->config_lock uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock irq_context: 0 cb_lock genl_mutex &nbd->config_lock uevent_sock_mutex nl_table_wait.lock irq_context: 0 cb_lock genl_mutex &nbd->config_lock uevent_sock_mutex &obj_hash[i].lock irq_context: 0 cb_lock genl_mutex &nbd->config_lock &obj_hash[i].lock irq_context: 0 cb_lock genl_mutex &nbd->config_lock &q->queue_lock irq_context: 0 cb_lock genl_mutex &nbd->config_lock &ACCESS_PRIVATE(sdp, lock) irq_context: 0 cb_lock genl_mutex &nbd->config_lock set->srcu irq_context: 0 cb_lock genl_mutex &nbd->config_lock &ACCESS_PRIVATE(ssp->srcu_sup, lock) &ACCESS_PRIVATE(sdp, lock) irq_context: 0 cb_lock genl_mutex &nbd->config_lock &ACCESS_PRIVATE(ssp->srcu_sup, lock) rcu_read_lock &pool->lock irq_context: 0 cb_lock genl_mutex &nbd->config_lock &ACCESS_PRIVATE(ssp->srcu_sup, lock) rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 cb_lock genl_mutex &nbd->config_lock &ACCESS_PRIVATE(ssp->srcu_sup, lock) rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 cb_lock genl_mutex &nbd->config_lock &ACCESS_PRIVATE(ssp->srcu_sup, lock) rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 cb_lock genl_mutex &nbd->config_lock &ACCESS_PRIVATE(ssp->srcu_sup, lock) rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 cb_lock genl_mutex &nbd->config_lock &ACCESS_PRIVATE(ssp->srcu_sup, lock) rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock genl_mutex &nbd->config_lock &rq->__lock irq_context: 0 cb_lock genl_mutex &nbd->config_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock genl_mutex &nbd->config_lock &x->wait#3 irq_context: 0 rtnl_mutex team->team_lock_key#7 remove_cache_srcu irq_context: 0 rtnl_mutex team->team_lock_key#7 remove_cache_srcu quarantine_lock irq_context: 0 rtnl_mutex team->team_lock_key#7 remove_cache_srcu &c->lock irq_context: 0 rtnl_mutex team->team_lock_key#7 remove_cache_srcu &n->list_lock irq_context: 0 rtnl_mutex team->team_lock_key#7 remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 rtnl_mutex team->team_lock_key#7 remove_cache_srcu &obj_hash[i].lock irq_context: 0 rtnl_mutex team->team_lock_key#7 remove_cache_srcu pool_lock#2 irq_context: 0 rtnl_mutex team->team_lock_key#7 remove_cache_srcu &rq->__lock irq_context: 0 cb_lock genl_mutex &nbd->config_lock set->srcu irq_context: 0 &pn->l2tp_tunnel_idr_lock irq_context: 0 &pn->l2tp_tunnel_idr_lock &c->lock irq_context: 0 &pn->l2tp_tunnel_idr_lock pool_lock#2 irq_context: 0 sk_lock-AF_PPPOX fs_reclaim irq_context: 0 sk_lock-AF_PPPOX fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sk_lock-AF_PPPOX &c->lock irq_context: 0 sk_lock-AF_PPPOX pool_lock#2 irq_context: 0 sk_lock-AF_PPPOX &ps->sk_lock irq_context: 0 sk_lock-AF_PPPOX &ps->sk_lock &tunnel->hlist_lock irq_context: 0 sk_lock-AF_PPPOX &ps->sk_lock &tunnel->hlist_lock &pn->l2tp_session_hlist_lock irq_context: 0 sk_lock-AF_PPPOX &ps->sk_lock &rq->__lock irq_context: 0 sk_lock-AF_PPPOX &ps->sk_lock 
&rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_PPPOX &ps->sk_lock fs_reclaim irq_context: 0 sk_lock-AF_PPPOX &ps->sk_lock fs_reclaim &rq->__lock irq_context: 0 sk_lock-AF_PPPOX &ps->sk_lock fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_PPPOX &ps->sk_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sk_lock-AF_PPPOX &ps->sk_lock &c->lock irq_context: 0 sk_lock-AF_PPPOX &ps->sk_lock &n->list_lock irq_context: 0 sk_lock-AF_PPPOX &ps->sk_lock &n->list_lock &c->lock irq_context: 0 sk_lock-AF_PPPOX &ps->sk_lock pool_lock#2 irq_context: 0 sk_lock-AF_PPPOX &ps->sk_lock &dir->lock irq_context: 0 sk_lock-AF_PPPOX &ps->sk_lock &pn->all_channels_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_PPPOX &pch->chan_sem irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_PPPOX &pch->chan_sem &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_PPPOX &pch->chan_sem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_PPPOX &pch->chan_sem &pch->downl irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_PPPOX &pch->upl irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_PPPOX &pn->all_channels_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_PPPOX &pf->rwait irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_PPPOX pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_PPPOX &dir->lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_PPPOX &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_PPPOX &list->lock#28 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_PPPOX &tunnel->hlist_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_PPPOX &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_PPPOX &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_PPPOX &pn->l2tp_session_hlist_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_PPPOX rcu_state.exp_mutex rcu_node_0 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_PPPOX rcu_state.exp_mutex &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_PPPOX rcu_state.exp_mutex rcu_read_lock &pool->lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_PPPOX rcu_state.exp_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_PPPOX rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_PPPOX rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_PPPOX rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_PPPOX rcu_state.exp_mutex &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_PPPOX rcu_state.exp_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_PPPOX &list->lock#37 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_PPPOX &ps->sk_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_PPPOX rcu_state.exp_mutex &rnp->exp_wq[0] irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_read_lock &pool->lock/1 irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 
rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)l2tp irq_context: 0 (wq_completion)l2tp (work_completion)(&tunnel->del_work) irq_context: 0 (wq_completion)l2tp (work_completion)(&tunnel->del_work) &tunnel->hlist_lock irq_context: 0 (wq_completion)l2tp (work_completion)(&tunnel->del_work) &pn->l2tp_tunnel_idr_lock irq_context: 0 (wq_completion)l2tp (work_completion)(&tunnel->del_work) &pn->l2tp_tunnel_idr_lock &obj_hash[i].lock irq_context: 0 (wq_completion)l2tp (work_completion)(&tunnel->del_work) &pn->l2tp_tunnel_idr_lock pool_lock#2 irq_context: softirq rcu_callback rlock-AF_PPPOX irq_context: softirq rcu_callback wlock-AF_PPPOX irq_context: softirq rcu_callback clock-AF_INET6 irq_context: softirq rcu_callback krc.lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh &sch->q.lock batched_entropy_u32.lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh &sch->q.lock pool_lock#2 irq_context: 0 rtnl_mutex &root->kernfs_rwsem &rq->__lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_state.exp_mutex &rnp->exp_wq[3] irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex team->team_lock_key#7 rcu_state.exp_mutex &rnp->exp_wq[0] irq_context: 0 (wq_completion)bond4 irq_context: 0 (wq_completion)bond4 (work_completion)(&(&bond->mii_work)->work) irq_context: 0 (wq_completion)bond4 (work_completion)(&(&bond->mii_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond4 (work_completion)(&(&bond->mii_work)->work) &base->lock irq_context: 0 (wq_completion)bond4 (work_completion)(&(&bond->mii_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond4 (work_completion)(&(&bond->ad_work)->work) irq_context: 0 (wq_completion)bond4 (work_completion)(&(&bond->ad_work)->work) &bond->mode_lock irq_context: 0 (wq_completion)bond4 (work_completion)(&(&bond->ad_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond4 (work_completion)(&(&bond->ad_work)->work) &base->lock irq_context: 0 (wq_completion)bond4 (work_completion)(&(&bond->ad_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex team->team_lock_key#7 remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex team->team_lock_key#7 lweventlist_lock pool_lock#2 irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &dev->tx_global_lock &qdisc_xmit_lock_key#4 irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#7 rcu_read_lock &bond->stats_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_read_lock &bond->stats_lock irq_context: 0 &____s->seqcount#11 irq_context: 0 &sb->s_type->i_mutex_key#10 &ping_table.lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh &sch->q.lock batched_entropy_u32.lock crngs.lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh &sch->q.lock batched_entropy_u8.lock irq_context: 0 
rtnl_mutex pcpu_alloc_mutex rcu_read_lock &rq->__lock irq_context: 0 rtnl_mutex pcpu_alloc_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq &(&bond->mii_work)->timer rcu_read_lock &pool->lock/1 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 &journal->j_barrier &journal->j_checkpoint_mutex &rcu_state.expedited_wq irq_context: 0 &journal->j_barrier &journal->j_checkpoint_mutex &rcu_state.expedited_wq &p->pi_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET slock-AF_INET fill_pool_map-wait-type-override &n->list_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET slock-AF_INET fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 sk_lock-AF_PACKET &pcp->lock &zone->lock irq_context: 0 sk_lock-AF_PACKET rcu_read_lock rcu_node_0 irq_context: 0 sk_lock-AF_PACKET rcu_read_lock &rq->__lock irq_context: 0 sk_lock-AF_PACKET rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &nft_net->commit_mutex nf_ct_proto_mutex irq_context: 0 &nft_net->commit_mutex nf_hook_mutex irq_context: 0 &nft_net->commit_mutex nf_hook_mutex fs_reclaim irq_context: 0 &nft_net->commit_mutex nf_hook_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &nft_net->commit_mutex nf_hook_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 &nft_net->commit_mutex nf_hook_mutex stock_lock irq_context: 0 &nft_net->commit_mutex nf_hook_mutex pool_lock#2 irq_context: 0 &nft_net->commit_mutex cpu_hotplug_lock irq_context: 0 &nft_net->commit_mutex tk_core.seq.seqcount irq_context: 0 &nft_net->commit_mutex remove_cache_srcu &rq->__lock irq_context: 0 sk_lock-AF_PACKET fs_reclaim &rq->__lock irq_context: 0 &nft_net->commit_mutex nf_hook_mutex cpu_hotplug_lock irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem &nft_net->commit_mutex rcu_state.exp_mutex &rnp->exp_wq[1] irq_context: 0 sk_lock-AF_PACKET &pcp->lock &zone->lock &____s->seqcount irq_context: 0 sk_lock-AF_PACKET rcu_read_lock &rcu_state.gp_wq irq_context: 0 sk_lock-AF_PACKET rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 sk_lock-AF_PACKET rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 sk_lock-AF_PACKET rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock &po->pg_vec_lock irq_context: 0 sk_lock-AF_PACKET &po->pg_vec_lock irq_context: 0 sk_lock-AF_PACKET &po->pg_vec_lock rlock-AF_PACKET irq_context: 0 sk_lock-AF_PACKET &po->pg_vec_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_PACKET &po->bind_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_PACKET rcu_state.exp_mutex rcu_node_0 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_PACKET rcu_state.exp_mutex &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_PACKET rcu_state.exp_mutex rcu_read_lock &pool->lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_PACKET rcu_state.exp_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_PACKET rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_PACKET rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_PACKET rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_PACKET 
rcu_state.exp_mutex &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_PACKET rcu_state.exp_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_PACKET rcu_state.exp_mutex &rnp->exp_wq[0] irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_PACKET &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_PACKET &po->pg_vec_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_PACKET &po->pg_vec_lock rlock-AF_PACKET irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_PACKET rcu_read_lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_PACKET rcu_read_lock rcu_node_0 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_PACKET rcu_read_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_PACKET rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_PACKET rcu_read_lock &rcu_state.gp_wq irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_PACKET rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_PACKET rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_PACKET rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_PACKET rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_PACKET rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_PACKET rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_PACKET pool_lock#2 irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh &sch->q.lock &obj_hash[i].lock irq_context: 0 &mm->mmap_lock ptlock_ptr(page)#2 &mapping->private_lock irq_context: 0 &mm->mmap_lock ptlock_ptr(page)#2 &mapping->private_lock rcu_read_lock &memcg->move_lock irq_context: 0 sk_lock-AF_INET6 &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &iint->mutex ima_extend_list_mutex rcu_read_lock rcu_node_0 irq_context: 0 &iint->mutex ima_extend_list_mutex rcu_read_lock &rq->__lock irq_context: 0 &iint->mutex ima_extend_list_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_INET6 &mm->mmap_lock rcu_read_lock &rcu_state.gp_wq irq_context: 0 sk_lock-AF_INET6 &mm->mmap_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 sk_lock-AF_INET6 &mm->mmap_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 sk_lock-AF_INET6 &mm->mmap_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex fill_pool_map-wait-type-override pool_lock irq_context: 0 sb_writers#8 &sb->s_type->i_mutex_key#13 &root->kernfs_iattr_rwsem iattr_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &nft_net->commit_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 &nft_net->commit_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 
sb_writers#4 &type->i_mutex_dir_key#3 remove_cache_srcu pool_lock#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 tomoyo_ss &pcp->lock &zone->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 tomoyo_ss &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &sb->s_type->i_mutex_key#9 rcu_read_lock &rcu_state.expedited_wq irq_context: 0 &sb->s_type->i_mutex_key#9 rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 &sb->s_type->i_mutex_key#9 rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#9 rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 tomoyo_ss remove_cache_srcu irq_context: 0 sb_writers#4 tomoyo_ss remove_cache_srcu quarantine_lock irq_context: 0 sb_writers#4 tomoyo_ss remove_cache_srcu &c->lock irq_context: 0 sb_writers#4 tomoyo_ss remove_cache_srcu &n->list_lock irq_context: 0 sb_writers#4 tomoyo_ss remove_cache_srcu &obj_hash[i].lock irq_context: 0 sb_writers#4 tomoyo_ss remove_cache_srcu rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#4 tomoyo_ss remove_cache_srcu rcu_read_lock &rq->__lock irq_context: 0 sb_writers#4 tomoyo_ss remove_cache_srcu rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 tomoyo_ss remove_cache_srcu rcu_read_lock &rcu_state.gp_wq irq_context: 0 sb_writers#4 tomoyo_ss remove_cache_srcu rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 sb_writers#4 tomoyo_ss remove_cache_srcu rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 tomoyo_ss remove_cache_srcu rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 mapping.invalidate_lock rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 mapping.invalidate_lock rcu_read_lock &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 mapping.invalidate_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 mapping.invalidate_lock jbd2_handle &ei->i_data_sem &ei->i_data_sem/1 &ei->i_prealloc_lock &pa->pa_lock#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 mapping.invalidate_lock jbd2_handle &ei->i_data_sem &ei->i_data_sem/1 &obj_hash[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &p->pi_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex devnet_rename_sem &root->kernfs_rwsem &rq->__lock irq_context: 0 &disk->open_mutex &lock->wait_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 &rq->__lock &cfs_rq->removed.lock irq_context: 0 &sb->s_type->i_lock_key#23 bit_wait_table + i irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_data_sem bit_wait_table + i irq_context: softirq (&sdp->delay_work) rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 jbd2_handle &journal->j_wait_updates &p->pi_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 jbd2_handle &journal->j_wait_updates 
&p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 jbd2_handle &journal->j_wait_updates &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_BLUETOOTH-BTPROTO_HCI &rq->__lock irq_context: 0 sk_lock-AF_BLUETOOTH-BTPROTO_HCI &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem remove_cache_srcu rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem remove_cache_srcu rcu_read_lock &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem remove_cache_srcu rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem remove_cache_srcu rcu_read_lock &rcu_state.gp_wq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem remove_cache_srcu rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem remove_cache_srcu rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem remove_cache_srcu rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 jbd2_handle rcu_node_0 irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem &____s->seqcount#2 irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem &____s->seqcount irq_context: softirq rcu_read_lock &sch->q.lock hrtimer_bases.lock irq_context: softirq rcu_read_lock &sch->q.lock hrtimer_bases.lock &obj_hash[i].lock irq_context: 0 &type->i_mutex_dir_key#5 rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_VSOCK &mm->mmap_lock irq_context: 0 sk_lock-AF_VSOCK vsock_table_lock irq_context: 0 sk_lock-AF_VSOCK vsock_table_lock batched_entropy_u32.lock irq_context: 0 fanout_mutex irq_context: 0 fanout_mutex fs_reclaim irq_context: 0 fanout_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 fanout_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 fanout_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 fanout_mutex &c->lock irq_context: 0 fanout_mutex pool_lock#2 irq_context: 0 fanout_mutex &po->bind_lock irq_context: 0 fanout_mutex &po->bind_lock ptype_lock irq_context: 0 fanout_mutex &po->bind_lock &match->lock irq_context: 0 fanout_mutex &po->bind_lock &match->lock ptype_lock irq_context: 0 &sb->s_type->i_mutex_key#10 &po->bind_lock &match->lock irq_context: 0 &sb->s_type->i_mutex_key#10 &po->bind_lock &match->lock ptype_lock irq_context: 0 sb_writers#4 mapping.invalidate_lock &ei->i_data_sem irq_context: 0 sb_writers#4 mapping.invalidate_lock &ei->i_data_sem mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#4 mapping.invalidate_lock &ei->i_data_sem &c->lock irq_context: 0 sb_writers#4 mapping.invalidate_lock &ei->i_data_sem pool_lock#2 irq_context: 0 sb_writers#4 mapping.invalidate_lock &ei->i_data_sem &ei->i_es_lock irq_context: 0 sb_writers#4 mapping.invalidate_lock &ei->i_data_sem &ei->i_es_lock pool_lock#2 irq_context: 0 sb_writers#4 mapping.invalidate_lock &ei->i_data_sem &ei->i_es_lock &sbi->s_es_lock irq_context: 0 sb_writers#4 mapping.invalidate_lock &ei->i_data_sem 
&obj_hash[i].lock irq_context: softirq rcu_read_lock &sch->q.lock hrtimer_bases.lock tk_core.seq.seqcount irq_context: 0 sk_lock-AF_VSOCK fs_reclaim irq_context: 0 sk_lock-AF_VSOCK fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sk_lock-AF_VSOCK fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 sk_lock-AF_VSOCK fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_VSOCK pool_lock#2 irq_context: 0 sk_lock-AF_VSOCK &c->lock irq_context: 0 sk_lock-AF_VSOCK &vvs->rx_lock irq_context: 0 sk_lock-AF_VSOCK &list->lock#38 irq_context: 0 sk_lock-AF_VSOCK rcu_read_lock &pool->lock irq_context: 0 sk_lock-AF_VSOCK rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_VSOCK rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 sk_lock-AF_VSOCK rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 sk_lock-AF_VSOCK rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)vsock-loopback irq_context: 0 (wq_completion)vsock-loopback (work_completion)(&vsock->pkt_work) irq_context: 0 (wq_completion)vsock-loopback (work_completion)(&vsock->pkt_work) &list->lock#38 irq_context: 0 (wq_completion)vsock-loopback (work_completion)(&vsock->pkt_work) vsock_table_lock irq_context: 0 (wq_completion)vsock-loopback (work_completion)(&vsock->pkt_work) sk_lock-AF_VSOCK irq_context: 0 (wq_completion)vsock-loopback (work_completion)(&vsock->pkt_work) sk_lock-AF_VSOCK slock-AF_VSOCK irq_context: 0 (wq_completion)vsock-loopback (work_completion)(&vsock->pkt_work) sk_lock-AF_VSOCK fs_reclaim irq_context: 0 (wq_completion)vsock-loopback (work_completion)(&vsock->pkt_work) sk_lock-AF_VSOCK fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)vsock-loopback (work_completion)(&vsock->pkt_work) sk_lock-AF_VSOCK pool_lock#2 irq_context: 0 (wq_completion)vsock-loopback (work_completion)(&vsock->pkt_work) sk_lock-AF_VSOCK &c->lock irq_context: 0 (wq_completion)vsock-loopback (work_completion)(&vsock->pkt_work) sk_lock-AF_VSOCK &dir->lock irq_context: 0 (wq_completion)vsock-loopback (work_completion)(&vsock->pkt_work) sk_lock-AF_VSOCK &obj_hash[i].lock irq_context: 0 (wq_completion)vsock-loopback (work_completion)(&vsock->pkt_work) sk_lock-AF_VSOCK sk_lock-AF_VSOCK/1 irq_context: 0 (wq_completion)vsock-loopback (work_completion)(&vsock->pkt_work) sk_lock-AF_VSOCK sk_lock-AF_VSOCK/1 slock-AF_VSOCK irq_context: 0 (wq_completion)vsock-loopback (work_completion)(&vsock->pkt_work) sk_lock-AF_VSOCK sk_lock-AF_VSOCK/1 fs_reclaim irq_context: 0 (wq_completion)vsock-loopback (work_completion)(&vsock->pkt_work) sk_lock-AF_VSOCK sk_lock-AF_VSOCK/1 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)vsock-loopback (work_completion)(&vsock->pkt_work) sk_lock-AF_VSOCK sk_lock-AF_VSOCK/1 pool_lock#2 irq_context: 0 (wq_completion)vsock-loopback (work_completion)(&vsock->pkt_work) sk_lock-AF_VSOCK sk_lock-AF_VSOCK/1 &vvs->tx_lock irq_context: 0 (wq_completion)vsock-loopback (work_completion)(&vsock->pkt_work) sk_lock-AF_VSOCK sk_lock-AF_VSOCK/1 vsock_table_lock irq_context: 0 (wq_completion)vsock-loopback (work_completion)(&vsock->pkt_work) sk_lock-AF_VSOCK sk_lock-AF_VSOCK/1 &vvs->rx_lock irq_context: 0 (wq_completion)vsock-loopback (work_completion)(&vsock->pkt_work) sk_lock-AF_VSOCK sk_lock-AF_VSOCK/1 &list->lock#38 irq_context: 0 (wq_completion)vsock-loopback (work_completion)(&vsock->pkt_work) sk_lock-AF_VSOCK 
sk_lock-AF_VSOCK/1 rcu_read_lock &pool->lock irq_context: 0 (wq_completion)vsock-loopback (work_completion)(&vsock->pkt_work) sk_lock-AF_VSOCK sk_lock-AF_VSOCK/1 rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)vsock-loopback (work_completion)(&vsock->pkt_work) slock-AF_VSOCK irq_context: 0 (wq_completion)vsock-loopback (work_completion)(&vsock->pkt_work) sk_lock-AF_VSOCK slock-AF_VSOCK &sk->sk_lock.wq irq_context: 0 (wq_completion)vsock-loopback (work_completion)(&vsock->pkt_work) sk_lock-AF_VSOCK &pool->lock irq_context: 0 (wq_completion)vsock-loopback (work_completion)(&vsock->pkt_work) sk_lock-AF_VSOCK &rq->__lock irq_context: 0 (wq_completion)vsock-loopback (work_completion)(&vsock->pkt_work) sk_lock-AF_VSOCK &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_VSOCK &ei->socket.wq.wait irq_context: 0 slock-AF_VSOCK &sk->sk_lock.wq irq_context: 0 slock-AF_VSOCK &sk->sk_lock.wq &p->pi_lock irq_context: 0 slock-AF_VSOCK &sk->sk_lock.wq &p->pi_lock &rq->__lock irq_context: 0 slock-AF_VSOCK &sk->sk_lock.wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)vsock-loopback (work_completion)(&vsock->pkt_work) sk_lock-AF_VSOCK &vvs->tx_lock irq_context: 0 (wq_completion)vsock-loopback (work_completion)(&vsock->pkt_work) sk_lock-AF_VSOCK rcu_read_lock &ei->socket.wq.wait irq_context: 0 (wq_completion)vsock-loopback (work_completion)(&vsock->pkt_work) sk_lock-AF_VSOCK rcu_read_lock &ei->socket.wq.wait &p->pi_lock irq_context: 0 (wq_completion)vsock-loopback (work_completion)(&vsock->pkt_work) sk_lock-AF_VSOCK rcu_read_lock &ei->socket.wq.wait &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)vsock-loopback (work_completion)(&vsock->pkt_work) sk_lock-AF_VSOCK rcu_read_lock &ei->socket.wq.wait &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)vsock-loopback (work_completion)(&vsock->pkt_work) sk_lock-AF_VSOCK rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)vsock-loopback (work_completion)(&vsock->pkt_work) sk_lock-AF_VSOCK rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)vsock-loopback (work_completion)(&vsock->pkt_work) sk_lock-AF_VSOCK vsock_table_lock irq_context: 0 sk_lock-AF_VSOCK &vvs->tx_lock irq_context: 0 sk_lock-AF_VSOCK &zone->lock irq_context: 0 sk_lock-AF_VSOCK &____s->seqcount irq_context: 0 (wq_completion)vsock-loopback (work_completion)(&vsock->pkt_work) sk_lock-AF_VSOCK &vvs->rx_lock irq_context: 0 sk_lock-AF_VSOCK &mm->mmap_lock rcu_read_lock rcu_read_lock ptlock_ptr(page)#2 irq_context: 0 sk_lock-AF_VSOCK rcu_read_lock pool_lock#2 irq_context: 0 sk_lock-AF_VSOCK &obj_hash[i].lock irq_context: 0 sk_lock-AF_VSOCK &zone->lock &____s->seqcount irq_context: softirq rcu_read_lock rcu_read_lock &br->multicast_lock irq_context: softirq rcu_read_lock rcu_read_lock &br->multicast_lock pool_lock#2 irq_context: softirq rcu_read_lock rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: softirq rcu_read_lock rcu_read_lock &br->multicast_lock rcu_read_lock rhashtable_bucket irq_context: softirq rcu_read_lock rcu_read_lock &br->multicast_lock nl_table_lock irq_context: softirq rcu_read_lock rcu_read_lock &br->multicast_lock nl_table_wait.lock irq_context: softirq rcu_read_lock rcu_read_lock &br->multicast_lock &base->lock irq_context: softirq rcu_read_lock rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: softirq rcu_read_lock rcu_read_lock &br->multicast_lock &c->lock irq_context: softirq rcu_read_lock 
elock-AF_PACKET irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_VSOCK sk_lock-AF_VSOCK/1 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_VSOCK sk_lock-AF_VSOCK/1 slock-AF_VSOCK irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_VSOCK sk_lock-AF_VSOCK/1 fs_reclaim irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_VSOCK sk_lock-AF_VSOCK/1 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_VSOCK sk_lock-AF_VSOCK/1 &c->lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_VSOCK sk_lock-AF_VSOCK/1 pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_VSOCK sk_lock-AF_VSOCK/1 &vvs->rx_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_VSOCK sk_lock-AF_VSOCK/1 &list->lock#38 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_VSOCK sk_lock-AF_VSOCK/1 rcu_read_lock &pool->lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_VSOCK sk_lock-AF_VSOCK/1 rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_VSOCK sk_lock-AF_VSOCK/1 rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_VSOCK sk_lock-AF_VSOCK/1 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_VSOCK sk_lock-AF_VSOCK/1 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_VSOCK sk_lock-AF_VSOCK/1 &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_VSOCK sk_lock-AF_VSOCK/1 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)vsock-loopback (work_completion)(&vsock->pkt_work) sk_lock-AF_VSOCK &list->lock#38 irq_context: 0 (wq_completion)vsock-loopback (work_completion)(&vsock->pkt_work) sk_lock-AF_VSOCK rcu_read_lock &pool->lock irq_context: 0 (wq_completion)vsock-loopback (work_completion)(&vsock->pkt_work) sk_lock-AF_VSOCK rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_VSOCK sk_lock-AF_VSOCK/1 &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_VSOCK sk_lock-AF_VSOCK/1 &base->lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_VSOCK sk_lock-AF_VSOCK/1 &base->lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_VSOCK sk_lock-AF_VSOCK/1 clock-AF_VSOCK irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_VSOCK sk_lock-AF_VSOCK/1 rlock-AF_VSOCK irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_VSOCK slock-AF_VSOCK &sk->sk_lock.wq irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_VSOCK slock-AF_VSOCK &sk->sk_lock.wq &p->pi_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_VSOCK slock-AF_VSOCK &sk->sk_lock.wq &p->pi_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_VSOCK slock-AF_VSOCK &sk->sk_lock.wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)vsock-loopback (work_completion)(&vsock->pkt_work) sk_lock-AF_VSOCK &base->lock irq_context: 0 (wq_completion)vsock-loopback (work_completion)(&vsock->pkt_work) sk_lock-AF_VSOCK &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)vsock-loopback (work_completion)(&vsock->pkt_work) sk_lock-AF_VSOCK rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)vsock-loopback (work_completion)(&vsock->pkt_work) sk_lock-AF_VSOCK &zone->lock irq_context: 0 (wq_completion)vsock-loopback (work_completion)(&vsock->pkt_work) sk_lock-AF_VSOCK &zone->lock &____s->seqcount 
irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_VSOCK &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_VSOCK pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_VSOCK &dir->lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_VSOCK fs_reclaim irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_VSOCK fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_VSOCK &vvs->rx_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_VSOCK &list->lock#38 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_VSOCK rcu_read_lock &pool->lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_VSOCK rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_VSOCK rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_VSOCK rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_VSOCK rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)vsock-loopback (work_completion)(&vsock->pkt_work) &obj_hash[i].lock irq_context: 0 (wq_completion)vsock-loopback (work_completion)(&vsock->pkt_work) pool_lock#2 irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 tomoyo_ss rcu_read_lock rcu_read_lock &cfs_rq->removed.lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 tomoyo_ss rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 fs_reclaim &rq->__lock &cfs_rq->removed.lock irq_context: 0 sk_lock-AF_TIPC k-slock-AF_TIPC &list->lock#21 irq_context: 0 sk_lock-AF_TIPC k-slock-AF_TIPC k-clock-AF_TIPC irq_context: 0 sk_lock-AF_TIPC k-slock-AF_TIPC k-clock-AF_TIPC rcu_read_lock &pool->lock/1 irq_context: 0 sk_lock-AF_TIPC k-slock-AF_TIPC k-clock-AF_TIPC rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 sk_lock-AF_TIPC k-slock-AF_TIPC k-clock-AF_TIPC rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 sk_lock-AF_TIPC k-slock-AF_TIPC k-clock-AF_TIPC rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 sk_lock-AF_TIPC k-slock-AF_TIPC k-clock-AF_TIPC rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 sk_lock-AF_TIPC k-slock-AF_TIPC k-clock-AF_TIPC rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)tipc_rcv irq_context: 0 (wq_completion)tipc_rcv (work_completion)(&srv->awork) irq_context: 0 (wq_completion)tipc_rcv (work_completion)(&srv->awork) &srv->idr_lock irq_context: 0 (wq_completion)tipc_rcv (work_completion)(&srv->awork) fs_reclaim irq_context: 0 (wq_completion)tipc_rcv (work_completion)(&srv->awork) fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)tipc_rcv (work_completion)(&srv->awork) pool_lock#2 irq_context: 0 (wq_completion)tipc_rcv (work_completion)(&srv->awork) mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)tipc_rcv (work_completion)(&srv->awork) &sb->s_type->i_lock_key#8 irq_context: 0 (wq_completion)tipc_rcv (work_completion)(&srv->awork) k-sk_lock-AF_TIPC irq_context: 0 (wq_completion)tipc_rcv (work_completion)(&srv->awork) k-sk_lock-AF_TIPC k-slock-AF_TIPC irq_context: 0 (wq_completion)tipc_rcv (work_completion)(&srv->awork) k-sk_lock-AF_TIPC fs_reclaim irq_context: 0 (wq_completion)tipc_rcv (work_completion)(&srv->awork) k-sk_lock-AF_TIPC fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)tipc_rcv (work_completion)(&srv->awork) 
k-sk_lock-AF_TIPC pool_lock#2 irq_context: 0 (wq_completion)tipc_rcv (work_completion)(&srv->awork) k-sk_lock-AF_TIPC &dir->lock irq_context: 0 (wq_completion)tipc_rcv (work_completion)(&srv->awork) k-sk_lock-AF_TIPC &obj_hash[i].lock irq_context: 0 (wq_completion)tipc_rcv (work_completion)(&srv->awork) k-sk_lock-AF_TIPC batched_entropy_u32.lock irq_context: 0 (wq_completion)tipc_rcv (work_completion)(&srv->awork) k-sk_lock-AF_TIPC rcu_read_lock rhashtable_bucket irq_context: 0 (wq_completion)tipc_rcv (work_completion)(&srv->awork) k-sk_lock-AF_TIPC k-sk_lock-AF_TIPC/1 irq_context: 0 (wq_completion)tipc_rcv (work_completion)(&srv->awork) k-sk_lock-AF_TIPC k-sk_lock-AF_TIPC/1 k-slock-AF_TIPC irq_context: 0 (wq_completion)tipc_rcv (work_completion)(&srv->awork) k-sk_lock-AF_TIPC k-sk_lock-AF_TIPC/1 &obj_hash[i].lock irq_context: 0 (wq_completion)tipc_rcv (work_completion)(&srv->awork) k-sk_lock-AF_TIPC k-sk_lock-AF_TIPC/1 &base->lock irq_context: 0 (wq_completion)tipc_rcv (work_completion)(&srv->awork) k-sk_lock-AF_TIPC k-sk_lock-AF_TIPC/1 &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)tipc_rcv (work_completion)(&srv->awork) k-sk_lock-AF_TIPC k-sk_lock-AF_TIPC/1 fs_reclaim irq_context: 0 (wq_completion)tipc_rcv (work_completion)(&srv->awork) k-sk_lock-AF_TIPC k-sk_lock-AF_TIPC/1 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)tipc_rcv (work_completion)(&srv->awork) k-sk_lock-AF_TIPC k-sk_lock-AF_TIPC/1 pool_lock#2 irq_context: 0 (wq_completion)tipc_rcv (work_completion)(&srv->awork) k-sk_lock-AF_TIPC k-sk_lock-AF_TIPC/1 &list->lock#21 irq_context: 0 (wq_completion)tipc_rcv (work_completion)(&srv->awork) k-sk_lock-AF_TIPC k-sk_lock-AF_TIPC/1 slock-AF_TIPC &list->lock#21 irq_context: 0 (wq_completion)tipc_rcv (work_completion)(&srv->awork) k-slock-AF_TIPC irq_context: 0 (wq_completion)tipc_rcv (work_completion)(&srv->awork) &obj_hash[i].lock irq_context: 0 (wq_completion)tipc_rcv (work_completion)(&srv->awork) &srv->idr_lock pool_lock#2 irq_context: 0 (wq_completion)tipc_rcv (work_completion)(&srv->awork) k-clock-AF_TIPC irq_context: 0 (wq_completion)tipc_rcv (work_completion)(&srv->awork) k-clock-AF_TIPC irq_context: 0 (wq_completion)tipc_rcv (work_completion)(&srv->awork) k-clock-AF_TIPC rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)tipc_rcv (work_completion)(&srv->awork) k-clock-AF_TIPC rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)tipc_rcv (work_completion)(&srv->awork) k-clock-AF_TIPC rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 (wq_completion)tipc_rcv (work_completion)(&srv->awork) k-clock-AF_TIPC rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)tipc_rcv (work_completion)(&srv->awork) &xa->xa_lock#6 irq_context: 0 (wq_completion)tipc_rcv (work_completion)(&srv->awork) &fsnotify_mark_srcu irq_context: 0 (wq_completion)tipc_rcv (work_completion)(&con->rwork) irq_context: 0 (wq_completion)tipc_rcv (work_completion)(&con->rwork) k-sk_lock-AF_TIPC irq_context: 0 (wq_completion)tipc_rcv (work_completion)(&con->rwork) k-sk_lock-AF_TIPC k-slock-AF_TIPC irq_context: 0 (wq_completion)tipc_rcv (work_completion)(&con->rwork) k-slock-AF_TIPC irq_context: 0 sk_lock-AF_TIPC &base->lock irq_context: 0 sk_lock-AF_TIPC &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)tipc_rcv (work_completion)(&con->rwork) k-sk_lock-AF_TIPC rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)tipc_rcv (work_completion)(&con->rwork) k-sk_lock-AF_TIPC &obj_hash[i].lock irq_context: 0 
(wq_completion)tipc_rcv (work_completion)(&con->rwork) k-sk_lock-AF_TIPC &zone->lock irq_context: 0 (wq_completion)tipc_rcv (work_completion)(&con->rwork) k-sk_lock-AF_TIPC &zone->lock &____s->seqcount irq_context: 0 (wq_completion)tipc_rcv (work_completion)(&con->rwork) k-sk_lock-AF_TIPC pool_lock#2 irq_context: 0 (wq_completion)tipc_rcv (work_completion)(&con->rwork) k-sk_lock-AF_TIPC &list->lock#21 irq_context: 0 (wq_completion)tipc_rcv (work_completion)(&con->rwork) k-sk_lock-AF_TIPC slock-AF_TIPC &list->lock#21 irq_context: 0 (wq_completion)tipc_rcv (work_completion)(&con->rwork) k-clock-AF_TIPC irq_context: 0 (wq_completion)tipc_rcv (work_completion)(&con->rwork) k-clock-AF_TIPC pool_lock#2 irq_context: 0 (wq_completion)tipc_rcv (work_completion)(&con->rwork) k-clock-AF_TIPC &tn->nametbl_lock irq_context: 0 (wq_completion)tipc_rcv (work_completion)(&con->rwork) k-clock-AF_TIPC &tn->nametbl_lock &c->lock irq_context: 0 (wq_completion)tipc_rcv (work_completion)(&con->rwork) k-clock-AF_TIPC &tn->nametbl_lock pool_lock#2 irq_context: 0 (wq_completion)tipc_rcv (work_completion)(&con->rwork) k-clock-AF_TIPC &tn->nametbl_lock &service->lock irq_context: 0 (wq_completion)tipc_rcv (work_completion)(&con->rwork) k-clock-AF_TIPC &obj_hash[i].lock irq_context: 0 (wq_completion)tipc_rcv (work_completion)(&con->rwork) k-clock-AF_TIPC &base->lock irq_context: 0 (wq_completion)tipc_rcv (work_completion)(&con->rwork) k-clock-AF_TIPC &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)tipc_rcv (work_completion)(&con->rwork) k-clock-AF_TIPC &con->sub_lock irq_context: softirq (&sub->timer) irq_context: softirq (&sub->timer) &sub->lock irq_context: softirq (&sub->timer) &sub->lock &srv->idr_lock irq_context: softirq (&sub->timer) &sub->lock pool_lock#2 irq_context: softirq (&sub->timer) &sub->lock &con->outqueue_lock irq_context: softirq (&sub->timer) &sub->lock rcu_read_lock &pool->lock/1 irq_context: softirq (&sub->timer) &sub->lock rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: softirq (&sub->timer) &sub->lock rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: softirq (&sub->timer) &sub->lock rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)tipc_send (work_completion)(&con->swork) &con->sub_lock irq_context: 0 (wq_completion)tipc_send (work_completion)(&con->swork) &con->sub_lock &tn->nametbl_lock irq_context: 0 (wq_completion)tipc_send (work_completion)(&con->swork) &con->sub_lock &tn->nametbl_lock &service->lock irq_context: 0 (wq_completion)tipc_send (work_completion)(&con->swork) &con->sub_lock &tn->nametbl_lock &service->lock &obj_hash[i].lock irq_context: 0 (wq_completion)tipc_send (work_completion)(&con->swork) &con->sub_lock &tn->nametbl_lock &service->lock pool_lock#2 irq_context: 0 (wq_completion)tipc_send (work_completion)(&con->swork) &con->sub_lock &tn->nametbl_lock &service->lock krc.lock irq_context: 0 (wq_completion)tipc_send (work_completion)(&con->swork) &con->sub_lock (&sub->timer) irq_context: 0 (wq_completion)tipc_send (work_completion)(&con->swork) &con->sub_lock &obj_hash[i].lock irq_context: 0 (wq_completion)tipc_send (work_completion)(&con->swork) &con->sub_lock &base->lock irq_context: 0 (wq_completion)tipc_send (work_completion)(&con->swork) &con->sub_lock pool_lock#2 irq_context: 0 (wq_completion)tipc_send (work_completion)(&con->swork) k-sk_lock-AF_TIPC irq_context: 0 (wq_completion)tipc_send (work_completion)(&con->swork) k-sk_lock-AF_TIPC k-slock-AF_TIPC irq_context: 0 (wq_completion)tipc_send (work_completion)(&con->swork) 
k-sk_lock-AF_TIPC fs_reclaim irq_context: 0 (wq_completion)tipc_send (work_completion)(&con->swork) k-sk_lock-AF_TIPC fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)tipc_send (work_completion)(&con->swork) k-sk_lock-AF_TIPC &c->lock irq_context: 0 (wq_completion)tipc_send (work_completion)(&con->swork) k-sk_lock-AF_TIPC pool_lock#2 irq_context: 0 (wq_completion)tipc_send (work_completion)(&con->swork) k-sk_lock-AF_TIPC &list->lock#21 irq_context: 0 (wq_completion)tipc_send (work_completion)(&con->swork) k-sk_lock-AF_TIPC slock-AF_TIPC &list->lock#21 irq_context: 0 (wq_completion)tipc_send (work_completion)(&con->swork) k-slock-AF_TIPC irq_context: 0 (wq_completion)tipc_rcv (work_completion)(&con->rwork) k-clock-AF_TIPC &c->lock irq_context: 0 (wq_completion)tipc_rcv (work_completion)(&con->rwork) k-sk_lock-AF_TIPC &c->lock irq_context: 0 (wq_completion)tipc_rcv (work_completion)(&con->rwork) k-clock-AF_TIPC &n->list_lock irq_context: 0 (wq_completion)tipc_rcv (work_completion)(&con->rwork) k-clock-AF_TIPC &n->list_lock &c->lock irq_context: softirq (&sub->timer) &sub->lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: softirq (&sub->timer) &sub->lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)tipc_send (work_completion)(&con->swork) k-sk_lock-AF_TIPC &____s->seqcount#2 irq_context: 0 (wq_completion)tipc_send (work_completion)(&con->swork) k-sk_lock-AF_TIPC &____s->seqcount irq_context: softirq (&sub->timer) &sub->lock &c->lock irq_context: 0 (wq_completion)tipc_rcv (work_completion)(&con->rwork) k-sk_lock-AF_TIPC &n->list_lock irq_context: 0 (wq_completion)tipc_rcv (work_completion)(&con->rwork) k-sk_lock-AF_TIPC &n->list_lock &c->lock irq_context: 0 sk_lock-AF_TIPC slock-AF_TIPC &sk->sk_lock.wq irq_context: 0 slock-AF_TIPC &sk->sk_lock.wq irq_context: 0 slock-AF_TIPC &sk->sk_lock.wq &p->pi_lock irq_context: 0 slock-AF_TIPC &sk->sk_lock.wq &p->pi_lock &rq->__lock irq_context: 0 slock-AF_TIPC &sk->sk_lock.wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 (wq_completion)tipc_rcv (work_completion)(&con->rwork) k-sk_lock-AF_TIPC slock-AF_TIPC &obj_hash[i].lock irq_context: 0 (wq_completion)tipc_rcv (work_completion)(&con->rwork) k-sk_lock-AF_TIPC slock-AF_TIPC pool_lock#2 irq_context: 0 (wq_completion)tipc_rcv (work_completion)(&con->rwork) k-clock-AF_TIPC &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)tipc_rcv (work_completion)(&con->rwork) k-clock-AF_TIPC irq_context: 0 (wq_completion)tipc_rcv (work_completion)(&con->rwork) k-clock-AF_TIPC &con->sub_lock irq_context: 0 (wq_completion)tipc_rcv (work_completion)(&con->rwork) k-clock-AF_TIPC &con->sub_lock &tn->nametbl_lock irq_context: 0 (wq_completion)tipc_rcv (work_completion)(&con->rwork) k-clock-AF_TIPC &con->sub_lock &tn->nametbl_lock &service->lock irq_context: 0 (wq_completion)tipc_rcv (work_completion)(&con->rwork) k-clock-AF_TIPC &con->sub_lock (&sub->timer) irq_context: 0 (wq_completion)tipc_rcv (work_completion)(&con->rwork) k-clock-AF_TIPC &con->sub_lock &obj_hash[i].lock irq_context: 0 (wq_completion)tipc_rcv (work_completion)(&con->rwork) k-clock-AF_TIPC &con->sub_lock &base->lock irq_context: 0 (wq_completion)tipc_rcv (work_completion)(&con->rwork) k-clock-AF_TIPC &con->sub_lock &base->lock &obj_hash[i].lock irq_context: 0 
(wq_completion)tipc_rcv (work_completion)(&con->rwork) k-clock-AF_TIPC &con->sub_lock pool_lock#2 irq_context: 0 (wq_completion)tipc_rcv (work_completion)(&con->rwork) k-clock-AF_TIPC &con->sub_lock &tn->nametbl_lock &service->lock &obj_hash[i].lock irq_context: 0 (wq_completion)tipc_rcv (work_completion)(&con->rwork) k-clock-AF_TIPC &con->sub_lock &tn->nametbl_lock &service->lock pool_lock#2 irq_context: 0 (wq_completion)tipc_rcv (work_completion)(&con->rwork) k-clock-AF_TIPC &con->sub_lock &tn->nametbl_lock &service->lock krc.lock irq_context: 0 (wq_completion)tipc_rcv (work_completion)(&con->rwork) k-sk_lock-AF_TIPC slock-AF_TIPC rcu_read_lock &ei->socket.wq.wait irq_context: 0 (wq_completion)tipc_rcv (work_completion)(&con->rwork) k-sk_lock-AF_TIPC slock-AF_TIPC rcu_read_lock &ei->socket.wq.wait &p->pi_lock irq_context: 0 (wq_completion)tipc_rcv (work_completion)(&con->rwork) &srv->idr_lock irq_context: 0 (wq_completion)tipc_rcv (work_completion)(&con->rwork) &srv->idr_lock &obj_hash[i].lock irq_context: 0 (wq_completion)tipc_rcv (work_completion)(&con->rwork) &srv->idr_lock pool_lock#2 irq_context: 0 (wq_completion)tipc_rcv (work_completion)(&con->rwork) k-sk_lock-AF_TIPC &base->lock irq_context: 0 (wq_completion)tipc_rcv (work_completion)(&con->rwork) k-sk_lock-AF_TIPC &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)tipc_rcv (work_completion)(&con->rwork) k-sk_lock-AF_TIPC rcu_read_lock rhashtable_bucket irq_context: 0 (wq_completion)tipc_rcv (work_completion)(&con->rwork) k-sk_lock-AF_TIPC k-clock-AF_TIPC irq_context: 0 (wq_completion)tipc_rcv (work_completion)(&con->rwork) &obj_hash[i].lock irq_context: 0 (wq_completion)tipc_rcv (work_completion)(&con->rwork) pool_lock#2 irq_context: 0 (wq_completion)tipc_rcv (work_completion)(&con->rwork) &sb->s_type->i_lock_key#8 irq_context: 0 (wq_completion)tipc_rcv (work_completion)(&con->rwork) &xa->xa_lock#6 irq_context: 0 (wq_completion)tipc_rcv (work_completion)(&con->rwork) &fsnotify_mark_srcu irq_context: 0 (wq_completion)tipc_rcv (work_completion)(&con->rwork) &con->outqueue_lock irq_context: 0 &u->iolock &mm->mmap_lock stock_lock irq_context: 0 &u->iolock &mm->mmap_lock &rq->__lock irq_context: 0 &u->iolock &mm->mmap_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &u->iolock &mm->mmap_lock ptlock_ptr(page)#2 lock#4 &lruvec->lru_lock irq_context: 0 &u->iolock &mm->mmap_lock &pcp->lock &zone->lock irq_context: 0 &u->iolock &mm->mmap_lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &u->iolock &mm->mmap_lock rcu_read_lock rcu_node_0 irq_context: 0 &u->iolock &mm->mmap_lock rcu_read_lock &rq->__lock irq_context: 0 &u->iolock &mm->mmap_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &u->iolock &mm->mmap_lock rcu_read_lock &rcu_state.gp_wq irq_context: 0 &u->iolock &mm->mmap_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 &u->iolock &mm->mmap_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 &u->iolock &mm->mmap_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &u->iolock &sem->wait_lock irq_context: 0 &u->iolock &p->pi_lock irq_context: 0 &u->iolock &p->pi_lock &rq->__lock irq_context: 0 &u->iolock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ret->b_state_lock bit_wait_table + i irq_context: 0 &u->iolock &mm->mmap_lock &sem->wait_lock irq_context: 0 &u->iolock 
&mm->mmap_lock ptlock_ptr(page)#2 key irq_context: 0 cb_lock genl_mutex rtnl_mutex ipvs->sync_mutex k-sk_lock-AF_INET6 &____s->seqcount#8 irq_context: 0 cb_lock genl_mutex rtnl_mutex ipvs->sync_mutex k-sk_lock-AF_INET6 batched_entropy_u32.lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &n->list_lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &n->list_lock &c->lock irq_context: 0 cb_lock genl_mutex ipvs->sync_mutex &obj_hash[i].lock irq_context: 0 cb_lock genl_mutex ipvs->sync_mutex pool_lock#2 irq_context: 0 epnested_mutex &ep->mtx/1 irq_context: 0 epnested_mutex &ep->mtx/1 &rq->__lock irq_context: 0 epnested_mutex &ep->mtx/1 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 epnested_mutex &ep->mtx stock_lock irq_context: 0 epnested_mutex &ep->mtx &ep->mtx/1 irq_context: 0 epnested_mutex &ep->mtx &ep->mtx/1 &f->f_lock irq_context: 0 epnested_mutex &ep->mtx wakeup_ida.xa_lock irq_context: 0 epnested_mutex &ep->mtx &x->wait#9 irq_context: 0 epnested_mutex &ep->mtx &obj_hash[i].lock irq_context: 0 epnested_mutex &ep->mtx &k->list_lock irq_context: 0 epnested_mutex &ep->mtx gdp_mutex irq_context: 0 epnested_mutex &ep->mtx gdp_mutex &k->list_lock irq_context: 0 epnested_mutex &ep->mtx gdp_mutex fs_reclaim irq_context: 0 epnested_mutex &ep->mtx gdp_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 epnested_mutex &ep->mtx gdp_mutex lock irq_context: 0 epnested_mutex &ep->mtx gdp_mutex lock kernfs_idr_lock irq_context: 0 epnested_mutex &ep->mtx gdp_mutex &rq->__lock irq_context: 0 epnested_mutex &ep->mtx gdp_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 epnested_mutex &ep->mtx gdp_mutex &root->kernfs_rwsem irq_context: 0 epnested_mutex &ep->mtx gdp_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 epnested_mutex &ep->mtx lock irq_context: 0 epnested_mutex &ep->mtx lock kernfs_idr_lock irq_context: 0 epnested_mutex &ep->mtx &root->kernfs_rwsem irq_context: 0 epnested_mutex &ep->mtx &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 epnested_mutex &ep->mtx bus_type_sem irq_context: 0 epnested_mutex &ep->mtx sysfs_symlink_target_lock irq_context: 0 epnested_mutex &ep->mtx uevent_sock_mutex irq_context: 0 epnested_mutex &ep->mtx uevent_sock_mutex &rq->__lock irq_context: 0 epnested_mutex &ep->mtx uevent_sock_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 epnested_mutex &ep->mtx uevent_sock_mutex fs_reclaim irq_context: 0 epnested_mutex &ep->mtx uevent_sock_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 epnested_mutex &ep->mtx uevent_sock_mutex nl_table_lock irq_context: 0 epnested_mutex &ep->mtx uevent_sock_mutex &c->lock irq_context: 0 epnested_mutex &ep->mtx uevent_sock_mutex rlock-AF_NETLINK irq_context: 0 epnested_mutex &ep->mtx uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait irq_context: 0 epnested_mutex &ep->mtx uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock irq_context: 0 epnested_mutex &ep->mtx uevent_sock_mutex nl_table_wait.lock irq_context: 0 epnested_mutex &ep->mtx uevent_sock_mutex &obj_hash[i].lock irq_context: 0 epnested_mutex &ep->mtx subsys mutex#15 irq_context: 0 epnested_mutex &ep->mtx subsys mutex#15 &rq->__lock irq_context: 0 epnested_mutex &ep->mtx subsys mutex#15 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 epnested_mutex &ep->mtx subsys mutex#15 &k->k_lock irq_context: 0 epnested_mutex &ep->mtx events_lock irq_context: 0 epnested_mutex &ep->mtx &dentry->d_lock irq_context: 0 
epnested_mutex &ep->mtx &ep->poll_wait irq_context: 0 epnested_mutex &ep->mtx &ep->mtx/1 &ep->lock irq_context: 0 epnested_mutex &ep->mtx &rq->__lock irq_context: 0 epnested_mutex &ep->mtx &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex lock kernfs_idr_lock &c->lock irq_context: 0 &u->iolock &mm->mmap_lock mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex rcu_read_lock rcu_node_0 irq_context: 0 &u->iolock &mm->mmap_lock mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 kn->active#60 fs_reclaim irq_context: 0 kn->active#60 fs_reclaim &rq->__lock irq_context: 0 kn->active#60 fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 kn->active#60 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#60 stock_lock irq_context: 0 kn->active#60 &kernfs_locks->open_file_mutex[count] irq_context: 0 kn->active#60 &kernfs_locks->open_file_mutex[count] fs_reclaim irq_context: 0 kn->active#60 &kernfs_locks->open_file_mutex[count] fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &f->f_pos_lock sb_writers#11 &of->mutex kn->active#60 devcgroup_mutex irq_context: 0 &u->iolock &mm->mmap_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 &u->iolock &mm->mmap_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 &u->iolock &mm->mmap_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6 &obj_hash[i].lock pool_lock irq_context: 0 sk_lock-AF_INET6 &asoc->wait &p->pi_lock irq_context: 0 sk_lock-AF_INET6 &asoc->wait &p->pi_lock &rq->__lock irq_context: 0 sk_lock-AF_INET6 &asoc->wait &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_INET6 rcu_read_lock &ei->socket.wq.wait &p->pi_lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock &ei->socket.wq.wait &p->pi_lock &rq->__lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock &ei->socket.wq.wait &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &u->iolock &mm->mmap_lock &c->lock irq_context: 0 &u->iolock &mm->mmap_lock &obj_hash[i].lock irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 mapping.invalidate_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 mapping.invalidate_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 mapping.invalidate_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 mapping.invalidate_lock &sb->s_type->i_lock_key#22 &xa->xa_lock#6 &obj_hash[i].lock pool_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 mapping.invalidate_lock jbd2_handle &ei->i_data_sem &ei->i_data_sem/1 bit_wait_table + i irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8/4 &sem->wait_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8/4 &p->pi_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8/4 &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8/4 &p->pi_lock &rq->__lock 
&per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8/4 &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8/4 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &ep->mtx rcu_read_lock &ep->poll_wait irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock rcu_read_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 ipvs->sync_mutex (console_sem).lock irq_context: 0 ipvs->sync_mutex console_lock console_srcu console_owner_lock irq_context: 0 ipvs->sync_mutex console_lock console_srcu console_owner irq_context: 0 ipvs->sync_mutex console_lock console_srcu console_owner &port_lock_key irq_context: 0 ipvs->sync_mutex console_lock console_srcu console_owner console_owner_lock irq_context: 0 ipvs->sync_mutex &p->pi_lock irq_context: 0 ipvs->sync_mutex &p->pi_lock &rq->__lock irq_context: 0 ipvs->sync_mutex &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock fill_pool_map-wait-type-override &c->lock irq_context: 0 &rdma_nl_types[idx].sem fs_reclaim irq_context: 0 &rdma_nl_types[idx].sem fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &rdma_nl_types[idx].sem pool_lock#2 irq_context: 0 &rdma_nl_types[idx].sem clients_rwsem irq_context: 0 ipvs->sync_mutex &x->wait irq_context: 0 &rdma_nl_types[idx].sem &obj_hash[i].lock irq_context: 0 &rdma_nl_types[idx].sem rlock-AF_NETLINK irq_context: 0 rtnl_mutex rcu_read_lock &im->lock irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock &in_dev->mc_tomb_lock &zone->lock irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock &in_dev->mc_tomb_lock &____s->seqcount irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock &in_dev->mc_tomb_lock rcu_read_lock pool_lock#2 irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock &in_dev->mc_tomb_lock &zone->lock &____s->seqcount irq_context: 0 sb_writers#4 &journal->j_state_lock &journal->j_wait_transaction_locked irq_context: 0 sb_writers#4 jbd2_handle &journal->j_wait_updates &p->pi_lock irq_context: 0 sb_writers#4 jbd2_handle &journal->j_wait_updates &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 jbd2_handle &journal->j_wait_updates &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem rtnl_mutex lock kernfs_idr_lock &obj_hash[i].lock irq_context: 0 &u->iolock &mm->mmap_lock rcu_node_0 irq_context: 0 pernet_ops_rwsem stack_depot_init_mutex &rq->__lock irq_context: 0 pernet_ops_rwsem stack_depot_init_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex defrag4_mutex &rq->__lock irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex defrag4_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock genl_mutex rtnl_mutex ipvs->sync_mutex k-sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 cb_lock genl_mutex rtnl_mutex ipvs->sync_mutex k-sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 cb_lock genl_mutex rtnl_mutex ipvs->sync_mutex k-sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 fill_pool_map-wait-type-override &n->list_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 cb_lock genl_mutex rtnl_mutex ipvs->sync_mutex k-sk_lock-AF_INET6 rcu_read_lock &rcu_state.expedited_wq irq_context: 0 cb_lock genl_mutex rtnl_mutex 
ipvs->sync_mutex k-sk_lock-AF_INET6 rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex ipvs->sync_mutex k-sk_lock-AF_INET6 rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 cb_lock genl_mutex rtnl_mutex ipvs->sync_mutex k-sk_lock-AF_INET6 rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &folio_wait_table[i] &p->pi_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &folio_wait_table[i] &p->pi_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &folio_wait_table[i] &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 epnested_mutex &ep->mtx &____s->seqcount#2 irq_context: 0 epnested_mutex &ep->mtx rcu_node_0 irq_context: 0 epnested_mutex &ep->mtx uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq irq_context: 0 epnested_mutex &ep->mtx uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock irq_context: 0 epnested_mutex &ep->mtx uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock &rq->__lock irq_context: 0 epnested_mutex &ep->mtx uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 epnested_mutex &ep->mtx uevent_sock_mutex rcu_read_lock rcu_node_0 irq_context: 0 epnested_mutex &ep->mtx uevent_sock_mutex rcu_read_lock &rq->__lock irq_context: 0 epnested_mutex &ep->mtx uevent_sock_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 mapping.invalidate_lock rcu_node_0 irq_context: 0 &mm->mmap_lock sb_writers#4 jbd2_handle &journal->j_wait_updates &p->pi_lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock &ei->socket.wq.wait &p->pi_lock &cfs_rq->removed.lock irq_context: 0 epnested_mutex &ep->mtx &n->list_lock irq_context: 0 epnested_mutex &ep->mtx &n->list_lock &c->lock irq_context: 0 epnested_mutex &ep->mtx lock kernfs_idr_lock pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_state.barrier_mutex rcu_state.barrier_lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_state.barrier_mutex rcu_state.barrier_lock fill_pool_map-wait-type-override pool_lock irq_context: 0 &mm->mmap_lock sb_writers#4 jbd2_handle &journal->j_wait_updates &p->pi_lock &rq->__lock irq_context: 0 &mm->mmap_lock sb_writers#4 jbd2_handle &journal->j_wait_updates &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 mapping.invalidate_lock &rcu_state.expedited_wq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 mapping.invalidate_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 mapping.invalidate_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 mapping.invalidate_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &rcu_state.expedited_wq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &rcu_state.expedited_wq 
&p->pi_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock rcu_read_lock &rcu_state.expedited_wq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle rcu_read_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &ei->i_prealloc_lock &pa->pa_lock#2 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &ei->i_es_lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock &c->lock irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock &n->list_lock irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock &n->list_lock &c->lock irq_context: 0 &u->iolock rcu_read_lock &ei->socket.wq.wait &p->pi_lock &cfs_rq->removed.lock irq_context: 0 &iint->mutex ima_extend_list_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 &iint->mutex ima_extend_list_mutex ima_extend_list_mutex.wait_lock irq_context: 0 &iint->mutex ima_extend_list_mutex.wait_lock irq_context: 0 &iint->mutex &p->pi_lock irq_context: 0 &iint->mutex &p->pi_lock &rq->__lock irq_context: 0 &iint->mutex &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &u->iolock &mm->mmap_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_state.exp_mutex &rnp->exp_wq[1] irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &ei->i_es_lock &sbi->s_es_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &pcp->lock &zone->lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &lg->lg_mutex &obj_hash[i].lock irq_context: 0 epnested_mutex &ep->mtx gdp_mutex &c->lock irq_context: 0 epnested_mutex &ep->mtx &cfs_rq->removed.lock irq_context: 0 &type->i_mutex_dir_key#4 fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 epnested_mutex &ep->mtx &root->kernfs_rwsem &rq->__lock irq_context: 0 epnested_mutex &ep->mtx &root->kernfs_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock &obj_hash[i].lock pool_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem stock_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &xa->xa_lock#6 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem lock#4 irq_context: 0 
&f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &journal->j_revoke_lock irq_context: 0 epnested_mutex &ep->mtx &root->kernfs_rwsem &sem->wait_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &sb->s_type->i_mutex_key#12 rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem bit_wait_table + i irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &dd->lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem rcu_read_lock &pool->lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &mapping->private_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock &pool->lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ei->i_es_lock key#8 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &journal->j_state_lock irq_context: softirq &(&bond->ad_work)->timer rcu_read_lock &pool->lock/1 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 &sig->cred_guard_mutex tomoyo_ss &mm->mmap_lock rcu_read_lock rcu_node_0 irq_context: 0 &sig->cred_guard_mutex tomoyo_ss &mm->mmap_lock rcu_read_lock &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &obj_hash[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle pool_lock#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ret->b_state_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem bit_wait_table + i irq_context: 0 epnested_mutex &ep->mtx &obj_hash[i].lock pool_lock irq_context: 0 epnested_mutex &ep->mtx uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock &cfs_rq->removed.lock irq_context: 0 &u->iolock &mm->mmap_lock fs_reclaim &rq->__lock 
irq_context: 0 &u->iolock &mm->mmap_lock fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 sb_writers#4 remove_cache_srcu rcu_read_lock &rq->__lock irq_context: 0 sb_writers#4 sb_writers#4 remove_cache_srcu rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &ret->b_state_lock &journal->j_list_lock quarantine_lock irq_context: 0 sk_lock-AF_INET6 &asoc->wait &p->pi_lock &cfs_rq->removed.lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh &sch->q.lock &c->lock irq_context: 0 &type->i_mutex_dir_key#4 fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sig->cred_guard_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_state.exp_mutex &rnp->exp_wq[2] irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock &n->list_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock &n->list_lock &c->lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &xa->xa_lock#6 stock_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &xa->xa_lock#6 pool_lock#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ret->b_state_lock &journal->j_list_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem rcu_read_lock &rcu_state.gp_wq irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem rcu_node_0 irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&tbl->gc_work)->work) &tbl->lock quarantine_lock irq_context: 0 sk_lock-AF_INET6 &tcp_hashinfo.bhash[i].lock &____s->seqcount irq_context: 0 sk_lock-AF_INET6 &tcp_hashinfo.bhash[i].lock rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#4 mapping.invalidate_lock rcu_read_lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 sb_writers#4 mapping.invalidate_lock rcu_read_lock fill_pool_map-wait-type-override pool_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &sbi->s_orphan_lock rcu_node_0 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 pernet_ops_rwsem nl_table_wait.lock &p->pi_lock &rq->__lock irq_context: 0 pernet_ops_rwsem nl_table_wait.lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &fsnotify_mark_srcu &____s->seqcount#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &p->pi_lock &cfs_rq->removed.lock irq_context: 0 rcu_read_lock &sighand->siglock &n->list_lock irq_context: 0 rcu_read_lock &sighand->siglock &n->list_lock &c->lock irq_context: 0 (wq_completion)kblockd (work_completion)(&(&q->requeue_work)->work) rcu_node_0 irq_context: 0 
(wq_completion)kblockd (work_completion)(&(&q->requeue_work)->work) &rq->__lock irq_context: 0 (wq_completion)kblockd (work_completion)(&(&q->requeue_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &cfs_rq->removed.lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &lg->lg_mutex rcu_read_lock rcu_node_0 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &lg->lg_mutex rcu_read_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &lg->lg_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &base->lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &base->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock fill_pool_map-wait-type-override &c->lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock fill_pool_map-wait-type-override rcu_node_0 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock fill_pool_map-wait-type-override &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock fill_pool_map-wait-type-override &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock fill_pool_map-wait-type-override pool_lock irq_context: 0 &sb->s_type->i_mutex_key#10 krc.lock irq_context: 0 &kernfs_locks->open_file_mutex[count] fill_pool_map-wait-type-override &n->list_lock irq_context: 0 &kernfs_locks->open_file_mutex[count] fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 pernet_ops_rwsem &ht->mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &rnp->exp_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_state.exp_mutex irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_state.exp_mutex rcu_state.exp_mutex.wait_lock irq_context: 0 sb_writers#4 tomoyo_ss rcu_node_0 irq_context: 0 namespace_sem fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 namespace_sem fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock stock_lock irq_context: 0 cb_lock ovs_mutex irq_context: 0 cb_lock ovs_mutex &obj_hash[i].lock irq_context: 0 cb_lock ovs_mutex pool_lock#2 irq_context: 0 cb_lock ovs_mutex krc.lock irq_context: 0 cb_lock ovs_mutex krc.lock &obj_hash[i].lock irq_context: 0 cb_lock ovs_mutex krc.lock &base->lock irq_context: 0 cb_lock ovs_mutex krc.lock &base->lock &obj_hash[i].lock irq_context: 0 cb_lock ovs_mutex &rq->__lock irq_context: 0 cb_lock ovs_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 cb_lock ovs_mutex &obj_hash[i].lock pool_lock irq_context: 0 cb_lock ovs_mutex fill_pool_map-wait-type-override 
pool_lock#2 irq_context: 0 cb_lock ovs_mutex fill_pool_map-wait-type-override pool_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &pnn->pndevs.lock &rq->__lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &pnn->pndevs.lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 tomoyo_ss rcu_read_lock rename_lock irq_context: 0 cb_lock cpu_hotplug_lock jump_label_mutex irq_context: 0 cb_lock cpu_hotplug_lock jump_label_mutex text_mutex irq_context: 0 cb_lock cpu_hotplug_lock jump_label_mutex text_mutex ptlock_ptr(page)#2 irq_context: 0 pernet_ops_rwsem rtnl_mutex remove_cache_srcu rcu_node_0 irq_context: 0 sb_writers#8 tomoyo_ss &base->lock irq_context: 0 sb_writers#8 tomoyo_ss &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &base->lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem nl_table_wait.lock &p->pi_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem nl_table_wait.lock &p->pi_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem nl_table_wait.lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem nl_table_wait.lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &tsk->futex_exit_mutex &mm->mmap_lock ptlock_ptr(page)#2 irq_context: 0 &tsk->futex_exit_mutex &mm->mmap_lock fs_reclaim irq_context: 0 &tsk->futex_exit_mutex &mm->mmap_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 pernet_ops_rwsem rcu_read_lock rcu_read_lock &pool->lock irq_context: 0 pernet_ops_rwsem rcu_read_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 pernet_ops_rwsem rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 pernet_ops_rwsem rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &tsk->futex_exit_mutex &mm->mmap_lock remove_cache_srcu irq_context: 0 &tsk->futex_exit_mutex &mm->mmap_lock remove_cache_srcu quarantine_lock irq_context: 0 &tsk->futex_exit_mutex &mm->mmap_lock remove_cache_srcu &c->lock irq_context: 0 &tsk->futex_exit_mutex &mm->mmap_lock remove_cache_srcu &n->list_lock irq_context: 0 &tsk->futex_exit_mutex &mm->mmap_lock remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 &tsk->futex_exit_mutex &mm->mmap_lock remove_cache_srcu &obj_hash[i].lock irq_context: 0 &tsk->futex_exit_mutex &mm->mmap_lock remove_cache_srcu &pcp->lock &zone->lock irq_context: 0 &tsk->futex_exit_mutex &mm->mmap_lock remove_cache_srcu &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &tsk->futex_exit_mutex &mm->mmap_lock remove_cache_srcu pool_lock#2 irq_context: 0 sb_writers#7 &cfs_rq->removed.lock irq_context: 0 sb_writers#7 &obj_hash[i].lock irq_context: 0 sb_writers#7 pool_lock#2 irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 tomoyo_ss &cfs_rq->removed.lock irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh noop_qdisc.q.lock irq_context: 0 cb_lock rtnl_mutex &dev->power.lock irq_context: 0 proto_tab_lock raw_sk_list.lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 batched_entropy_u8.lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 
kfence_freelist_lock irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 tomoyo_ss &____s->seqcount#2 irq_context: 0 &sb->s_type->i_mutex_key#10 raw_sk_list.lock irq_context: 0 rtnl_mutex sk_lock-AF_INET cpu_hotplug_lock irq_context: softirq rcu_read_lock rcu_read_lock tk_core.seq.seqcount irq_context: 0 &sb->s_type->i_mutex_key#10 rtnl_mutex sk_lock-AF_INET irq_context: 0 &sb->s_type->i_mutex_key#10 rtnl_mutex sk_lock-AF_INET slock-AF_INET irq_context: 0 &sb->s_type->i_mutex_key#10 rtnl_mutex slock-AF_INET irq_context: 0 sb_writers#4 sb_internal jbd2_handle &sbi->s_orphan_lock &cfs_rq->removed.lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem &sem->wait_lock irq_context: softirq (&n->timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh noop_qdisc.q.lock irq_context: 0 (wq_completion)events (work_completion)(&(&krcp->krw_arr[i].rcu_work)->work) rcu_callback &rcu_state.expedited_wq irq_context: 0 sk_lock-AF_INET6 &vma->vm_lock->lock lock#4 irq_context: 0 sk_lock-AF_INET6 &vma->vm_lock->lock lock#4 &lruvec->lru_lock irq_context: 0 sk_lock-AF_INET6 &vma->vm_lock->lock lock#5 irq_context: 0 sk_lock-AF_INET6 &vma->vm_lock->lock mmu_notifier_invalidate_range_start irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 &hashinfo->ehash_locks[i] irq_context: 0 proto_tab_lock &c->lock irq_context: 0 sk_lock-AF_NFC &local->raw_sockets.lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_NFC &local->raw_sockets.lock irq_context: 0 &sig->cred_guard_mutex tomoyo_ss &mm->mmap_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh &sch->q.lock &____s->seqcount#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh &sch->q.lock &____s->seqcount irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock rcu_read_lock_bh rcu_read_lock quarantine_lock irq_context: 0 delayed_uprobe_lock &rq->__lock irq_context: 0 delayed_uprobe_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq (&n->timer) &____s->seqcount#2 irq_context: softirq (&n->timer) &____s->seqcount irq_context: 0 sk_lock-AF_QIPCRTR irq_context: 0 sk_lock-AF_QIPCRTR slock-AF_QIPCRTR irq_context: 0 slock-AF_QIPCRTR irq_context: 0 pernet_ops_rwsem rtnl_mutex gdp_mutex &rq->__lock irq_context: 0 pernet_ops_rwsem rtnl_mutex gdp_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq &(&conn->disc_work)->timer rcu_read_lock &pool->lock/1 &p->pi_lock &cfs_rq->removed.lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock pool_lock#2 irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)hci1#2 (work_completion)(&(&conn->disc_work)->work) irq_context: 0 (wq_completion)hci1#2 (work_completion)(&(&conn->disc_work)->work) &list->lock#9 irq_context: 0 (wq_completion)hci1#2 (work_completion)(&(&conn->disc_work)->work) rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)hci1#2 (work_completion)(&(&conn->disc_work)->work) rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 
(wq_completion)hci1#2 (work_completion)(&(&conn->disc_work)->work) rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&(&conn->disc_work)->work) rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)hci1#2 (work_completion)(&(&conn->disc_work)->work) rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->cmd_work) &n->list_lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->cmd_work) &n->list_lock &c->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock quarantine_lock irq_context: softirq (&n->timer) k-slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock &c->lock irq_context: softirq (&n->timer) k-slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh noop_qdisc.q.lock irq_context: softirq (&n->timer) k-slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: softirq (&n->timer) k-slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh pool_lock#2 irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 &xs->mutex &mm->mmap_lock &pcp->lock &zone->lock irq_context: 0 &xs->mutex &mm->mmap_lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 jbd2_handle &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 jbd2_handle &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &xs->mutex &mm->mmap_lock ptlock_ptr(page) irq_context: 0 &xs->mutex &pcp->lock &zone->lock irq_context: 0 &xs->mutex &pcp->lock &zone->lock &____s->seqcount irq_context: 0 (wq_completion)events (work_completion)(&umem->work) irq_context: 0 (wq_completion)events (work_completion)(&umem->work) umem_ida.xa_lock irq_context: 0 (wq_completion)events (work_completion)(&umem->work) vmap_area_lock irq_context: 0 (wq_completion)events (work_completion)(&umem->work) &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&umem->work) purge_vmap_area_lock irq_context: 0 (wq_completion)events (work_completion)(&umem->work) purge_vmap_area_lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&umem->work) purge_vmap_area_lock pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&umem->work) pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&umem->work) &lruvec->lru_lock irq_context: 0 (wq_completion)events (work_completion)(&umem->work) rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&umem->work) rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&umem->work) rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&umem->work) &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&umem->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&umem->work) rcu_read_lock rcu_node_0 irq_context: 
0 (wq_completion)events (work_completion)(&umem->work) rcu_read_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)events (work_completion)(&umem->work) rcu_read_lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &____s->seqcount irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle stock_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &xa->xa_lock#6 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &xa->xa_lock#6 stock_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &xa->xa_lock#6 pool_lock#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle lock#4 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &dd->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &obj_hash[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle rcu_read_lock &pool->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle rcu_read_lock &memcg->move_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle rcu_read_lock &xa->xa_lock#6 irq_context: 0 &sig->cred_guard_mutex remove_cache_srcu rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &journal->j_list_lock irq_context: 0 pernet_ops_rwsem nf_hook_mutex rcu_node_0 irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &cfs_rq->removed.lock irq_context: 0 (wq_completion)events (work_completion)(&aux->work) rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)events (work_completion)(&aux->work) rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex defrag6_mutex nf_hook_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex &bat_priv->forw_bat_list_lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex &bat_priv->forw_bat_list_lock fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex &bat_priv->forw_bat_list_lock fill_pool_map-wait-type-override &n->list_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex 
&bat_priv->forw_bat_list_lock fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex &bat_priv->forw_bat_list_lock fill_pool_map-wait-type-override pool_lock irq_context: 0 sk_lock-AF_INET6 &tcp_hashinfo.bhash[i].lock &c->lock irq_context: 0 namespace_sem &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &group->mark_mutex rcu_read_lock rcu_node_0 irq_context: 0 &group->mark_mutex rcu_read_lock &rq->__lock irq_context: 0 &group->mark_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx uevent_sock_mutex rlock-AF_NETLINK irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 stock_lock irq_context: 0 cb_lock genl_mutex rfkill_global_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 raw_notifier_lock irq_context: 0 cb_lock genl_mutex &sb->s_type->i_mutex_key#3 stock_lock irq_context: 0 cb_lock genl_mutex &sb->s_type->i_mutex_key#3 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 raw_notifier_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_CAN pcpu_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx stock_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 rcu_read_lock rcu_node_0 irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 rcu_read_lock &rq->__lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx rcu_read_lock rcu_node_0 irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx rcu_read_lock &rq->__lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx rcu_read_lock &rcu_state.gp_wq irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &sem->wait_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &p->pi_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &p->pi_lock &cfs_rq->removed.lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &p->pi_lock &rq->__lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &root->kernfs_rwsem &sem->wait_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &root->kernfs_rwsem &rq->__lock irq_context: 0 
cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &root->kernfs_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &xt[i].mutex rcu_read_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx rlock-AF_NETLINK irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_NETLINK rcu_read_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)events xfrm_state_gc_work rcu_state.exp_mutex &rnp->exp_wq[1] irq_context: 0 (wq_completion)events xfrm_state_gc_work rcu_state.exp_mutex.wait_lock irq_context: 0 (wq_completion)events xfrm_state_gc_work &p->pi_lock irq_context: 0 (wq_completion)events xfrm_state_gc_work &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events xfrm_state_gc_work &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_INET6 sctp_assocs_id_lock &obj_hash[i].lock irq_context: 0 cb_lock genl_mutex rfkill_global_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem &sem->wait_lock irq_context: 0 cb_lock genl_mutex rfkill_global_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem &rq->__lock irq_context: 0 cb_lock genl_mutex rfkill_global_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock genl_mutex rfkill_global_mutex &sem->wait_lock irq_context: 0 cb_lock genl_mutex rfkill_global_mutex &p->pi_lock irq_context: 0 cb_lock genl_mutex rfkill_global_mutex &p->pi_lock &rq->__lock irq_context: 0 cb_lock genl_mutex rfkill_global_mutex &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock genl_mutex rfkill_global_mutex &root->kernfs_rwsem &sem->wait_lock irq_context: 0 cb_lock genl_mutex rfkill_global_mutex uevent_sock_mutex &____s->seqcount#2 irq_context: 0 cb_lock genl_mutex rfkill_global_mutex uevent_sock_mutex &____s->seqcount irq_context: 0 cb_lock genl_mutex &sb->s_type->i_mutex_key#3 per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 cb_lock genl_mutex &sb->s_type->i_mutex_key#3 stock_lock rcu_read_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 &type->i_mutex_dir_key#5 &cfs_rq->removed.lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock &rq->__lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx uevent_sock_mutex rcu_read_lock rcu_node_0 irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx uevent_sock_mutex rcu_read_lock &rq->__lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx uevent_sock_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &root->kernfs_rwsem &p->pi_lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_INET6 &tcp_hashinfo.bhash[i].lock &c->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem sysctl_lock fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem sysctl_lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)netns 
net_cleanup_work pernet_ops_rwsem sysctl_lock fill_pool_map-wait-type-override pool_lock irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem &anon_vma->rwsem rcu_node_0 irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem &anon_vma->rwsem &rcu_state.expedited_wq irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem &anon_vma->rwsem &rcu_state.expedited_wq &p->pi_lock irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem &anon_vma->rwsem &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem &anon_vma->rwsem &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sig->cred_guard_mutex rcu_read_lock &rcu_state.expedited_wq irq_context: 0 &sig->cred_guard_mutex rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)events (work_completion)(&pwq->unbound_release_work) rcu_state.exp_mutex &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &sig->cred_guard_mutex rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 &sig->cred_guard_mutex rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &type->i_mutex_dir_key#5 fs_reclaim &rq->__lock irq_context: 0 &type->i_mutex_dir_key#5 fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)t irq_context: 0 (wq_completion)t (work_completion)(&local->reconfig_filter) irq_context: 0 (wq_completion)t (work_completion)(&local->reconfig_filter) &local->filter_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 jbd2_handle rcu_read_lock &rcu_state.expedited_wq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 jbd2_handle rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 jbd2_handle rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 jbd2_handle rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock genl_mutex &cfs_rq->removed.lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rq->__lock &cfs_rq->removed.lock irq_context: 0 sk_lock-AF_AX25 irq_context: 0 sk_lock-AF_AX25 slock-AF_AX25 irq_context: 0 slock-AF_AX25 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_AX25 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_AX25 slock-AF_AX25 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_AX25 clock-AF_AX25 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_AX25 ax25_list_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_AX25 &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_AX25 &list->lock#39 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_AX25 rlock-AF_AX25 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_AX25 wlock-AF_AX25 irq_context: 0 &sb->s_type->i_mutex_key#10 slock-AF_AX25 irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &root->kernfs_rwsem &root->kernfs_iattr_rwsem &rq->__lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &root->kernfs_rwsem &root->kernfs_iattr_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx remove_cache_srcu &rq->__lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, 
cpu)->seq irq_context: 0 cb_lock genl_mutex &sb->s_type->i_mutex_key#3 rcu_node_0 irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7 &root->kernfs_rwsem &n->list_lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7 &root->kernfs_rwsem &n->list_lock &c->lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7 &root->kernfs_rwsem rcu_node_0 irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7 &root->kernfs_rwsem &rcu_state.expedited_wq irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7 &root->kernfs_rwsem &rcu_state.expedited_wq &p->pi_lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7 &root->kernfs_rwsem &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7 &root->kernfs_rwsem &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 kn->active#61 fs_reclaim irq_context: 0 kn->active#61 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#61 stock_lock irq_context: 0 kn->active#61 &rq->__lock irq_context: 0 kn->active#61 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 kn->active#61 &kernfs_locks->open_file_mutex[count] irq_context: 0 kn->active#61 &kernfs_locks->open_file_mutex[count] fs_reclaim irq_context: 0 kn->active#61 &kernfs_locks->open_file_mutex[count] fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ei->i_prealloc_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &sbi->s_md_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem rcu_read_lock rcu_node_0 irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem rcu_read_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem fill_pool_map-wait-type-override pool_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &bgl->locks[i].lock &sbi->s_md_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &bgl->locks[i].lock &ei->i_prealloc_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem key#3 irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &pa->pa_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock key#10 irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &xa->xa_lock#6 key#10 irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem rcu_read_lock pool_lock#2 irq_context: 0 &f->f_pos_lock 
sb_writers#4 &sbi->s_writepages_rwsem rcu_read_lock rcu_node_0 irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem rcu_read_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock genl_mutex fill_pool_map-wait-type-override &n->list_lock irq_context: 0 cb_lock genl_mutex fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 cb_lock genl_mutex fill_pool_map-wait-type-override &rq->__lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 rcu_node_0 irq_context: 0 cb_lock genl_mutex rfkill_global_mutex uevent_sock_mutex &n->list_lock irq_context: 0 cb_lock genl_mutex rfkill_global_mutex uevent_sock_mutex &n->list_lock &c->lock irq_context: 0 cb_lock genl_mutex rfkill_global_mutex uevent_sock_mutex &rq->__lock irq_context: 0 cb_lock genl_mutex rfkill_global_mutex uevent_sock_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#5 rcu_node_0 irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &c->lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock rcu_node_0 irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx net_rwsem &rq->__lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx net_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_node_0 irq_context: 0 delayed_uprobe_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond1 (work_completion)(&(&bond->mii_work)->work) &rq->__lock irq_context: 0 rtnl_mutex _xmit_ETHER rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)phy18 irq_context: 0 (wq_completion)phy18 (work_completion)(&local->reconfig_filter) irq_context: 0 (wq_completion)phy18 (work_completion)(&local->reconfig_filter) &local->filter_lock irq_context: 0 rtnl_mutex &root->kernfs_rwsem &cfs_rq->removed.lock irq_context: 0 kn->active#15 &kernfs_locks->open_file_mutex[count] &n->list_lock irq_context: 0 kn->active#15 &kernfs_locks->open_file_mutex[count] &n->list_lock &c->lock irq_context: 0 kn->active#61 &c->lock irq_context: 0 kn->active#61 &n->list_lock irq_context: 0 kn->active#61 &n->list_lock &c->lock irq_context: 0 kn->active#61 &kernfs_locks->open_file_mutex[count] &c->lock irq_context: 0 kn->active#61 &kernfs_locks->open_file_mutex[count] &n->list_lock irq_context: 0 kn->active#61 &kernfs_locks->open_file_mutex[count] &n->list_lock &c->lock irq_context: 0 rtnl_mutex uevent_sock_mutex.wait_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex &bat_priv->tvlv.container_list_lock batched_entropy_u8.lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex &bat_priv->tvlv.container_list_lock kfence_freelist_lock irq_context: 0 &sb->s_type->i_mutex_key#8 fill_pool_map-wait-type-override pool_lock#2 
irq_context: 0 &sb->s_type->i_mutex_key#8 fill_pool_map-wait-type-override &c->lock irq_context: 0 &sb->s_type->i_mutex_key#8 fill_pool_map-wait-type-override &n->list_lock irq_context: 0 &sb->s_type->i_mutex_key#8 fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 &sb->s_type->i_mutex_key#8 fill_pool_map-wait-type-override pool_lock irq_context: 0 sb_writers#5 fill_pool_map-wait-type-override rcu_read_lock rcu_node_0 irq_context: 0 rtnl_mutex &idev->mc_lock fill_pool_map-wait-type-override &n->list_lock irq_context: 0 rtnl_mutex &idev->mc_lock fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 rtnl_mutex &idev->mc_lock fill_pool_map-wait-type-override &rq->__lock irq_context: 0 rtnl_mutex &idev->mc_lock fill_pool_map-wait-type-override &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh &sch->q.lock &n->list_lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh &sch->q.lock &n->list_lock &c->lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &rq->__lock &cfs_rq->removed.lock irq_context: 0 pernet_ops_rwsem rtnl_mutex uevent_sock_mutex remove_cache_srcu irq_context: 0 pernet_ops_rwsem rtnl_mutex uevent_sock_mutex remove_cache_srcu quarantine_lock irq_context: 0 &sb->s_type->i_mutex_key#8 fs_reclaim &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#8 fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sbi->s_writepages_rwsem &n->list_lock irq_context: 0 &sbi->s_writepages_rwsem &n->list_lock &c->lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex &bat_priv->tvlv.container_list_lock &meta->lock irq_context: 0 (wq_completion)events (work_completion)(&aux->work) per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 &type->i_mutex_dir_key#4 &root->kernfs_rwsem fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_PACKET init_mm.page_table_lock irq_context: 0 sb_writers#3 tomoyo_ss &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#3 tomoyo_ss batched_entropy_u8.lock irq_context: 0 sb_writers#3 tomoyo_ss kfence_freelist_lock irq_context: 0 sb_writers#3 tomoyo_ss &meta->lock irq_context: 0 sk_lock-AF_TIPC &pcp->lock &zone->lock irq_context: 0 sk_lock-AF_TIPC &pcp->lock &zone->lock &____s->seqcount irq_context: 0 nfnl_subsys_ctnetlink nf_conntrack_mutex irq_context: 0 nfnl_subsys_ctnetlink nf_conntrack_mutex &rq->__lock irq_context: 0 nfnl_subsys_ctnetlink nf_conntrack_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 nfnl_subsys_ctnetlink nf_conntrack_mutex &nf_conntrack_locks[i] irq_context: 0 nfnl_subsys_ctnetlink nf_conntrack_mutex &____s->seqcount#7 irq_context: 0 nfnl_subsys_ctnetlink nf_conntrack_mutex rcu_read_lock &nf_nat_locks[i] irq_context: 0 nfnl_subsys_ctnetlink nf_conntrack_mutex &obj_hash[i].lock irq_context: 0 nfnl_subsys_ctnetlink nf_conntrack_mutex pool_lock#2 irq_context: 0 nfnl_subsys_ctnetlink nf_conntrack_mutex &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 irq_context: 0 nfnl_subsys_ctnetlink nf_conntrack_mutex &nf_conntrack_locks[i]/1 irq_context: 0 sk_lock-AF_TIPC fs_reclaim &rq->__lock irq_context: 0 sk_lock-AF_TIPC fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-crypt-wg0#4 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh 
rcu_read_lock rcu_read_lock &____s->seqcount#7 irq_context: 0 (wq_completion)wg-crypt-wg0#4 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &nf_nat_locks[i] irq_context: 0 (wq_completion)wg-crypt-wg0#4 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &nf_conntrack_locks[i] irq_context: 0 (wq_completion)wg-crypt-wg0#4 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 irq_context: 0 (wq_completion)wg-crypt-wg0#4 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 batched_entropy_u8.lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_data_sem &meta->lock irq_context: 0 sb_writers#8 tomoyo_ss rcu_read_lock &rcu_state.expedited_wq irq_context: 0 sb_writers#8 tomoyo_ss rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 sb_writers#8 tomoyo_ss rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#8 tomoyo_ss rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh noop_qdisc.q.lock irq_context: 0 cb_lock genl_mutex &fn->fou_lock irq_context: 0 &mm->mmap_lock remove_cache_srcu rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock &vma->vm_lock->lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)events xfrm_state_gc_work rcu_state.exp_mutex &rnp->exp_wq[3] irq_context: 0 cb_lock genl_mutex rtnl_mutex &____s->seqcount irq_context: 0 cb_lock genl_mutex rtnl_mutex rcu_read_lock pool_lock#2 irq_context: 0 cb_lock genl_mutex rtnl_mutex console_lock console_srcu console_owner &port_lock_key irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-sk_lock-AF_RXRPC &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &wq->mutex &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex nf_hook_mutex remove_cache_srcu &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex nf_hook_mutex remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex &cfs_rq->removed.lock irq_context: 0 &type->i_mutex_dir_key#4 remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-crypt-wg2#4 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)wg-crypt-wg2#4 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &____s->seqcount#7 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex fill_pool_map-wait-type-override &rq->__lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex 
fill_pool_map-wait-type-override &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-crypt-wg2#4 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &nf_nat_locks[i] irq_context: 0 (wq_completion)wg-crypt-wg2#4 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &nf_conntrack_locks[i] irq_context: 0 (wq_completion)wg-crypt-wg2#4 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 irq_context: 0 (wq_completion)wg-crypt-wg2#4 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 batched_entropy_u8.lock irq_context: 0 (wq_completion)wg-crypt-wg2#4 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &nf_conntrack_locks[i]/1 irq_context: 0 (wq_completion)phy19 irq_context: 0 (wq_completion)phy19 (work_completion)(&local->reconfig_filter) irq_context: 0 (wq_completion)phy19 (work_completion)(&local->reconfig_filter) &local->filter_lock irq_context: 0 &type->i_mutex_dir_key#7 rename_lock.seqcount irq_context: 0 &type->i_mutex_dir_key#7 &rq->__lock irq_context: 0 &type->i_mutex_dir_key#7 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_CAN &net->can.rcvlists_lock irq_context: 0 rtnl_mutex sk_lock-AF_CAN irq_context: 0 rtnl_mutex sk_lock-AF_CAN slock-AF_CAN irq_context: 0 rtnl_mutex slock-AF_CAN irq_context: 0 &type->i_mutex_dir_key#7 &sb->s_type->i_lock_key#31 &dentry->d_lock &wq#2 irq_context: 0 kn->active#62 &rq->__lock irq_context: 0 kn->active#62 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 kn->active#62 fs_reclaim irq_context: 0 kn->active#62 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#62 stock_lock irq_context: 0 kn->active#62 &kernfs_locks->open_file_mutex[count] irq_context: 0 kn->active#62 &kernfs_locks->open_file_mutex[count] fs_reclaim irq_context: 0 kn->active#62 &kernfs_locks->open_file_mutex[count] fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#62 &kernfs_locks->open_file_mutex[count] &c->lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_CAN &net->can.rcvlists_lock irq_context: 0 sk_lock-AF_INET6 &list->lock#40 irq_context: 0 &tfile->napi_mutex rcu_read_lock rcu_read_lock rcu_read_lock &____s->seqcount#7 irq_context: 0 &tfile->napi_mutex rcu_read_lock rcu_read_lock rcu_read_lock pool_lock#2 irq_context: 0 &tfile->napi_mutex rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &____s->seqcount#7 irq_context: 0 &tfile->napi_mutex rcu_read_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 &tfile->napi_mutex rcu_read_lock rcu_read_lock rcu_read_lock &nf_nat_locks[i] irq_context: 0 &tfile->napi_mutex rcu_read_lock rcu_read_lock rcu_read_lock &nf_conntrack_locks[i] irq_context: 0 &tfile->napi_mutex rcu_read_lock rcu_read_lock rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 irq_context: 0 &tfile->napi_mutex rcu_read_lock rcu_read_lock rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 batched_entropy_u8.lock irq_context: 0 &tfile->napi_mutex rcu_read_lock rcu_read_lock rcu_read_lock &nf_conntrack_locks[i]/1 irq_context: 0 &tfile->napi_mutex rcu_read_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &idev->mc_lock 
rcu_node_0 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &idev->mc_lock &rcu_state.expedited_wq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &idev->mc_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &idev->mc_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &idev->mc_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 kn->active#62 &c->lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6 &____s->seqcount#2 irq_context: 0 (wq_completion)wg-crypt-wg0#4 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#2 irq_context: 0 (wq_completion)wg-crypt-wg0#4 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) rcu_node_0 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex stock_lock rcu_read_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 rtnl_mutex &br->multicast_lock &obj_hash[i].lock irq_context: 0 rtnl_mutex &br->multicast_lock &base->lock irq_context: 0 rtnl_mutex &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: softirq (&peer->T3_rtx_timer) irq_context: softirq (&peer->T3_rtx_timer) slock-AF_INET6 irq_context: softirq (&peer->T3_rtx_timer) slock-AF_INET6 batched_entropy_u32.lock irq_context: softirq (&peer->T3_rtx_timer) slock-AF_INET6 &obj_hash[i].lock irq_context: softirq (&peer->T3_rtx_timer) slock-AF_INET6 &base->lock irq_context: softirq (&peer->T3_rtx_timer) slock-AF_INET6 &base->lock &obj_hash[i].lock irq_context: softirq (&peer->T3_rtx_timer) slock-AF_INET6 pool_lock#2 irq_context: softirq (&peer->T3_rtx_timer) slock-AF_INET6 rcu_read_lock rcu_read_lock &____s->seqcount#7 irq_context: softirq (&peer->T3_rtx_timer) slock-AF_INET6 rcu_read_lock rcu_read_lock &ct->lock irq_context: softirq (&peer->T3_rtx_timer) slock-AF_INET6 rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: softirq (&peer->T3_rtx_timer) slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: softirq (&peer->T3_rtx_timer) slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6 &n->list_lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6 &n->list_lock &c->lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6 rcu_read_lock rcu_read_lock &____s->seqcount#7 irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6 rcu_read_lock rcu_read_lock &ct->lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6 rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6 &list->lock#40 irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6 quarantine_lock irq_context: softirq 
rcu_read_lock rcu_read_lock slock-AF_INET6 krc.lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6 rcu_read_lock rhashtable_bucket irq_context: softirq rcu_read_lock rcu_read_lock krc.lock irq_context: softirq rcu_read_lock rcu_read_lock &sctp_port_hashtable[i].lock irq_context: softirq rcu_read_lock rcu_read_lock &sctp_port_hashtable[i].lock &obj_hash[i].lock irq_context: softirq rcu_read_lock rcu_read_lock &sctp_port_hashtable[i].lock pool_lock#2 irq_context: softirq rcu_read_lock rcu_read_lock sctp_assocs_id_lock irq_context: softirq rcu_read_lock rcu_read_lock sctp_assocs_id_lock &obj_hash[i].lock irq_context: softirq rcu_read_lock rcu_read_lock sctp_assocs_id_lock pool_lock#2 irq_context: 0 rtnl_mutex rcu_read_lock &____s->seqcount#15 irq_context: 0 rtnl_mutex rcu_read_lock &____s->seqcount#16 irq_context: 0 sb_writers#4 sb_internal jbd2_handle rcu_read_lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle rcu_read_lock &obj_hash[i].lock irq_context: 0 nfnl_grp_active_lock irq_context: 0 &pipe->mutex/1 rcu_read_lock (console_sem).lock irq_context: 0 &pipe->mutex/1 rcu_read_lock console_lock console_srcu console_owner_lock irq_context: 0 &pipe->mutex/1 rcu_read_lock console_lock console_srcu console_owner irq_context: 0 &pipe->mutex/1 rcu_read_lock console_lock console_srcu console_owner &port_lock_key irq_context: 0 &pipe->mutex/1 rcu_read_lock console_lock console_srcu console_owner console_owner_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 jbd2_handle &journal->j_state_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 jbd2_handle &journal->j_state_lock &journal->j_wait_commit irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 jbd2_handle &journal->j_state_lock &journal->j_wait_commit &p->pi_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 jbd2_handle &journal->j_state_lock &journal->j_wait_commit &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 jbd2_handle &journal->j_state_lock &journal->j_wait_commit &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &pipe->mutex/1 rcu_read_lock &rcu_state.expedited_wq irq_context: 0 &pipe->mutex/1 rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 &pipe->mutex/1 rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 &pipe->mutex/1 rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &pipe->mutex/1 rlock-AF_NETLINK irq_context: 0 &pipe->mutex/1 &n->list_lock irq_context: 0 &pipe->mutex/1 &n->list_lock &c->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem &ei->i_es_lock batched_entropy_u8.lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem &ei->i_es_lock kfence_freelist_lock irq_context: 0 &sb->s_type->i_mutex_key#10 nfnl_grp_active_lock irq_context: 0 sk_lock-AF_INET6 &meta->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_data_sem &ei->i_es_lock &meta->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_data_sem &ei->i_es_lock kfence_freelist_lock irq_context: 0 (wq_completion)phy20 irq_context: 0 (wq_completion)phy20 (work_completion)(&local->reconfig_filter) irq_context: 0 (wq_completion)phy20 (work_completion)(&local->reconfig_filter) &local->filter_lock irq_context: 0 &mm->mmap_lock sb_pagefaults mapping.invalidate_lock rcu_read_lock &cfs_rq->removed.lock irq_context: 0 
&f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mmu_notifier_invalidate_range_start &cfs_rq->removed.lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mmu_notifier_invalidate_range_start &obj_hash[i].lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 rcu_read_lock &rcu_state.gp_wq irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &pipe->mutex/1 &____s->seqcount#2 irq_context: 0 &sig->cred_guard_mutex rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 nl_table_lock &c->lock irq_context: 0 &pipe->mutex/1 rcu_read_lock &n->list_lock irq_context: 0 &pipe->mutex/1 rcu_read_lock &n->list_lock &c->lock irq_context: 0 &pipe->mutex/1 rcu_read_lock &____s->seqcount#2 irq_context: 0 &sb->s_type->i_mutex_key#10 &net->sctp.addr_wq_lock slock-AF_INET6/1 krc.lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 &net->sctp.addr_wq_lock slock-AF_INET6/1 krc.lock &base->lock irq_context: 0 &sb->s_type->i_mutex_key#10 &net->sctp.addr_wq_lock slock-AF_INET6/1 krc.lock &base->lock &obj_hash[i].lock irq_context: 0 &pipe->mutex/1 nfnl_subsys_ctnetlink rcu_read_lock &cfs_rq->removed.lock irq_context: 0 tracepoints_mutex rcu_state.exp_mutex &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)bond1 (work_completion)(&(&bond->ad_work)->work) &rq->__lock irq_context: 0 &pipe->mutex/1 remove_cache_srcu &rq->__lock irq_context: 0 &pipe->mutex/1 remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &pipe->mutex/1 remove_cache_srcu rcu_read_lock rcu_node_0 irq_context: 0 &pipe->mutex/1 remove_cache_srcu rcu_read_lock &rq->__lock irq_context: 0 &pipe->mutex/1 remove_cache_srcu rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &type->i_mutex_dir_key#5 fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &meta->lock irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &cfs_rq->removed.lock irq_context: 0 rtnl_mutex sk_lock-AF_INET &mm->mmap_lock irq_context: 0 rtnl_mutex sk_lock-AF_INET &mm->mmap_lock &rq->__lock irq_context: 0 rtnl_mutex sk_lock-AF_INET fs_reclaim irq_context: 0 rtnl_mutex sk_lock-AF_INET fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 rtnl_mutex sk_lock-AF_INET pool_lock#2 irq_context: 0 rtnl_mutex sk_lock-AF_INET &obj_hash[i].lock irq_context: 0 rtnl_mutex sk_lock-AF_INET &in_dev->mc_tomb_lock irq_context: 0 rtnl_mutex sk_lock-AF_INET &im->lock irq_context: 0 rtnl_mutex sk_lock-AF_INET &base->lock irq_context: 0 rtnl_mutex sk_lock-AF_INET &base->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex sk_lock-AF_INET &rq->__lock irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 rtnl_mutex sk_lock-AF_INET &c->lock irq_context: 0 rtnl_mutex sk_lock-AF_INET rcu_read_lock &im->lock irq_context: 0 bpf_dispatcher_xdp.mutex irq_context: 0 bpf_dispatcher_xdp.mutex pack_mutex irq_context: 0 bpf_dispatcher_xdp.mutex fs_reclaim irq_context: 0 
bpf_dispatcher_xdp.mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 bpf_dispatcher_xdp.mutex pool_lock#2 irq_context: 0 bpf_dispatcher_xdp.mutex free_vmap_area_lock irq_context: 0 bpf_dispatcher_xdp.mutex vmap_area_lock irq_context: 0 bpf_dispatcher_xdp.mutex &____s->seqcount irq_context: 0 bpf_dispatcher_xdp.mutex init_mm.page_table_lock irq_context: 0 bpf_dispatcher_xdp.mutex bpf_lock irq_context: 0 bpf_dispatcher_xdp.mutex text_mutex irq_context: 0 bpf_dispatcher_xdp.mutex text_mutex ptlock_ptr(page)#2 irq_context: 0 bpf_dispatcher_xdp.mutex cpu_hotplug_lock irq_context: 0 bpf_dispatcher_xdp.mutex cpu_hotplug_lock static_call_mutex irq_context: 0 bpf_dispatcher_xdp.mutex cpu_hotplug_lock static_call_mutex text_mutex irq_context: 0 bpf_dispatcher_xdp.mutex cpu_hotplug_lock static_call_mutex text_mutex ptlock_ptr(page)#2 irq_context: 0 bpf_dispatcher_xdp.mutex rcu_state.exp_mutex rcu_node_0 irq_context: 0 bpf_dispatcher_xdp.mutex rcu_state.exp_mutex &obj_hash[i].lock irq_context: 0 bpf_dispatcher_xdp.mutex rcu_state.exp_mutex rcu_read_lock &pool->lock irq_context: 0 bpf_dispatcher_xdp.mutex rcu_state.exp_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 bpf_dispatcher_xdp.mutex rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 bpf_dispatcher_xdp.mutex rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 bpf_dispatcher_xdp.mutex rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 bpf_dispatcher_xdp.mutex rcu_state.exp_mutex &rnp->exp_wq[3] irq_context: 0 bpf_dispatcher_xdp.mutex rcu_state.exp_mutex &rq->__lock irq_context: 0 bpf_dispatcher_xdp.mutex rcu_state.exp_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock rcu_read_lock &r->producer_lock irq_context: 0 bpf_dispatcher_xdp.mutex &obj_hash[i].lock irq_context: 0 bpf_dispatcher_xdp.mutex rcu_state.exp_mutex &rnp->exp_wq[0] irq_context: 0 bpf_dispatcher_xdp.mutex rcu_state.exp_mutex.wait_lock irq_context: 0 bpf_dispatcher_xdp.mutex &p->pi_lock irq_context: 0 bpf_dispatcher_xdp.mutex &p->pi_lock &rq->__lock irq_context: 0 bpf_dispatcher_xdp.mutex &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 bpf_dispatcher_xdp.mutex &rq->__lock irq_context: 0 bpf_dispatcher_xdp.mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &xt[i].mutex &rcu_state.expedited_wq &p->pi_lock irq_context: 0 &xt[i].mutex &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 &xt[i].mutex &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex sk_lock-AF_INET krc.lock irq_context: 0 &sb->s_type->i_mutex_key#10 rtnl_mutex rcu_read_lock &im->lock irq_context: 0 &sb->s_type->i_mutex_key#10 rtnl_mutex &im->lock irq_context: 0 &sb->s_type->i_mutex_key#10 rtnl_mutex &im->lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 rtnl_mutex fs_reclaim irq_context: 0 &sb->s_type->i_mutex_key#10 rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &sb->s_type->i_mutex_key#10 rtnl_mutex pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 rtnl_mutex &in_dev->mc_tomb_lock irq_context: 0 &sb->s_type->i_mutex_key#10 rtnl_mutex &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 rtnl_mutex &base->lock 
irq_context: 0 &sb->s_type->i_mutex_key#10 rtnl_mutex &base->lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 rtnl_mutex krc.lock irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock &in_dev->mc_tomb_lock &c->lock irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock &in_dev->mc_tomb_lock rcu_read_lock &dir->lock#2 irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock &in_dev->mc_tomb_lock rcu_read_lock &ul->lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &wg->device_update_lock &peer->endpoint_lock &obj_hash[i].lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &wg->device_update_lock &peer->endpoint_lock pool_lock#2 irq_context: 0 sb_writers#8 tomoyo_ss rcu_read_lock &cfs_rq->removed.lock irq_context: 0 sb_writers#8 tomoyo_ss rcu_read_lock &obj_hash[i].lock irq_context: 0 kn->active#5 &cfs_rq->removed.lock irq_context: 0 rtnl_mutex sk_lock-AF_INET &im->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex sk_lock-AF_INET &im->lock pool_lock#2 irq_context: 0 sb_writers &type->i_mutex_dir_key#2 fs_reclaim irq_context: 0 sb_writers &type->i_mutex_dir_key#2 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers &type->i_mutex_dir_key#2 &xa->xa_lock#12 irq_context: 0 sb_writers &type->i_mutex_dir_key#2 &obj_hash[i].lock irq_context: 0 sb_writers &type->i_mutex_dir_key#2 stock_lock irq_context: 0 sb_writers &type->i_mutex_dir_key#2 &dentry->d_lock irq_context: 0 sb_writers &type->i_mutex_dir_key#2 rcu_read_lock rename_lock.seqcount irq_context: 0 sb_writers &type->i_mutex_dir_key#2 tomoyo_ss irq_context: 0 sb_writers &type->i_mutex_dir_key#2 tomoyo_ss mmu_notifier_invalidate_range_start irq_context: 0 sb_writers &type->i_mutex_dir_key#2 tomoyo_ss &c->lock irq_context: 0 sb_writers &type->i_mutex_dir_key#2 tomoyo_ss rcu_read_lock rcu_node_0 irq_context: 0 sb_writers &type->i_mutex_dir_key#2 tomoyo_ss rcu_read_lock &rq->__lock irq_context: 0 sb_writers &type->i_mutex_dir_key#2 tomoyo_ss rcu_read_lock mount_lock.seqcount irq_context: 0 sb_writers &type->i_mutex_dir_key#2 tomoyo_ss rcu_read_lock rcu_read_lock rename_lock.seqcount irq_context: 0 sb_writers &type->i_mutex_dir_key#2 tomoyo_ss &obj_hash[i].lock irq_context: 0 sb_writers &type->i_mutex_dir_key#2 &dentry->d_lock &wq#2 irq_context: 0 sb_writers &type->i_mutex_dir_key#2 &sbinfo->stat_lock irq_context: 0 sb_writers &type->i_mutex_dir_key#2 pool_lock#2 irq_context: 0 sb_writers &type->i_mutex_dir_key#2 &xa->xa_lock#12 &c->lock irq_context: 0 sb_writers &type->i_mutex_dir_key#2 &xa->xa_lock#12 pool_lock#2 irq_context: 0 sb_writers &type->i_mutex_dir_key#2 mmu_notifier_invalidate_range_start irq_context: 0 sb_writers &type->i_mutex_dir_key#2 &c->lock irq_context: 0 sb_writers &type->i_mutex_dir_key#2 &sb->s_type->i_lock_key#5 irq_context: 0 sb_writers &type->i_mutex_dir_key#2 &s->s_inode_list_lock irq_context: 0 sb_writers &type->i_mutex_dir_key#2 tk_core.seq.seqcount irq_context: 0 sb_writers &type->i_mutex_dir_key#2 &rq->__lock irq_context: 0 sb_writers &type->i_mutex_dir_key#2 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers &type->i_mutex_dir_key#2 batched_entropy_u32.lock irq_context: 0 sb_writers &type->i_mutex_dir_key#2 &sb->s_type->i_lock_key#5 &dentry->d_lock irq_context: 0 &sb->s_type->i_mutex_key#4 irq_context: 0 rtnl_mutex &ndev->lock &ifa->lock rcu_read_lock &pool->lock irq_context: 0 rtnl_mutex &ndev->lock &ifa->lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 bpf_dispatcher_xdp.mutex &rnp->exp_lock irq_context: 0 bpf_dispatcher_xdp.mutex 
rcu_state.exp_mutex irq_context: 0 bpf_dispatcher_xdp.mutex rcu_state.exp_mutex rcu_state.exp_mutex.wait_lock irq_context: 0 nl_table_lock nl_table_wait.lock irq_context: 0 bpf_dispatcher_xdp.mutex rcu_state.exp_mutex &rnp->exp_wq[2] irq_context: 0 sb_writers &dentry->d_lock irq_context: 0 sb_writers tomoyo_ss irq_context: 0 sb_writers tomoyo_ss &rq->__lock irq_context: 0 sb_writers tomoyo_ss &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers tomoyo_ss mmu_notifier_invalidate_range_start irq_context: 0 sb_writers tomoyo_ss rcu_read_lock mount_lock.seqcount irq_context: 0 sb_writers tomoyo_ss rcu_read_lock rcu_read_lock rename_lock.seqcount irq_context: 0 sb_writers tomoyo_ss &obj_hash[i].lock irq_context: 0 sb_writers &xattrs->lock irq_context: 0 &pipe->mutex/1 fs_reclaim &rq->__lock irq_context: 0 &pipe->mutex/1 fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock &in_dev->mc_tomb_lock rcu_read_lock &c->lock irq_context: 0 sb_writers tomoyo_ss &c->lock irq_context: 0 sb_writers tomoyo_ss &____s->seqcount#2 irq_context: 0 sb_writers tomoyo_ss &____s->seqcount irq_context: 0 bpf_dispatcher_xdp.mutex rcu_state.exp_mutex &rnp->exp_wq[1] irq_context: 0 rtnl_mutex sk_lock-AF_INET &im->lock &base->lock irq_context: 0 rtnl_mutex sk_lock-AF_INET &im->lock &base->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex sk_lock-AF_INET &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 rtnl_mutex &im->lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 rtnl_mutex rcu_read_lock rcu_node_0 irq_context: 0 &sb->s_type->i_mutex_key#10 rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 rtnl_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 rtnl_mutex &____s->seqcount irq_context: 0 &sb->s_type->i_mutex_key#10 rtnl_mutex rcu_read_lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pcpu_lock stock_lock rcu_read_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 nfnl_subsys_ipset nlk_cb_mutex-NETFILTER &cfs_rq->removed.lock irq_context: 0 nfnl_subsys_ipset nlk_cb_mutex-NETFILTER &obj_hash[i].lock irq_context: 0 nfnl_subsys_ipset rlock-AF_NETLINK irq_context: 0 bpf_dispatcher_xdp.mutex &rnp->exp_wq[3] irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock rcu_read_lock pcpu_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock rcu_read_lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 rtnl_mutex &c->lock irq_context: 0 &sb->s_type->i_mutex_key#10 rtnl_mutex.wait_lock irq_context: 0 &mm->mmap_lock &lruvec->lru_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 nfnl_subsys_ulog nf_log_mutex irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &fsnotify_mark_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock &in_dev->mc_tomb_lock &____s->seqcount#2 irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 tomoyo_ss rcu_read_lock rcu_read_lock &cfs_rq->removed.lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 tomoyo_ss rcu_read_lock rcu_read_lock 
&obj_hash[i].lock irq_context: 0 &type->s_umount_key#22/1 pcpu_alloc_mutex pcpu_alloc_mutex.wait_lock irq_context: 0 &f->f_lock fasync_lock irq_context: 0 cb_lock nlk_cb_mutex-GENERIC fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 cb_lock nlk_cb_mutex-GENERIC fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock delayed_uprobe_lock delayed_uprobe_lock.wait_lock irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock delayed_uprobe_lock &rq->__lock irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock delayed_uprobe_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 delayed_uprobe_lock.wait_lock irq_context: 0 &xt[i].mutex &mm->mmap_lock pool_lock#2 irq_context: 0 rtnl_mutex &idev->mc_lock &batadv_netdev_addr_lock_key &c->lock irq_context: 0 rtnl_mutex &rcu_state.expedited_wq &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)kblockd (work_completion)(&(&q->requeue_work)->work) rcu_read_lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)kblockd (work_completion)(&(&q->requeue_work)->work) rcu_read_lock fill_pool_map-wait-type-override pool_lock irq_context: 0 sk_lock-AF_INET6 ip6_fl_lock irq_context: 0 sk_lock-AF_INET6 cpu_hotplug_lock jump_label_mutex irq_context: 0 sk_lock-AF_INET6 cpu_hotplug_lock jump_label_mutex text_mutex irq_context: 0 sk_lock-AF_INET6 cpu_hotplug_lock jump_label_mutex text_mutex &rq->__lock irq_context: 0 sk_lock-AF_INET6 cpu_hotplug_lock jump_label_mutex text_mutex ptlock_ptr(page)#2 irq_context: 0 sk_lock-AF_INET6 rcu_read_lock ip6_fl_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_lock_key#22 &xa->xa_lock#6 pool_lock#2 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &sem->wait_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)events (work_completion)(&(&krcp->krw_arr[i].rcu_work)->work) rcu_callback rcu_read_lock rcu_node_0 irq_context: 0 kn->active#5 rcu_node_0 irq_context: 0 sb_writers#8 &of->mutex kn->active#5 uevent_sock_mutex fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events free_ipc_work sb_lock &obj_hash[i].lock pool_lock irq_context: 0 &xa->xa_lock#6 batched_entropy_u8.lock irq_context: 0 namespace_sem &cfs_rq->removed.lock irq_context: 0 namespace_sem &obj_hash[i].lock irq_context: 0 &xa->xa_lock#6 kfence_freelist_lock irq_context: 0 &sb->s_type->i_mutex_key#10 ip6_sk_fl_lock irq_context: 0 &sb->s_type->i_mutex_key#10 ip6_fl_lock irq_context: 0 &sb->s_type->i_mutex_key#10 ip6_fl_lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 ip6_fl_lock &base->lock irq_context: 0 &sb->s_type->i_mutex_key#10 ip6_fl_lock &base->lock &obj_hash[i].lock irq_context: 0 pcpu_alloc_mutex &rcu_state.expedited_wq irq_context: 0 pcpu_alloc_mutex &rcu_state.expedited_wq &p->pi_lock irq_context: 0 pcpu_alloc_mutex &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 pcpu_alloc_mutex &rcu_state.expedited_wq &p->pi_lock &rq->__lock 
&per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &anon_vma->rwsem &pcp->lock &zone->lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &anon_vma->rwsem &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &sighand->siglock &____s->seqcount#2 irq_context: 0 &sighand->siglock &____s->seqcount irq_context: 0 (wq_completion)events (work_completion)(&aux->work) rcu_node_0 irq_context: 0 (wq_completion)events (work_completion)(&aux->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 vlan_ioctl_mutex rtnl_mutex &root->kernfs_rwsem kernfs_idr_lock &obj_hash[i].lock pool_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &app->lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex (&app->join_timer) irq_context: 0 vlan_ioctl_mutex rtnl_mutex (&app->periodic_timer) irq_context: 0 vlan_ioctl_mutex rtnl_mutex &list->lock#14 irq_context: 0 vlan_ioctl_mutex rtnl_mutex (&app->join_timer)#2 irq_context: 0 vlan_ioctl_mutex rtnl_mutex &app->lock#2 irq_context: 0 vlan_ioctl_mutex rtnl_mutex &list->lock#15 irq_context: 0 vlan_ioctl_mutex rcu_state.barrier_mutex &rq->__lock &cfs_rq->removed.lock irq_context: 0 rtnl_mutex &bat_priv->softif_vlan_list_lock &____s->seqcount#2 irq_context: 0 rtnl_mutex &bat_priv->softif_vlan_list_lock &n->list_lock irq_context: 0 rtnl_mutex &bat_priv->softif_vlan_list_lock &n->list_lock &c->lock irq_context: 0 &p->lock remove_cache_srcu rcu_read_lock &rcu_state.expedited_wq irq_context: 0 &p->lock remove_cache_srcu rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 &p->lock remove_cache_srcu rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 &p->lock remove_cache_srcu rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex (&q->perturb_timer) irq_context: 0 pernet_ops_rwsem rtnl_mutex bpf_devs_lock rcu_read_lock rcu_node_0 irq_context: 0 pernet_ops_rwsem rtnl_mutex bpf_devs_lock rcu_read_lock &rq->__lock irq_context: 0 &type->i_mutex_dir_key#3 sb_writers#4 remove_cache_srcu rcu_read_lock rcu_node_0 irq_context: 0 rtnl_mutex &lapb->lock &c->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock batched_entropy_u8.lock crngs.lock irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock fill_pool_map-wait-type-override &c->lock irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock fill_pool_map-wait-type-override pool_lock irq_context: softirq rcu_callback rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: softirq rcu_callback rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex lapb_list_lock &c->lock irq_context: 0 pernet_ops_rwsem ipvs->est_mutex quarantine_lock irq_context: 0 &mm->mmap_lock &anon_vma->rwsem rcu_node_0 irq_context: 0 &mm->mmap_lock &anon_vma->rwsem &rcu_state.expedited_wq irq_context: 0 &mm->mmap_lock &anon_vma->rwsem &rcu_state.expedited_wq &p->pi_lock 
irq_context: 0 &mm->mmap_lock &anon_vma->rwsem &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 &mm->mmap_lock &anon_vma->rwsem &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 &____s->seqcount#2 irq_context: 0 rtnl_mutex &idev->mc_lock &bridge_netdev_addr_lock_key &n->list_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 &pcp->lock &zone->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 &____s->seqcount irq_context: 0 rtnl_mutex &idev->mc_lock &bridge_netdev_addr_lock_key &n->list_lock &c->lock irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock rcu_read_lock rcu_node_0 irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock rcu_read_lock &rq->__lock irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 vlan_ioctl_mutex &cfs_rq->removed.lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_QIPCRTR &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_QIPCRTR &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)rcu_gp (work_completion)(&sdp->work) &rcu_state.expedited_wq irq_context: 0 (wq_completion)rcu_gp (work_completion)(&sdp->work) &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&sdp->work) &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&sdp->work) &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem rcu_node_0 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem &rcu_state.expedited_wq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem &rcu_state.expedited_wq &p->pi_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem &rq->__lock &cfs_rq->removed.lock irq_context: 0 rtnl_mutex &block->cb_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &mapping->i_mmap_rwsem &cfs_rq->removed.lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &mapping->i_mmap_rwsem &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &mapping->i_mmap_rwsem &rq->__lock &cfs_rq->removed.lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem &bgl->locks[i].lock &sbi->s_md_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem &sb->s_type->i_lock_key#22 irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_read_lock &cfs_rq->removed.lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_read_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 pernet_ops_rwsem netns_bpf_mutex &rq->__lock irq_context: 0 pernet_ops_rwsem netns_bpf_mutex 
&rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_read_lock &rcu_state.expedited_wq irq_context: 0 (wq_completion)rcu_gp (work_completion)(&rew->rew_work) rcu_state.exp_wake_mutex &rnp->exp_wq[2] &p->pi_lock &cfs_rq->removed.lock irq_context: 0 &sb->s_type->i_mutex_key#8 &rcu_state.expedited_wq &p->pi_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#8 &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) &rcu_state.expedited_wq &p->pi_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_state.exp_mutex &cfs_rq->removed.lock irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_state.exp_mutex pool_lock#2 irq_context: softirq net/ipv6/ip6_flowlabel.c:47 irq_context: softirq net/ipv6/ip6_flowlabel.c:47 ip6_fl_lock irq_context: softirq net/ipv6/ip6_flowlabel.c:47 ip6_fl_lock &obj_hash[i].lock irq_context: softirq net/ipv6/ip6_flowlabel.c:47 ip6_fl_lock &base->lock irq_context: softirq net/ipv6/ip6_flowlabel.c:47 ip6_fl_lock &base->lock &obj_hash[i].lock irq_context: softirq net/ipv6/ip6_flowlabel.c:47 ip6_fl_lock pool_lock#2 irq_context: 0 pernet_ops_rwsem rcu_read_lock rcu_read_lock pool_lock#2 irq_context: softirq net/ipv6/ip6_flowlabel.c:57 irq_context: softirq net/ipv6/ip6_flowlabel.c:57 rcu_read_lock &pool->lock irq_context: softirq net/ipv6/ip6_flowlabel.c:57 rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: softirq net/ipv6/ip6_flowlabel.c:57 rcu_read_lock &pool->lock &p->pi_lock irq_context: softirq net/ipv6/ip6_flowlabel.c:57 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: softirq net/ipv6/ip6_flowlabel.c:57 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events ((ipv6_flowlabel_exclusive).work).work irq_context: 0 (wq_completion)events ((ipv6_flowlabel_exclusive).work).work cpu_hotplug_lock irq_context: 0 (wq_completion)events ((ipv6_flowlabel_exclusive).work).work cpu_hotplug_lock jump_label_mutex irq_context: 0 (wq_completion)events ((ipv6_flowlabel_exclusive).work).work cpu_hotplug_lock jump_label_mutex text_mutex irq_context: 0 (wq_completion)events ((ipv6_flowlabel_exclusive).work).work cpu_hotplug_lock jump_label_mutex text_mutex ptlock_ptr(page)#2 irq_context: 0 (wq_completion)netns net_cleanup_work &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem pgd_lock irq_context: 0 pernet_ops_rwsem key irq_context: 0 &mm->mmap_lock pcpu_lock stock_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem batched_entropy_u8.lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem kfence_freelist_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem &meta->lock irq_context: 0 rtnl_mutex rcu_read_lock &tb->tb6_lock rlock-AF_NETLINK irq_context: 0 bpf_module_mutex irq_context: 0 &sighand->siglock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &journal->j_wait_updates &p->pi_lock &cfs_rq->removed.lock irq_context: 0 sk_lock-AF_INET rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 irq_context: 0 sk_lock-AF_INET rcu_read_lock 
&nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 batched_entropy_u8.lock irq_context: 0 sk_lock-AF_INET rcu_read_lock &nf_conntrack_locks[i]/1 irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET rcu_read_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET rcu_read_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET rcu_read_lock rcu_read_lock_bh rcu_read_lock &n->list_lock irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET rcu_read_lock rcu_read_lock_bh rcu_read_lock &n->list_lock &c->lock irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock_bh rcu_read_lock &n->list_lock irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock_bh rcu_read_lock &n->list_lock &c->lock irq_context: 0 rtnl_mutex _xmit_IPGRE &c->lock irq_context: 0 sb_writers#5 &sb->s_type->i_mutex_key#12 fs_reclaim &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 jbd2_handle &bgl->locks[i].lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 &sem->wait_lock irq_context: 0 fill_pool_map-wait-type-override rcu_read_lock &rcu_state.gp_wq irq_context: 0 fill_pool_map-wait-type-override rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 fill_pool_map-wait-type-override rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 fill_pool_map-wait-type-override rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 fill_pool_map-wait-type-override rcu_read_lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_state.exp_mutex fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_state.exp_mutex fill_pool_map-wait-type-override &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_state.exp_mutex fill_pool_map-wait-type-override &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_state.exp_mutex fill_pool_map-wait-type-override pool_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle remove_cache_srcu &c->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle remove_cache_srcu &n->list_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle remove_cache_srcu &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 
jbd2_handle remove_cache_srcu &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET remove_cache_srcu &rq->__lock &cfs_rq->removed.lock irq_context: 0 hashlimit_mutex irq_context: 0 hashlimit_mutex fs_reclaim irq_context: 0 hashlimit_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 hashlimit_mutex &c->lock irq_context: 0 hashlimit_mutex pool_lock#2 irq_context: 0 hashlimit_mutex free_vmap_area_lock irq_context: 0 hashlimit_mutex vmap_area_lock irq_context: 0 hashlimit_mutex &____s->seqcount irq_context: 0 hashlimit_mutex init_mm.page_table_lock irq_context: 0 hashlimit_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 hashlimit_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 hashlimit_mutex proc_subdir_lock irq_context: 0 hashlimit_mutex proc_inum_ida.xa_lock irq_context: 0 hashlimit_mutex proc_subdir_lock irq_context: 0 hashlimit_mutex &obj_hash[i].lock irq_context: 0 hashlimit_mutex &base->lock irq_context: 0 hashlimit_mutex &base->lock &obj_hash[i].lock irq_context: softirq &(&hinfo->gc_work)->timer irq_context: softirq &(&hinfo->gc_work)->timer rcu_read_lock &pool->lock irq_context: softirq &(&hinfo->gc_work)->timer rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: softirq &(&hinfo->gc_work)->timer rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&hinfo->gc_work)->work) irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&hinfo->gc_work)->work) &hinfo->lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&hinfo->gc_work)->work) &rq->__lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&hinfo->gc_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &tn->lock irq_context: 0 sk_lock-AF_BLUETOOTH-BTPROTO_SCO irq_context: 0 sk_lock-AF_BLUETOOTH-BTPROTO_SCO slock-AF_BLUETOOTH-BTPROTO_SCO irq_context: 0 slock-AF_BLUETOOTH-BTPROTO_SCO irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&hinfo->gc_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&hinfo->gc_work)->work) &base->lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&hinfo->gc_work)->work) &base->lock &obj_hash[i].lock irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET rcu_read_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#2 irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET rcu_read_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#2 irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount irq_context: softirq &(&hinfo->gc_work)->timer rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: softirq &(&hinfo->gc_work)->timer rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &xa->xa_lock#6 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem pool_lock#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &rq->__lock irq_context: 0 
sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &obj_hash[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem lock#4 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem lock#4 &lruvec->lru_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem lock#5 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &c->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &journal->j_state_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle lock#4 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle lock#5 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle pool_lock#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_es_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem pool_lock#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ei->i_es_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ei->i_prealloc_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &sbi->s_md_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &bgl->locks[i].lock &sbi->s_md_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &bgl->locks[i].lock &ei->i_prealloc_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &pa->pa_lock#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &obj_hash[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ei->i_raw_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &(ei->i_block_reservation_lock) irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &sb->s_type->i_lock_key#22 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem 
jbd2_handle &ei->i_data_sem &ei->i_es_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ei->i_es_lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ei->i_es_lock pool_lock#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &memcg->move_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &xa->xa_lock#6 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &xa->xa_lock#6 &s->s_inode_wblist_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &rcu_state.gp_wq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_raw_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &journal->j_wait_updates irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &journal->j_wait_updates &p->pi_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &journal->j_wait_updates &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &journal->j_wait_updates &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem tk_core.seq.seqcount irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &dd->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_read_lock &dd->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_read_lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_read_lock tk_core.seq.seqcount irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_read_lock &virtscsi_vq->vq_lock irq_context: 0 hashlimit_mutex &ent->pde_unload_lock irq_context: 0 (work_completion)(&(&hinfo->gc_work)->work) irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&barr->work) irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&barr->work) &x->wait#10 irq_context: 0 
(wq_completion)events_power_efficient (work_completion)(&barr->work) &x->wait#10 &p->pi_lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&barr->work) &x->wait#10 &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&barr->work) &x->wait#10 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&barr->work) &rq->__lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&barr->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &hinfo->lock irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET &pcp->lock &zone->lock &____s->seqcount irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock_bh rcu_read_lock &pcp->lock &zone->lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex &bat_priv->tt.commit_lock &____s->seqcount irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &base->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &base->lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_CAN &p->pi_lock &cfs_rq->removed.lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_PACKET rcu_state.exp_mutex &rnp->exp_wq[2] irq_context: 0 &dentry->d_lock &dentry->d_lock/1 &lru->node[i].lock rcu_read_lock &p->pi_lock irq_context: 0 &dentry->d_lock &dentry->d_lock/1 &lru->node[i].lock rcu_read_lock &p->pi_lock &rq->__lock irq_context: 0 &dentry->d_lock &dentry->d_lock/1 &lru->node[i].lock rcu_read_lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &dentry->d_lock &dentry->d_lock/1 &lru->node[i].lock rcu_read_lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 sk_lock-AF_INET remove_cache_srcu &cfs_rq->removed.lock irq_context: 0 sk_lock-AF_PACKET &mm->mmap_lock &sem->wait_lock irq_context: 0 sk_lock-AF_PACKET &mm->mmap_lock &rq->__lock irq_context: 0 sk_lock-AF_PACKET &mm->mmap_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_PACKET &sem->wait_lock irq_context: 0 sk_lock-AF_PACKET &p->pi_lock &cfs_rq->removed.lock irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET batched_entropy_u8.lock irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET kfence_freelist_lock irq_context: 0 &mm->mmap_lock &po->pg_vec_lock &vma->vm_lock->lock irq_context: 0 &mm->mmap_lock &po->pg_vec_lock ptlock_ptr(page)#2 irq_context: 0 &mm->mmap_lock &po->pg_vec_lock ptlock_ptr(page)#2 key irq_context: 0 sk_lock-AF_PACKET &mm->mmap_lock ptlock_ptr(page)#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &sbi->s_orphan_lock &cfs_rq->removed.lock irq_context: 0 &mm->mmap_lock &po->pg_vec_lock &rq->__lock irq_context: 0 &mm->mmap_lock &po->pg_vec_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET rcu_read_lock rcu_read_lock_bh rcu_read_lock &pcp->lock &zone->lock irq_context: 0 sk_lock-AF_PACKET &____s->seqcount#2 irq_context: 0 sk_lock-AF_PACKET text_mutex &rq->__lock irq_context: 0 sk_lock-AF_PACKET text_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_PACKET pcpu_alloc_mutex &rq->__lock irq_context: 0 sk_lock-AF_PACKET pcpu_alloc_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_PACKET &obj_hash[i].lock 
pool_lock irq_context: 0 sk_lock-AF_PACKET &cfs_rq->removed.lock irq_context: 0 &kernfs_locks->open_file_mutex[count] rcu_read_lock &rcu_state.gp_wq irq_context: 0 &kernfs_locks->open_file_mutex[count] rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 &kernfs_locks->open_file_mutex[count] rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 &kernfs_locks->open_file_mutex[count] rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) quarantine_lock irq_context: 0 &tfile->napi_mutex &____s->seqcount#2 irq_context: 0 sk_lock-AF_INET cpu_hotplug_lock irq_context: 0 sk_lock-AF_INET cpu_hotplug_lock jump_label_mutex irq_context: 0 sk_lock-AF_INET cpu_hotplug_lock jump_label_mutex &rq->__lock irq_context: 0 sk_lock-AF_INET cpu_hotplug_lock jump_label_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_INET cpu_hotplug_lock jump_label_mutex text_mutex irq_context: 0 sk_lock-AF_INET cpu_hotplug_lock jump_label_mutex text_mutex ptlock_ptr(page)#2 irq_context: 0 sk_lock-AF_INET cpu_hotplug_lock jump_label_mutex.wait_lock irq_context: 0 sk_lock-AF_INET cpu_hotplug_lock &p->pi_lock irq_context: 0 sk_lock-AF_INET cpu_hotplug_lock &p->pi_lock &rq->__lock irq_context: 0 sk_lock-AF_INET cpu_hotplug_lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_INET cpu_hotplug_lock &rq->__lock irq_context: 0 sk_lock-AF_INET cpu_hotplug_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_INET tcp_md5sig_mutex irq_context: 0 sk_lock-AF_INET tcp_md5sig_mutex crypto_alg_sem irq_context: 0 sk_lock-AF_INET tcp_md5sig_mutex fs_reclaim irq_context: 0 sk_lock-AF_INET tcp_md5sig_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sk_lock-AF_INET tcp_md5sig_mutex pool_lock#2 irq_context: 0 sk_lock-AF_INET tcp_md5sig_mutex &c->lock irq_context: 0 sk_lock-AF_INET tcp_md5sig_mutex &n->list_lock irq_context: 0 sk_lock-AF_INET tcp_md5sig_mutex &n->list_lock &c->lock irq_context: 0 sk_lock-AF_INET tcp_md5sig_mutex &rq->__lock irq_context: 0 sk_lock-AF_INET tcp_md5sig_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET slock-AF_INET krc.lock irq_context: 0 nf_connlabels_lock irq_context: 0 &sb->s_type->i_mutex_key#9 rcu_read_lock &rcu_state.gp_wq irq_context: 0 &sb->s_type->i_mutex_key#9 rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 &sb->s_type->i_mutex_key#9 rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#9 rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 nl_table_wait.lock &p->pi_lock &rq->__lock irq_context: 0 nl_table_wait.lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 nf_sockopt_mutex &cfs_rq->removed.lock irq_context: 0 nf_sockopt_mutex &obj_hash[i].lock irq_context: 0 nf_sockopt_mutex pool_lock#2 irq_context: 0 sb_writers#4 &iint->mutex &lock->wait_lock irq_context: 0 sb_writers#4 &iint->mutex &rq->__lock irq_context: 0 sb_writers#4 &iint->mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_PACKET &rnp->exp_wq[0] irq_context: 0 &sighand->siglock batched_entropy_u8.lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 &rcu_state.expedited_wq irq_context: 0 &type->i_mutex_dir_key#3 rcu_read_lock 
&rcu_state.expedited_wq irq_context: 0 &type->i_mutex_dir_key#3 rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 &type->i_mutex_dir_key#3 rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 &type->i_mutex_dir_key#3 rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &tfile->napi_mutex rcu_read_lock rcu_read_lock pcpu_lock irq_context: 0 &tfile->napi_mutex rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 &tfile->napi_mutex rcu_read_lock rcu_read_lock pool_lock#2 irq_context: softirq net/ipv4/tcp_ipv4.c:1061 irq_context: softirq net/ipv4/tcp_ipv4.c:1061 rcu_read_lock &pool->lock irq_context: softirq net/ipv4/tcp_ipv4.c:1061 rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: softirq net/ipv4/tcp_ipv4.c:1061 rcu_read_lock &pool->lock &p->pi_lock irq_context: softirq net/ipv4/tcp_ipv4.c:1061 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: softirq net/ipv4/tcp_ipv4.c:1061 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events ((tcp_md5_needed).work).work irq_context: 0 (wq_completion)events ((tcp_md5_needed).work).work cpu_hotplug_lock irq_context: 0 (wq_completion)events ((tcp_md5_needed).work).work cpu_hotplug_lock jump_label_mutex irq_context: 0 (wq_completion)events ((tcp_md5_needed).work).work cpu_hotplug_lock jump_label_mutex text_mutex irq_context: 0 (wq_completion)events ((tcp_md5_needed).work).work cpu_hotplug_lock jump_label_mutex text_mutex ptlock_ptr(page)#2 irq_context: 0 &pipe->mutex/1 nfnl_subsys_nftables irq_context: 0 &pipe->mutex/1 nfnl_subsys_nftables &nft_net->commit_mutex irq_context: 0 &pipe->mutex/1 &nft_net->commit_mutex irq_context: 0 &pipe->mutex/1 &nft_net->commit_mutex fs_reclaim irq_context: 0 &pipe->mutex/1 &nft_net->commit_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &pipe->mutex/1 &nft_net->commit_mutex stock_lock irq_context: 0 &pipe->mutex/1 &nft_net->commit_mutex pool_lock#2 irq_context: 0 &pipe->mutex/1 &nft_net->commit_mutex batched_entropy_u32.lock irq_context: 0 &pipe->mutex/1 &nft_net->commit_mutex &rq->__lock irq_context: 0 &pipe->mutex/1 &nft_net->commit_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &pipe->mutex/1 &nft_net->commit_mutex &obj_hash[i].lock irq_context: 0 &pipe->mutex/1 &nft_net->commit_mutex (console_sem).lock irq_context: 0 &pipe->mutex/1 &nft_net->commit_mutex console_lock console_srcu console_owner_lock irq_context: 0 &pipe->mutex/1 &nft_net->commit_mutex console_lock console_srcu console_owner irq_context: 0 &pipe->mutex/1 &nft_net->commit_mutex console_lock console_srcu console_owner &port_lock_key irq_context: 0 &pipe->mutex/1 &nft_net->commit_mutex console_lock console_srcu console_owner console_owner_lock irq_context: 0 &pipe->mutex/1 &nft_net->commit_mutex rcu_read_lock rhashtable_bucket irq_context: 0 &pipe->mutex/1 &nft_net->commit_mutex rcu_node_0 irq_context: 0 &pipe->mutex/1 &nft_net->commit_mutex &c->lock irq_context: 0 &pipe->mutex/1 &nft_net->commit_mutex console_owner_lock irq_context: 0 &pipe->mutex/1 &nft_net->commit_mutex console_owner irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock batched_entropy_u8.lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh 
rcu_read_lock &br->multicast_lock kfence_freelist_lock irq_context: 0 &pipe->mutex/1 &nft_net->commit_mutex &n->list_lock irq_context: 0 &pipe->mutex/1 &nft_net->commit_mutex &n->list_lock &c->lock irq_context: 0 &pipe->mutex/1 &nft_net->commit_mutex rcu_read_lock rcu_node_0 irq_context: 0 &pipe->mutex/1 &nft_net->commit_mutex rcu_read_lock &rq->__lock irq_context: 0 &pipe->mutex/1 &nft_net->commit_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &pipe->mutex/1 &nft_net->commit_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 &pipe->mutex/1 &nft_net->commit_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &pipe->mutex/1 &nft_net->commit_mutex batched_entropy_u8.lock irq_context: 0 &pipe->mutex/1 &nft_net->commit_mutex kfence_freelist_lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx rcu_state.exp_mutex &rnp->exp_wq[2] irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx rcu_read_lock rcu_node_0 irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx rcu_read_lock &rq->__lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &pipe->mutex/1 &nft_net->commit_mutex rcu_read_lock &rcu_state.gp_wq irq_context: 0 &pipe->mutex/1 &nft_net->commit_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 &pipe->mutex/1 &nft_net->commit_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 &pipe->mutex/1 &nft_net->commit_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &pipe->mutex/1 &nft_net->commit_mutex rcu_read_lock &rcu_state.expedited_wq irq_context: 0 &pipe->mutex/1 &nft_net->commit_mutex rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 &pipe->mutex/1 &nft_net->commit_mutex rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 &pipe->mutex/1 &nft_net->commit_mutex rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx rcu_read_lock &pool->lock/1 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->chanctx_mtx irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &rdev->wiphy_work_lock irq_context: 0 rtnl_mutex &lock->wait_lock irq_context: 0 (wq_completion)events wireless_nlevent_work net_rwsem quarantine_lock irq_context: 0 (wq_completion)cfg80211 (work_completion)(&(&rdev->dfs_update_channels_wk)->work) &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &ifibss->incomplete_lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->mtx irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->mtx &rq->__lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->mtx &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->mtx &local->chanctx_mtx irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &wdev->mtx rcu_state.exp_mutex rcu_node_0 irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &wdev->mtx rcu_state.exp_mutex &obj_hash[i].lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &wdev->mtx 
rcu_state.exp_mutex rcu_read_lock &pool->lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &wdev->mtx rcu_state.exp_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &wdev->mtx rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &wdev->mtx rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &wdev->mtx rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &wdev->mtx rcu_state.exp_mutex &rq->__lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &wdev->mtx rcu_state.exp_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &pipe->mutex/1 &nft_net->commit_mutex &meta->lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &obj_hash[i].lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &list->lock#18 irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &wdev->mtx (&ifibss->timer) irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &base->lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &base->lock &obj_hash[i].lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &rq->__lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &local->key_mtx irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &wdev->mtx rcu_read_lock &pool->lock/1 irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &wdev->mtx rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &wdev->mtx rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &wdev->mtx rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &wdev->mtx rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &pipe->mutex/1 &nft_net->commit_mutex rcu_read_lock rcu_read_lock &pool->lock irq_context: 0 &pipe->mutex/1 &nft_net->commit_mutex rcu_read_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 &pipe->mutex/1 &nft_net->commit_mutex rcu_read_lock rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 &pipe->mutex/1 &nft_net->commit_mutex rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 &pipe->mutex/1 &nft_net->commit_mutex rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 &pipe->mutex/1 &nft_net->commit_mutex rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &local->queue_stop_reason_lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx rcu_state.exp_mutex rcu_node_0 irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx rcu_state.exp_mutex &obj_hash[i].lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx rcu_state.exp_mutex rcu_read_lock &pool->lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx rcu_state.exp_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx 
rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx rcu_state.exp_mutex &rq->__lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx rcu_state.exp_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &pipe->mutex/1 &nft_net->commit_mutex &rnp->exp_lock irq_context: 0 &pipe->mutex/1 &nft_net->commit_mutex rcu_state.exp_mutex irq_context: 0 &pipe->mutex/1 &nft_net->commit_mutex rcu_state.exp_mutex rcu_state.exp_mutex.wait_lock irq_context: 0 &pipe->mutex/1 &nft_net->commit_mutex rcu_state.exp_mutex &rq->__lock irq_context: 0 &pipe->mutex/1 &nft_net->commit_mutex rcu_state.exp_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx rcu_state.exp_mutex.wait_lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &p->pi_lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &p->pi_lock &rq->__lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &pipe->mutex/1 &nft_net->commit_mutex rcu_state.exp_mutex rcu_node_0 irq_context: 0 &pipe->mutex/1 &nft_net->commit_mutex rcu_state.exp_mutex &obj_hash[i].lock irq_context: 0 &pipe->mutex/1 &nft_net->commit_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock irq_context: 0 &pipe->mutex/1 &nft_net->commit_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 &pipe->mutex/1 &nft_net->commit_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 &pipe->mutex/1 &nft_net->commit_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 &pipe->mutex/1 &nft_net->commit_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &pipe->mutex/1 &nft_net->commit_mutex (work_completion)(&ht->run_work) irq_context: 0 &pipe->mutex/1 &nft_net->commit_mutex rcu_read_lock &pool->lock irq_context: 0 &pipe->mutex/1 &nft_net->commit_mutex &ht->mutex irq_context: 0 &pipe->mutex/1 &nft_net->commit_mutex &ht->mutex &obj_hash[i].lock irq_context: 0 &pipe->mutex/1 &nft_net->commit_mutex &ht->mutex pool_lock#2 irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &local->mtx irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &local->sta_mtx irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx _xmit_ETHER irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx _xmit_ETHER &local->filter_lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx _xmit_ETHER &local->filter_lock &obj_hash[i].lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx _xmit_ETHER &local->filter_lock pool_lock#2 irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx _xmit_ETHER &local->filter_lock krc.lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx (&local->dynamic_ps_timer) irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx (work_completion)(&local->dynamic_ps_enable_work) irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx (work_completion)(&sdata->recalc_smps) irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx (work_completion)(&link->csa_finalize_work) irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx rcu_read_lock &pool->lock/1 irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx (work_completion)(&link->color_change_finalize_work) irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx (work_completion)(&link->color_change_finalize_work) &rq->__lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx 
(work_completion)(&link->color_change_finalize_work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx (work_completion)(&(&link->dfs_cac_timer_work)->work) irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &local->key_mtx rcu_state.exp_mutex rcu_node_0 irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &local->key_mtx rcu_state.exp_mutex &obj_hash[i].lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &local->key_mtx rcu_state.exp_mutex &rq->__lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &local->key_mtx rcu_state.exp_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &local->key_mtx rcu_state.exp_mutex rcu_read_lock &pool->lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &local->key_mtx rcu_state.exp_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &local->key_mtx rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &local->key_mtx rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &local->key_mtx rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &local->key_mtx &obj_hash[i].lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &list->lock#18 irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &local->filter_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 tomoyo_ss mount_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 tomoyo_ss mount_lock rcu_read_lock rename_lock.seqcount irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx rcu_read_lock &fq->lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx rcu_read_lock &local->queue_stop_reason_lock irq_context: 0 rtnl_mutex &chain->filter_chain_lock &block->lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock ip6_fl_lock batched_entropy_u32.lock irq_context: 0 rtnl_mutex &hard_iface->bat_iv.ogm_buff_mutex &rq->__lock irq_context: 0 rtnl_mutex &hard_iface->bat_iv.ogm_buff_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &hard_iface->bat_iv.ogm_buff_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET/1 fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET/1 fill_pool_map-wait-type-override &c->lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET/1 fill_pool_map-wait-type-override &n->list_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET/1 fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET/1 fill_pool_map-wait-type-override pool_lock irq_context: softirq rcu_callback ip6_sk_fl_lock irq_context: softirq rcu_callback ip6_fl_lock irq_context: softirq rcu_callback ip6_fl_lock &obj_hash[i].lock irq_context: softirq rcu_callback ip6_fl_lock &base->lock irq_context: softirq rcu_callback ip6_fl_lock &base->lock &obj_hash[i].lock irq_context: 0 &nft_net->commit_mutex &base->lock irq_context: 0 &nft_net->commit_mutex &base->lock &obj_hash[i].lock irq_context: 0 &nft_net->commit_mutex flowtable_lock irq_context: 0 &nft_net->commit_mutex (work_completion)(&(&flowtable->gc_work)->work) irq_context: 0 &nft_net->commit_mutex (work_completion)(&(&flowtable->gc_work)->work) &rq->__lock irq_context: 
0 &nft_net->commit_mutex (work_completion)(&(&flowtable->gc_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &nft_net->commit_mutex &ht->lock irq_context: 0 &nft_net->commit_mutex rcu_read_lock &ht->lock irq_context: 0 &pipe->mutex/1 &nft_net->commit_mutex rcu_state.exp_mutex &rnp->exp_wq[2] irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_read_lock fill_pool_map-wait-type-override rcu_node_0 irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_read_lock fill_pool_map-wait-type-override &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_read_lock fill_pool_map-wait-type-override &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 &match->lock irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock &im->lock rcu_read_lock &n->list_lock irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock &im->lock rcu_read_lock &n->list_lock &c->lock irq_context: 0 rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock irq_context: 0 rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &idev->mc_lock rcu_read_lock &cfs_rq->removed.lock irq_context: 0 rtnl_mutex &idev->mc_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 quarantine_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock remove_cache_srcu rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock remove_cache_srcu rcu_read_lock &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock remove_cache_srcu rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock remove_cache_srcu rcu_read_lock &rcu_state.gp_wq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock remove_cache_srcu rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock remove_cache_srcu rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock remove_cache_srcu rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 rcu_read_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 fill_pool_map-wait-type-override &c->lock irq_context: 0 tomoyo_ss remove_cache_srcu rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 tomoyo_ss remove_cache_srcu rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 tomoyo_ss remove_cache_srcu rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 tomoyo_ss remove_cache_srcu rcu_read_lock &rcu_state.gp_wq irq_context: 0 tomoyo_ss remove_cache_srcu rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 tomoyo_ss remove_cache_srcu rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 tomoyo_ss remove_cache_srcu rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 
nfnl_subsys_nftables &nft_net->commit_mutex &lock->wait_lock irq_context: 0 nfnl_subsys_nftables &nft_net->commit_mutex &rq->__lock irq_context: 0 nfnl_subsys_nftables &nft_net->commit_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &rq->__lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle lock#4 &lruvec->lru_lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock rcu_node_0 irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &rq->__lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &nft_net->commit_mutex &rnp->exp_wq[0] irq_context: 0 cb_lock fs_reclaim &rq->__lock irq_context: 0 cb_lock fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 br_ioctl_mutex rtnl_mutex fs_reclaim irq_context: 0 br_ioctl_mutex rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 br_ioctl_mutex rtnl_mutex stock_lock irq_context: 0 br_ioctl_mutex rtnl_mutex &c->lock irq_context: 0 br_ioctl_mutex rtnl_mutex &____s->seqcount#2 irq_context: 0 br_ioctl_mutex rtnl_mutex &____s->seqcount irq_context: 0 br_ioctl_mutex rtnl_mutex pool_lock#2 irq_context: 0 br_ioctl_mutex rtnl_mutex stack_depot_init_mutex irq_context: 0 br_ioctl_mutex rtnl_mutex crngs.lock irq_context: 0 br_ioctl_mutex rtnl_mutex &obj_hash[i].lock irq_context: 0 br_ioctl_mutex rtnl_mutex pcpu_alloc_mutex irq_context: 0 br_ioctl_mutex rtnl_mutex pcpu_alloc_mutex pcpu_lock irq_context: 0 br_ioctl_mutex rtnl_mutex batched_entropy_u32.lock irq_context: 0 br_ioctl_mutex rtnl_mutex &obj_hash[i].lock pool_lock irq_context: 0 br_ioctl_mutex rtnl_mutex net_rwsem irq_context: 0 br_ioctl_mutex rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 br_ioctl_mutex rtnl_mutex &tn->lock irq_context: 0 br_ioctl_mutex rtnl_mutex &x->wait#9 irq_context: 0 br_ioctl_mutex rtnl_mutex &k->list_lock irq_context: 0 br_ioctl_mutex rtnl_mutex gdp_mutex irq_context: 0 br_ioctl_mutex rtnl_mutex gdp_mutex &k->list_lock irq_context: 0 br_ioctl_mutex rtnl_mutex lock irq_context: 0 br_ioctl_mutex rtnl_mutex lock kernfs_idr_lock irq_context: 0 br_ioctl_mutex rtnl_mutex &root->kernfs_rwsem irq_context: 0 br_ioctl_mutex rtnl_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 br_ioctl_mutex rtnl_mutex bus_type_sem irq_context: 0 br_ioctl_mutex rtnl_mutex sysfs_symlink_target_lock irq_context: 0 br_ioctl_mutex rtnl_mutex lock kernfs_idr_lock pool_lock#2 irq_context: 0 br_ioctl_mutex rtnl_mutex &root->kernfs_rwsem irq_context: 0 br_ioctl_mutex rtnl_mutex &dev->power.lock irq_context: 0 br_ioctl_mutex rtnl_mutex dpm_list_mtx irq_context: 0 br_ioctl_mutex rtnl_mutex uevent_sock_mutex irq_context: 0 br_ioctl_mutex rtnl_mutex uevent_sock_mutex fs_reclaim irq_context: 0 br_ioctl_mutex rtnl_mutex uevent_sock_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 br_ioctl_mutex rtnl_mutex uevent_sock_mutex pool_lock#2 irq_context: 0 br_ioctl_mutex rtnl_mutex uevent_sock_mutex nl_table_lock irq_context: 0 br_ioctl_mutex rtnl_mutex uevent_sock_mutex &obj_hash[i].lock irq_context: 0 br_ioctl_mutex rtnl_mutex uevent_sock_mutex nl_table_wait.lock irq_context: 0 br_ioctl_mutex rtnl_mutex uevent_sock_mutex &rq->__lock irq_context: 0 br_ioctl_mutex rtnl_mutex uevent_sock_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 
br_ioctl_mutex rtnl_mutex subsys mutex#17 irq_context: 0 br_ioctl_mutex rtnl_mutex subsys mutex#17 &k->k_lock irq_context: 0 br_ioctl_mutex rtnl_mutex subsys mutex#17 &rq->__lock irq_context: 0 br_ioctl_mutex rtnl_mutex subsys mutex#17 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 br_ioctl_mutex rtnl_mutex &dir->lock#2 irq_context: 0 br_ioctl_mutex rtnl_mutex rcu_read_lock pool_lock#2 irq_context: 0 br_ioctl_mutex rtnl_mutex rcu_read_lock rcu_node_0 irq_context: 0 br_ioctl_mutex rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 br_ioctl_mutex rtnl_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 br_ioctl_mutex rtnl_mutex rcu_read_lock &rcu_state.gp_wq irq_context: 0 br_ioctl_mutex rtnl_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 br_ioctl_mutex rtnl_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 br_ioctl_mutex rtnl_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 br_ioctl_mutex rtnl_mutex uevent_sock_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 br_ioctl_mutex rtnl_mutex uevent_sock_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 br_ioctl_mutex rtnl_mutex uevent_sock_mutex &c->lock irq_context: 0 br_ioctl_mutex rtnl_mutex dev_hotplug_mutex irq_context: 0 br_ioctl_mutex rtnl_mutex dev_hotplug_mutex &dev->power.lock irq_context: 0 br_ioctl_mutex rtnl_mutex dev_base_lock irq_context: 0 br_ioctl_mutex rtnl_mutex input_pool.lock irq_context: 0 br_ioctl_mutex rtnl_mutex rcu_read_lock &pool->lock/1 irq_context: 0 br_ioctl_mutex rtnl_mutex rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 br_ioctl_mutex rtnl_mutex rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 br_ioctl_mutex rtnl_mutex rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 br_ioctl_mutex rtnl_mutex &tbl->lock irq_context: 0 br_ioctl_mutex rtnl_mutex &n->list_lock irq_context: 0 br_ioctl_mutex rtnl_mutex &n->list_lock &c->lock irq_context: 0 br_ioctl_mutex rtnl_mutex sysctl_lock irq_context: 0 br_ioctl_mutex rtnl_mutex nl_table_lock irq_context: 0 br_ioctl_mutex rtnl_mutex nl_table_wait.lock irq_context: 0 br_ioctl_mutex rtnl_mutex failover_lock irq_context: 0 br_ioctl_mutex rtnl_mutex proc_subdir_lock irq_context: 0 br_ioctl_mutex rtnl_mutex proc_inum_ida.xa_lock irq_context: 0 br_ioctl_mutex rtnl_mutex proc_subdir_lock irq_context: 0 br_ioctl_mutex rtnl_mutex nl_table_wait.lock &p->pi_lock irq_context: 0 br_ioctl_mutex rtnl_mutex nl_table_wait.lock &p->pi_lock &cfs_rq->removed.lock irq_context: 0 br_ioctl_mutex rtnl_mutex nl_table_wait.lock &p->pi_lock &rq->__lock irq_context: 0 br_ioctl_mutex rtnl_mutex nl_table_wait.lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 br_ioctl_mutex rtnl_mutex &idev->mc_lock irq_context: 0 br_ioctl_mutex rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 br_ioctl_mutex rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 br_ioctl_mutex rtnl_mutex &idev->mc_lock pool_lock#2 irq_context: 0 br_ioctl_mutex rtnl_mutex &idev->mc_lock &obj_hash[i].lock irq_context: 0 br_ioctl_mutex rtnl_mutex &idev->mc_lock &bridge_netdev_addr_lock_key irq_context: 0 br_ioctl_mutex rtnl_mutex &idev->mc_lock &bridge_netdev_addr_lock_key pool_lock#2 irq_context: 0 br_ioctl_mutex rtnl_mutex (switchdev_blocking_notif_chain).rwsem irq_context: 0 br_ioctl_mutex rtnl_mutex 
fill_pool_map-wait-type-override &rq->__lock irq_context: 0 br_ioctl_mutex rtnl_mutex fill_pool_map-wait-type-override &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 br_ioctl_mutex rtnl_mutex &br->hash_lock irq_context: 0 br_ioctl_mutex rtnl_mutex &br->hash_lock pool_lock#2 irq_context: 0 br_ioctl_mutex rtnl_mutex &br->hash_lock rcu_read_lock rhashtable_bucket irq_context: 0 br_ioctl_mutex rtnl_mutex &br->hash_lock nl_table_lock irq_context: 0 br_ioctl_mutex rtnl_mutex &br->hash_lock &obj_hash[i].lock irq_context: 0 br_ioctl_mutex rtnl_mutex &br->hash_lock nl_table_wait.lock irq_context: 0 br_ioctl_mutex rtnl_mutex rcu_read_lock rhashtable_bucket irq_context: 0 br_ioctl_mutex rtnl_mutex fs_reclaim &rq->__lock irq_context: 0 br_ioctl_mutex rtnl_mutex fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 br_ioctl_mutex rtnl_mutex &pnettable->lock irq_context: 0 br_ioctl_mutex rtnl_mutex smc_ib_devices.mutex irq_context: 0 br_ioctl_mutex rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 &cfs_rq->removed.lock irq_context: 0 br_ioctl_mutex rtnl_mutex &root->kernfs_rwsem &rq->__lock irq_context: 0 br_ioctl_mutex rtnl_mutex &root->kernfs_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 br_ioctl_mutex rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 rcu_read_lock &cfs_rq->removed.lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 rcu_read_lock &obj_hash[i].lock irq_context: 0 br_ioctl_mutex rtnl_mutex &root->kernfs_rwsem &sem->wait_lock irq_context: 0 br_ioctl_mutex rtnl_mutex &sem->wait_lock irq_context: 0 br_ioctl_mutex rtnl_mutex &p->pi_lock irq_context: 0 br_ioctl_mutex rtnl_mutex &p->pi_lock &rq->__lock irq_context: 0 br_ioctl_mutex rtnl_mutex &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6 &obj_hash[i].lock pool_lock irq_context: 0 pcpu_alloc_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 pcpu_alloc_mutex fs_reclaim &rq->__lock irq_context: 0 pcpu_alloc_mutex fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pcpu_alloc_mutex free_vmap_area_lock &obj_hash[i].lock irq_context: 0 pcpu_alloc_mutex free_vmap_area_lock pool_lock#2 irq_context: 0 pcpu_alloc_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rlock-AF_PPPOX irq_context: 0 pcpu_alloc_mutex batched_entropy_u8.lock irq_context: 0 pcpu_alloc_mutex kfence_freelist_lock irq_context: 0 pcpu_alloc_mutex purge_vmap_area_lock irq_context: 0 pcpu_alloc_mutex purge_vmap_area_lock &obj_hash[i].lock irq_context: 0 pcpu_alloc_mutex purge_vmap_area_lock pool_lock#2 irq_context: 0 pcpu_alloc_mutex rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)events pcpu_balance_work pcpu_alloc_mutex &cfs_rq->removed.lock irq_context: 0 (wq_completion)events pcpu_balance_work pcpu_alloc_mutex purge_vmap_area_lock irq_context: 0 (wq_completion)events pcpu_balance_work pcpu_alloc_mutex purge_vmap_area_lock &obj_hash[i].lock irq_context: 0 (wq_completion)events pcpu_balance_work pcpu_alloc_mutex purge_vmap_area_lock pool_lock#2 irq_context: 0 (wq_completion)events pcpu_balance_work pcpu_alloc_mutex rcu_read_lock &pool->lock irq_context: 0 (wq_completion)events pcpu_balance_work pcpu_alloc_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events pcpu_balance_work &rq->__lock irq_context: 0 
(wq_completion)events pcpu_balance_work &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events pcpu_balance_work pcpu_alloc_mutex &meta->lock irq_context: 0 (wq_completion)events pcpu_balance_work pcpu_alloc_mutex kfence_freelist_lock irq_context: 0 cb_lock nlk_cb_mutex-GENERIC genl_mutex rtnl_mutex irq_context: 0 sb_writers#8 tomoyo_ss rcu_read_lock rcu_read_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&map->work) rcu_state.exp_mutex &cfs_rq->removed.lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&map->work) rcu_state.exp_mutex pool_lock#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&map->work) rcu_state.exp_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&map->work) rcu_state.exp_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#40 rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#40 rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq &ret->b_uptodate_lock bit_wait_table + i irq_context: softirq &ret->b_uptodate_lock bit_wait_table + i &p->pi_lock irq_context: softirq &ret->b_uptodate_lock bit_wait_table + i &p->pi_lock &rq->__lock irq_context: softirq &ret->b_uptodate_lock bit_wait_table + i &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq &ret->b_uptodate_lock bit_wait_table + i &p->pi_lock &cfs_rq->removed.lock irq_context: 0 rtnl_mutex rcu_read_lock &n->lock irq_context: 0 rtnl_mutex rcu_read_lock &n->lock &____s->seqcount#9 irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 rcu_read_lock rcu_read_lock &cfs_rq->removed.lock irq_context: 0 lock map_idr_lock &n->list_lock irq_context: 0 lock map_idr_lock &n->list_lock &c->lock irq_context: 0 cb_lock nlk_cb_mutex-GENERIC genl_mutex rtnl_mutex rtnl_mutex.wait_lock irq_context: 0 cb_lock nlk_cb_mutex-GENERIC genl_mutex rtnl_mutex &rq->__lock irq_context: 0 cb_lock nlk_cb_mutex-GENERIC genl_mutex rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq net/ipv4/devinet.c:474 irq_context: softirq net/ipv4/devinet.c:474 rcu_read_lock &pool->lock irq_context: softirq net/ipv4/devinet.c:474 rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: softirq net/ipv4/devinet.c:474 rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 (wq_completion)wg-kex-wg1#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock rcu_read_lock_bh &peer->keypairs.keypair_update_lock &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg1#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock rcu_read_lock_bh &peer->keypairs.keypair_update_lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg1#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 
0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock rcu_read_lock_bh &peer->keypairs.keypair_update_lock pool_lock#2 irq_context: 0 (wq_completion)wg-kex-wg2#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock rcu_read_lock_bh &peer->keypairs.keypair_update_lock &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg2#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock rcu_read_lock_bh &peer->keypairs.keypair_update_lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg2#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock rcu_read_lock_bh &peer->keypairs.keypair_update_lock pool_lock#2 irq_context: 0 cb_lock nlk_cb_mutex-GENERIC genl_mutex rtnl_mutex.wait_lock irq_context: 0 cb_lock nlk_cb_mutex-GENERIC genl_mutex &p->pi_lock irq_context: 0 cb_lock nlk_cb_mutex-GENERIC genl_mutex &p->pi_lock &rq->__lock irq_context: 0 cb_lock nlk_cb_mutex-GENERIC genl_mutex &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock nlk_cb_mutex-GENERIC genl_mutex.wait_lock irq_context: 0 cb_lock nlk_cb_mutex-GENERIC &p->pi_lock &cfs_rq->removed.lock irq_context: 0 rtnl_mutex &idev->mc_lock &dev_addr_list_lock_key &obj_hash[i].lock irq_context: 0 rtnl_mutex &idev->mc_lock &dev_addr_list_lock_key krc.lock irq_context: 0 rtnl_mutex uevent_sock_mutex rcu_read_lock &rcu_state.gp_wq irq_context: 0 rtnl_mutex uevent_sock_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 rtnl_mutex uevent_sock_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 rtnl_mutex uevent_sock_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex &bat_priv->tt.commit_lock &bat_priv->tvlv.container_list_lock quarantine_lock irq_context: 0 &sig->cred_guard_mutex tomoyo_ss remove_cache_srcu &pcp->lock &zone->lock irq_context: 0 &sig->cred_guard_mutex tomoyo_ss remove_cache_srcu &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &pool->lock/1 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 uts_sem &rq->__lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6 crngs.lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6 rcu_read_lock &dir->lock#2 irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6 rcu_read_lock &ul->lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6 
rcu_read_lock &c->lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock pool_lock#2 irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6 rcu_read_lock &obj_hash[i].lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock &c->lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock &n->list_lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock &n->list_lock &c->lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6 fill_pool_map-wait-type-override pool_lock#2 irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6 fill_pool_map-wait-type-override pool_lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6 fill_pool_map-wait-type-override &c->lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6 rcu_read_lock key#22 irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6 fill_pool_map-wait-type-override &n->list_lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6 fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6 rcu_read_lock &n->list_lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6 rcu_read_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->orig_work)->work) fill_pool_map-wait-type-override &c->lock irq_context: 0 tasklist_lock &sighand->siglock &(&sig->stats_lock)->lock &____s->seqcount#5 pidmap_lock fill_pool_map-wait-type-override &n->list_lock irq_context: 0 tasklist_lock &sighand->siglock &(&sig->stats_lock)->lock &____s->seqcount#5 pidmap_lock fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 ebt_mutex ebt_mutex.wait_lock irq_context: 0 ebt_mutex.wait_lock irq_context: 0 sk_lock-AF_INET6 sctp_assocs_id_lock &n->list_lock irq_context: 0 sk_lock-AF_INET6 sctp_assocs_id_lock &n->list_lock &c->lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6 rcu_read_lock rcu_read_lock &pool->lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6 rcu_read_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6 rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6 rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6 rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6 rcu_read_lock batched_entropy_u32.lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6 rcu_read_lock batched_entropy_u32.lock crngs.lock irq_context: softirq (&app->join_timer)#2 fill_pool_map-wait-type-override pool_lock#2 irq_context: softirq (&app->join_timer)#2 fill_pool_map-wait-type-override &c->lock irq_context: softirq (&app->join_timer)#2 fill_pool_map-wait-type-override pool_lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6 sctp_assocs_id_lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6 batched_entropy_u32.lock irq_context: softirq rcu_read_lock rcu_read_lock 
slock-AF_INET6 batched_entropy_u32.lock crngs.lock irq_context: 0 sk_lock-AF_INET6 sk_lock-AF_INET6/1 irq_context: 0 sk_lock-AF_INET6 sk_lock-AF_INET6/1 slock-AF_INET6 irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6 &asoc->wait irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6 &asoc->wait &p->pi_lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6 &asoc->wait &p->pi_lock &rq->__lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6 &asoc->wait &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 &net->sctp.addr_wq_lock slock-AF_INET6/1 fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 &net->sctp.addr_wq_lock slock-AF_INET6/1 fill_pool_map-wait-type-override &c->lock irq_context: 0 &sb->s_type->i_mutex_key#10 &net->sctp.addr_wq_lock slock-AF_INET6/1 fill_pool_map-wait-type-override &n->list_lock irq_context: 0 &sb->s_type->i_mutex_key#10 &net->sctp.addr_wq_lock slock-AF_INET6/1 fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 &sb->s_type->i_mutex_key#10 &net->sctp.addr_wq_lock slock-AF_INET6/1 fill_pool_map-wait-type-override pool_lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6 fill_pool_map-wait-type-override batched_entropy_u8.lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6 fill_pool_map-wait-type-override kfence_freelist_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&(&kfence_timer)->work) fill_pool_map-wait-type-override &n->list_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&(&kfence_timer)->work) fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss rcu_read_lock rcu_read_lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 &qs->lock irq_context: 0 &net->xfrm.xfrm_cfg_mutex rlock-AF_KEY irq_context: 0 &fsnotify_mark_srcu &n->list_lock irq_context: 0 &fsnotify_mark_srcu &n->list_lock &c->lock irq_context: 0 rtnl_mutex sk_lock-AF_CAN fs_reclaim irq_context: 0 rtnl_mutex sk_lock-AF_CAN fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 rtnl_mutex sk_lock-AF_CAN pool_lock#2 irq_context: 0 rtnl_mutex sk_lock-AF_CAN &rq->__lock irq_context: 0 rtnl_mutex sk_lock-AF_CAN &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex sk_lock-AF_CAN &net->can.rcvlists_lock irq_context: 0 rtnl_mutex sk_lock-AF_CAN &obj_hash[i].lock irq_context: 0 __ip_vs_mutex &mm->mmap_lock irq_context: 0 &mm->mmap_lock &anon_vma->rwsem rcu_read_lock &rcu_state.gp_wq irq_context: 0 &mm->mmap_lock &anon_vma->rwsem rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 &mm->mmap_lock &anon_vma->rwsem rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 &mm->mmap_lock &anon_vma->rwsem rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 mmu_notifier_invalidate_range_start &cfs_rq->removed.lock irq_context: 0 mmu_notifier_invalidate_range_start &obj_hash[i].lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 tomoyo_ss pgd_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 tomoyo_ss stock_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 tomoyo_ss key irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 tomoyo_ss pcpu_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 
tomoyo_ss percpu_counters_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 tomoyo_ss pcpu_lock stock_lock irq_context: 0 rcu_read_lock rcu_read_lock &____s->seqcount#5 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 slock-AF_INET6 &net->xfrm.xfrm_policy_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 slock-AF_INET6 &policy->lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 slock-AF_INET6 &list->lock#33 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 slock-AF_INET6 pool_lock#2 irq_context: 0 sb_writers#3 oom_adj_mutex rcu_read_lock &rcu_state.gp_wq irq_context: 0 sb_writers#3 oom_adj_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 sb_writers#3 oom_adj_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#3 oom_adj_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle remove_cache_srcu pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 &base->lock &obj_hash[i].lock irq_context: 0 lock btf_idr_lock &c->lock irq_context: 0 clock-AF_INET6 irq_context: 0 &type->i_mutex_dir_key#5 remove_cache_srcu &rq->__lock irq_context: 0 &type->i_mutex_dir_key#5 remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq (&tw->tw_timer) irq_context: softirq (&tw->tw_timer) &hashinfo->ehash_locks[i] irq_context: softirq (&tw->tw_timer) &tcp_hashinfo.bhash[i].lock irq_context: softirq (&tw->tw_timer) &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock irq_context: softirq (&tw->tw_timer) &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock stock_lock irq_context: softirq (&tw->tw_timer) &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock stock_lock rcu_read_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: softirq (&tw->tw_timer) &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock &obj_hash[i].lock irq_context: softirq (&tw->tw_timer) &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock pool_lock#2 irq_context: softirq (&tw->tw_timer) stock_lock irq_context: softirq (&tw->tw_timer) &obj_hash[i].lock irq_context: 0 &pipe->mutex/1 fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 &pipe->mutex/1 fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 rcu_read_lock kernfs_pr_cont_lock irq_context: 0 rcu_read_lock kernfs_pr_cont_lock kernfs_rename_lock irq_context: 0 rcu_read_lock kernfs_pr_cont_lock (console_sem).lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&(&kfence_timer)->work) cpu_hotplug_lock jump_label_mutex text_mutex &cfs_rq->removed.lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&(&kfence_timer)->work) cpu_hotplug_lock jump_label_mutex text_mutex &obj_hash[i].lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 &obj_hash[i].lock pool_lock irq_context: 0 &sb->s_type->i_mutex_key#9 fs_reclaim &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#9 fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_unbound (work_completion)(&map->work) per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 sb_writers#5 
&fsnotify_mark_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &rq->__lock &cfs_rq->removed.lock irq_context: 0 cb_lock remove_cache_srcu rcu_read_lock rcu_node_0 irq_context: 0 cb_lock remove_cache_srcu rcu_read_lock &rq->__lock irq_context: 0 cb_lock remove_cache_srcu rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock (console_sem).lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock console_lock console_srcu console_owner_lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock console_lock console_srcu console_owner irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock console_lock console_srcu console_owner &port_lock_key irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock console_lock console_srcu console_owner console_owner_lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: softirq rcu_read_lock rcu_read_lock batched_entropy_u32.lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock &ul->lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6 batched_entropy_u16.lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6 &tcp_hashinfo.bhash[i].lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6 &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6 &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock pool_lock#2 irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6 &hashinfo->ehash_locks[i] irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6 &queue->rskq_lock irq_context: 0 sk_lock-AF_INET6 &queue->rskq_lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6/1 tk_core.seq.seqcount irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6/1 &base->lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6/1 &base->lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 fs_reclaim irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 tk_core.seq.seqcount irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &____s->seqcount#7 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &ct->lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 rcu_read_lock rcu_node_0 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 &base->lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&map->work) &rq->__lock &cfs_rq->removed.lock irq_context: 0 &u->lock &f->f_owner.lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&map->work) rcu_read_lock rcu_node_0 
irq_context: 0 (wq_completion)events_unbound (work_completion)(&map->work) rcu_read_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_read_lock &____s->seqcount irq_context: 0 (wq_completion)events_unbound (work_completion)(&map->work) rcu_read_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&map->work) rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&map->work) rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)events_unbound (work_completion)(&map->work) rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&map->work) &p->pi_lock &cfs_rq->removed.lock irq_context: 0 &sb->s_type->i_mutex_key#9 &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#9 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_owner.lock irq_context: 0 &f->f_lock fasync_lock &new->fa_lock irq_context: 0 &f->f_lock fasync_lock &obj_hash[i].lock irq_context: 0 &f->f_lock fasync_lock pool_lock#2 irq_context: 0 &mm->mmap_lock rcu_read_lock rcu_read_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &xt[i].mutex &mm->mmap_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &group->mark_mutex remove_cache_srcu rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 &group->mark_mutex remove_cache_srcu rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 &group->mark_mutex remove_cache_srcu rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &group->mark_mutex remove_cache_srcu rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)events_unbound (work_completion)(&map->work) rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_unbound (work_completion)(&map->work) rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 remove_cache_srcu pgd_lock irq_context: 0 remove_cache_srcu stock_lock irq_context: 0 remove_cache_srcu key irq_context: 0 remove_cache_srcu pcpu_lock irq_context: 0 remove_cache_srcu percpu_counters_lock irq_context: 0 remove_cache_srcu pcpu_lock stock_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&map->work) rcu_read_lock &rcu_state.gp_wq irq_context: 0 (wq_completion)events_unbound (work_completion)(&map->work) rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&map->work) rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&map->work) rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_unbound (work_completion)(&map->work) rcu_node_0 irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&map->work) &pcp->lock &zone->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&map->work) &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &group->mark_mutex batched_entropy_u8.lock irq_context: 0 &group->mark_mutex kfence_freelist_lock irq_context: 0 (wq_completion)events_unbound connector_reaper_work &meta->lock irq_context: 0 (wq_completion)events_unbound connector_reaper_work kfence_freelist_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 jbd2_handle &cfs_rq->removed.lock irq_context: 0 &audit_cmd_mutex.lock irq_context: 0 
&audit_cmd_mutex.lock fs_reclaim irq_context: 0 &audit_cmd_mutex.lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &audit_cmd_mutex.lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 &audit_cmd_mutex.lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &audit_cmd_mutex.lock &c->lock irq_context: 0 &audit_cmd_mutex.lock pool_lock#2 irq_context: 0 &audit_cmd_mutex.lock rlock-AF_NETLINK irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &journal->j_state_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &journal->j_state_lock &journal->j_wait_commit irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &journal->j_state_lock &journal->j_wait_commit &p->pi_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &journal->j_state_lock &journal->j_wait_commit &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &journal->j_state_lock &journal->j_wait_commit &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET slock-AF_INET key#24 irq_context: 0 pernet_ops_rwsem ipvs->est_mutex pcpu_alloc_mutex.wait_lock irq_context: 0 pernet_ops_rwsem ipvs->est_mutex &p->pi_lock irq_context: 0 pernet_ops_rwsem fs_reclaim mmu_notifier_invalidate_range_start &cfs_rq->removed.lock irq_context: 0 pernet_ops_rwsem fs_reclaim mmu_notifier_invalidate_range_start &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 jbd2_handle &sbi->s_orphan_lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 jbd2_handle &sbi->s_orphan_lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 jbd2_handle &sbi->s_orphan_lock pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem ovs_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#8 tomoyo_ss remove_cache_srcu rcu_read_lock rcu_node_0 irq_context: 0 &p->lock rcu_read_lock &rcu_state.gp_wq irq_context: 0 &p->lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 &p->lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 &p->lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#8 tomoyo_ss remove_cache_srcu rcu_read_lock &rq->__lock irq_context: 0 sb_writers#8 tomoyo_ss remove_cache_srcu rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6 rcu_read_lock tcp_metrics_lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6 rcu_read_lock tcp_metrics_lock &c->lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6 rcu_read_lock tcp_metrics_lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 slock-AF_INET6 &hashinfo->ehash_locks[i] irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 slock-AF_INET6 &c->lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 slock-AF_INET6 &____s->seqcount irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 slock-AF_INET6 rcu_read_lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 slock-AF_INET6 tk_core.seq.seqcount irq_context: 0 &sb->s_type->i_mutex_key#10 
sk_lock-AF_INET6 slock-AF_INET6 rcu_read_lock rcu_read_lock &____s->seqcount#7 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 slock-AF_INET6 rcu_read_lock rcu_read_lock &ct->lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 slock-AF_INET6 rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 slock-AF_INET6 &base->lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 slock-AF_INET6 &base->lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 slock-AF_INET6 clock-AF_INET6 irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 &tcp_hashinfo.bhash[i].lock stock_lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 &tcp_hashinfo.bhash[i].lock &obj_hash[i].lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 &tcp_hashinfo.bhash[i].lock pool_lock#2 irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock stock_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal &cfs_rq->removed.lock irq_context: 0 sb_writers#5 &sb->s_type->i_mutex_key#12 fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&tbl->gc_work)->work) &tbl->lock batched_entropy_u8.lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&tbl->gc_work)->work) &tbl->lock kfence_freelist_lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&tbl->gc_work)->work) &tbl->lock &meta->lock irq_context: 0 pernet_ops_rwsem nf_hook_mutex remove_cache_srcu irq_context: 0 pernet_ops_rwsem nf_hook_mutex remove_cache_srcu &rq->__lock irq_context: 0 pernet_ops_rwsem nf_hook_mutex remove_cache_srcu quarantine_lock irq_context: 0 &xt[i].mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 pernet_ops_rwsem uevent_sock_mutex quarantine_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &idev->mc_lock remove_cache_srcu irq_context: 0 pernet_ops_rwsem rtnl_mutex &idev->mc_lock remove_cache_srcu quarantine_lock irq_context: 0 pernet_ops_rwsem cpu_hotplug_lock wq_pool_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override &n->list_lock irq_context: 0 rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 &sec->lock irq_context: 0 rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_NETROM &rdev->wpan_phy.queue_lock irq_context: 0 rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_NETROM rcu_read_lock pool_lock#2 irq_context: 0 rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_NETROM rcu_read_lock &list->lock#41 irq_context: 0 rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_NETROM &rdev->wpan_phy.sync_txq irq_context: softirq &list->lock#41 irq_context: softirq rcu_read_lock rcu_read_lock raw_lock irq_context: 0 &map->freeze_mutex irq_context: 0 &type->i_mutex_dir_key#3 sb_writers#4 &wb->work_lock irq_context: 0 &type->i_mutex_dir_key#3 sb_writers#4 &wb->work_lock &obj_hash[i].lock irq_context: 0 &type->i_mutex_dir_key#3 sb_writers#4 &wb->work_lock &base->lock irq_context: 0 &type->i_mutex_dir_key#3 sb_writers#4 
&wb->work_lock &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 &pcp->lock &zone->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&map->work) rcu_read_lock &rcu_state.expedited_wq irq_context: 0 (wq_completion)events_unbound (work_completion)(&map->work) rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&map->work) rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&map->work) rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq (&lapb->t1timer) &lapb->lock &n->list_lock irq_context: softirq (&lapb->t1timer) &lapb->lock &n->list_lock &c->lock irq_context: 0 (wq_completion)events_power_efficient (gc_work).work pool_lock#2 irq_context: 0 clock-AF_NETLINK irq_context: 0 wlock-AF_NETLINK irq_context: 0 delayed_uprobe_lock delayed_uprobe_lock.wait_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#5 remove_cache_srcu irq_context: 0 sb_writers#8 &of->mutex kn->active#5 remove_cache_srcu quarantine_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#5 remove_cache_srcu &c->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#5 remove_cache_srcu &n->list_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#5 remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#8 &of->mutex kn->active#5 remove_cache_srcu &obj_hash[i].lock irq_context: 0 sb_writers#8 &of->mutex kn->active#5 remove_cache_srcu pool_lock#2 irq_context: 0 sb_writers#8 &of->mutex kn->active#5 remove_cache_srcu &rq->__lock irq_context: 0 sb_writers#8 &of->mutex kn->active#5 remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_data_sem &cfs_rq->removed.lock irq_context: 0 lock pidmap_lock batched_entropy_u8.lock irq_context: 0 lock pidmap_lock kfence_freelist_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&(&kfence_timer)->work) cpu_hotplug_lock jump_label_mutex text_mutex pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem pool_lock irq_context: 0 (wq_completion)wg-crypt-wg0#4 (work_completion)(&peer->transmit_packet_work) &cfs_rq->removed.lock irq_context: 0 rtnl_mutex stack_depot_init_mutex &rq->__lock irq_context: 0 rtnl_mutex stack_depot_init_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 rcu_read_lock rcu_read_lock &cfs_rq->removed.lock irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_data_sem &rcu_state.expedited_wq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_data_sem &rcu_state.expedited_wq &p->pi_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_data_sem &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_data_sem &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_state.barrier_mutex 
&cfs_rq->removed.lock irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 &rcu_state.expedited_wq irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 fs_reclaim mmu_notifier_invalidate_range_start &cfs_rq->removed.lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 fs_reclaim mmu_notifier_invalidate_range_start &obj_hash[i].lock irq_context: 0 sb_writers#8 &of->mutex kn->active#5 fs_reclaim pgd_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#5 fs_reclaim stock_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#5 fs_reclaim rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#8 &of->mutex kn->active#5 fs_reclaim &obj_hash[i].lock irq_context: 0 sb_writers#8 &of->mutex kn->active#5 fs_reclaim key irq_context: 0 sb_writers#8 &of->mutex kn->active#5 fs_reclaim pcpu_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#5 fs_reclaim percpu_counters_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#5 fs_reclaim pcpu_lock stock_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#5 fs_reclaim pool_lock#2 irq_context: 0 sk_lock-AF_PHONET irq_context: 0 sk_lock-AF_PHONET slock-AF_PHONET irq_context: 0 sk_lock-AF_PHONET port_mutex#2 irq_context: 0 sk_lock-AF_PHONET port_mutex#2 local_port_range_lock.seqcount irq_context: 0 sk_lock-AF_PHONET port_mutex#2 &pnsocks.lock irq_context: 0 slock-AF_PHONET irq_context: 0 sk_lock-AF_PHONET fs_reclaim irq_context: 0 sk_lock-AF_PHONET fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sk_lock-AF_PHONET pool_lock#2 irq_context: 0 sk_lock-AF_PHONET &c->lock irq_context: 0 sk_lock-AF_PHONET &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_PHONET irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_PHONET slock-AF_PHONET irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_PHONET &pnsocks.lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_PHONET resource_mutex irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_PHONET rcu_state.exp_mutex rcu_node_0 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_PHONET rcu_state.exp_mutex &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_PHONET rcu_state.exp_mutex rcu_read_lock &pool->lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_PHONET rcu_state.exp_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_PHONET rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_PHONET rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_PHONET rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_PHONET rcu_state.exp_mutex &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_PHONET rcu_state.exp_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_PHONET rcu_state.exp_mutex &rnp->exp_wq[1] irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_PHONET &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 slock-AF_PHONET irq_context: 0 &sb->s_type->i_mutex_key#10 &list->lock#42 irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock_bh noop_qdisc.q.lock irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock_bh pool_lock#2 irq_context: 0 &group->mark_mutex lock 
&group->inotify_data.idr_lock &n->list_lock irq_context: 0 &group->mark_mutex lock &group->inotify_data.idr_lock &n->list_lock &c->lock irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh &r->producer_lock#4 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_CAIF rcu_state.exp_mutex &rnp->exp_wq[2] irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh &sch->q.lock batched_entropy_u16.lock crngs.lock base_crng.lock irq_context: 0 (wq_completion)wg-crypt-wg0#4 (work_completion)(&peer->transmit_packet_work) batched_entropy_u8.lock crngs.lock irq_context: 0 sb_writers#3 oom_adj_mutex rcu_node_0 irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&gc_work->dwork)->work) rcu_read_lock &n->list_lock irq_context: 0 rlock-AF_KCM irq_context: 0 sk_lock-AF_INET rcu_read_lock &tbl->lock irq_context: 0 sk_lock-AF_INET rcu_read_lock &____s->seqcount#9 irq_context: 0 &sb->s_type->i_mutex_key#9 remove_cache_srcu pool_lock#2 irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_data_sem &rq->__lock &cfs_rq->removed.lock irq_context: 0 rlock-AF_CAN irq_context: 0 (wq_completion)events drain_vmap_work vmap_purge_lock free_vmap_area_lock quarantine_lock irq_context: 0 rtnl_mutex sk_lock-AF_UNSPEC irq_context: 0 rtnl_mutex sk_lock-AF_UNSPEC &rq->__lock irq_context: 0 rtnl_mutex sk_lock-AF_UNSPEC &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex sk_lock-AF_UNSPEC slock-AF_UNSPEC irq_context: 0 rtnl_mutex sk_lock-AF_UNSPEC fs_reclaim irq_context: 0 rtnl_mutex sk_lock-AF_UNSPEC fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 rtnl_mutex sk_lock-AF_UNSPEC pool_lock#2 irq_context: 0 rtnl_mutex sk_lock-AF_UNSPEC free_vmap_area_lock irq_context: 0 rtnl_mutex sk_lock-AF_UNSPEC free_vmap_area_lock &obj_hash[i].lock irq_context: 0 rtnl_mutex sk_lock-AF_UNSPEC free_vmap_area_lock pool_lock#2 irq_context: 0 rtnl_mutex sk_lock-AF_UNSPEC vmap_area_lock irq_context: 0 rtnl_mutex sk_lock-AF_UNSPEC &c->lock irq_context: 0 rtnl_mutex sk_lock-AF_UNSPEC &____s->seqcount irq_context: 0 rtnl_mutex sk_lock-AF_UNSPEC stock_lock irq_context: 0 rtnl_mutex sk_lock-AF_UNSPEC &n->list_lock irq_context: 0 rtnl_mutex sk_lock-AF_UNSPEC &n->list_lock &c->lock irq_context: 0 rtnl_mutex sk_lock-AF_UNSPEC pcpu_alloc_mutex irq_context: 0 rtnl_mutex sk_lock-AF_UNSPEC pcpu_alloc_mutex pcpu_lock irq_context: 0 rtnl_mutex sk_lock-AF_UNSPEC &mm->mmap_lock irq_context: 0 rtnl_mutex sk_lock-AF_UNSPEC &obj_hash[i].lock irq_context: 0 rtnl_mutex sk_lock-AF_UNSPEC pack_mutex irq_context: 0 rtnl_mutex sk_lock-AF_UNSPEC batched_entropy_u32.lock irq_context: 0 rtnl_mutex sk_lock-AF_UNSPEC text_mutex irq_context: 0 rtnl_mutex sk_lock-AF_UNSPEC text_mutex &rq->__lock irq_context: 0 rtnl_mutex sk_lock-AF_UNSPEC text_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex sk_lock-AF_UNSPEC text_mutex ptlock_ptr(page)#2 irq_context: 0 rtnl_mutex sk_lock-AF_UNSPEC &fp->aux->used_maps_mutex irq_context: 0 rtnl_mutex slock-AF_UNSPEC irq_context: 0 rtnl_mutex &pnn->routes.lock &rq->__lock irq_context: 0 rtnl_mutex &pnn->routes.lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &mm->mmap_lock irq_context: 0 sb_writers#4 
&sb->s_type->i_mutex_key#8 &mm->mmap_lock ptlock_ptr(page)#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &mm->mmap_lock &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &mm->mmap_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &mm->mmap_lock fs_reclaim irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &mm->mmap_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &mm->mmap_lock &____s->seqcount irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &mm->mmap_lock stock_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &mm->mmap_lock pool_lock#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &mm->mmap_lock ptlock_ptr(page) irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &mm->mmap_lock rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &mm->mmap_lock rcu_read_lock &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &mm->mmap_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &mm->mmap_lock rcu_node_0 irq_context: 0 sb_writers#8 &cfs_rq->removed.lock irq_context: 0 sb_writers#3 oom_adj_mutex oom_adj_mutex.wait_lock irq_context: 0 sb_writers#3 oom_adj_mutex.wait_lock irq_context: 0 cb_lock nlk_cb_mutex-GENERIC (console_sem).lock irq_context: 0 cb_lock nlk_cb_mutex-GENERIC console_lock console_srcu console_owner_lock irq_context: 0 cb_lock nlk_cb_mutex-GENERIC console_lock console_srcu console_owner irq_context: 0 cb_lock nlk_cb_mutex-GENERIC console_lock console_srcu console_owner &port_lock_key irq_context: 0 cb_lock nlk_cb_mutex-GENERIC console_lock console_srcu console_owner console_owner_lock irq_context: 0 rcu_read_lock &n->lock &c->lock irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: softirq (&pool->idle_timer) rcu_read_lock &pool->lock/1 irq_context: softirq (&pool->idle_timer) rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: softirq (&pool->idle_timer) rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: softirq (&pool->idle_timer) rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&pool->idle_cull_work) irq_context: 0 (wq_completion)events_unbound (work_completion)(&pool->idle_cull_work) wq_pool_attach_mutex irq_context: 0 (wq_completion)events_unbound (work_completion)(&pool->idle_cull_work) wq_pool_attach_mutex &pool->lock/1 irq_context: 0 (wq_completion)events_unbound (work_completion)(&pool->idle_cull_work) wq_pool_attach_mutex &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&pool->idle_cull_work) wq_pool_attach_mutex &pool->lock/1 &base->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&pool->idle_cull_work) wq_pool_attach_mutex &pool->lock/1 &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&pool->idle_cull_work) wq_pool_attach_mutex &p->pi_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&pool->idle_cull_work) wq_pool_attach_mutex &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&pool->idle_cull_work) wq_pool_attach_mutex &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_unbound (work_completion)(&pool->idle_cull_work) wq_pool_attach_mutex &rq->__lock irq_context: 0 
(wq_completion)events_unbound (work_completion)(&pool->idle_cull_work) wq_pool_attach_mutex.wait_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&pool->idle_cull_work) &p->pi_lock irq_context: 0 wq_pool_attach_mutex &cfs_rq->removed.lock irq_context: 0 wq_pool_attach_mutex &obj_hash[i].lock irq_context: 0 wq_pool_attach_mutex pool_lock#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &sbi->s_writepages_rwsem irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &sbi->s_writepages_rwsem &rsp->gp_wait irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &sbi->s_writepages_rwsem rcu_state.exp_mutex rcu_node_0 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &sbi->s_writepages_rwsem rcu_state.exp_mutex &obj_hash[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &sbi->s_writepages_rwsem rcu_state.exp_mutex rcu_read_lock &pool->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &sbi->s_writepages_rwsem rcu_state.exp_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &sbi->s_writepages_rwsem rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &sbi->s_writepages_rwsem rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &sbi->s_writepages_rwsem rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &sbi->s_writepages_rwsem rcu_state.exp_mutex &rnp->exp_wq[1] irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &sbi->s_writepages_rwsem rcu_state.exp_mutex &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &sbi->s_writepages_rwsem rcu_state.exp_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &sbi->s_writepages_rwsem &obj_hash[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &sbi->s_writepages_rwsem jbd2_handle irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &sbi->s_writepages_rwsem &journal->j_state_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &sbi->s_writepages_rwsem &journal->j_state_lock &journal->j_wait_updates irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &sbi->s_writepages_rwsem &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &sbi->s_writepages_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &sbi->s_writepages_rwsem &journal->j_barrier irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &sbi->s_writepages_rwsem &journal->j_wait_transaction_locked irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &sem->waiters irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &rsp->gp_wait irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &rsp->gp_wait &obj_hash[i].lock irq_context: 0 sb_writers#4 
&sb->s_type->i_mutex_key#8 mapping.invalidate_lock &rsp->gp_wait pool_lock#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &journal->j_state_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &journal->j_state_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &journal->j_state_lock &journal->j_wait_updates irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &journal->j_barrier irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &journal->j_barrier &journal->j_state_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &journal->j_barrier &journal->j_state_lock &journal->j_wait_commit irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &journal->j_barrier &journal->j_state_lock &journal->j_wait_commit &p->pi_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &journal->j_barrier &journal->j_state_lock &journal->j_wait_commit &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &journal->j_barrier &journal->j_state_lock &journal->j_wait_commit &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &journal->j_barrier &journal->j_state_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &journal->j_barrier jbd2_handle irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &journal->j_barrier &journal->j_wait_commit irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &journal->j_barrier &journal->j_wait_done_commit irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &journal->j_barrier &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &journal->j_barrier &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &journal->j_barrier &journal->j_list_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &journal->j_barrier &journal->j_checkpoint_mutex irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &journal->j_barrier &journal->j_checkpoint_mutex &journal->j_state_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &journal->j_barrier &journal->j_checkpoint_mutex &journal->j_state_lock &journal->j_list_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &journal->j_barrier &journal->j_checkpoint_mutex tk_core.seq.seqcount irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &journal->j_barrier &journal->j_checkpoint_mutex &fq->mq_flush_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &journal->j_barrier &journal->j_checkpoint_mutex 
&fq->mq_flush_lock tk_core.seq.seqcount irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &journal->j_barrier &journal->j_checkpoint_mutex &fq->mq_flush_lock &q->requeue_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &journal->j_barrier &journal->j_checkpoint_mutex &fq->mq_flush_lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &journal->j_barrier &journal->j_checkpoint_mutex &fq->mq_flush_lock rcu_read_lock &pool->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &journal->j_barrier &journal->j_checkpoint_mutex &fq->mq_flush_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &journal->j_barrier &journal->j_checkpoint_mutex &fq->mq_flush_lock rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &journal->j_barrier &journal->j_checkpoint_mutex &fq->mq_flush_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &journal->j_barrier &journal->j_checkpoint_mutex &fq->mq_flush_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &journal->j_barrier &journal->j_checkpoint_mutex &fq->mq_flush_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &journal->j_barrier &journal->j_checkpoint_mutex &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &journal->j_barrier &journal->j_checkpoint_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &journal->j_barrier &journal->j_checkpoint_mutex &x->wait#26 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &journal->j_barrier &journal->j_checkpoint_mutex mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &journal->j_barrier &journal->j_checkpoint_mutex pool_lock#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &journal->j_barrier &journal->j_checkpoint_mutex bit_wait_table + i irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &journal->j_barrier &journal->j_checkpoint_mutex &journal->j_state_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &journal->j_barrier &journal->j_checkpoint_mutex &journal->j_list_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &journal->j_barrier &journal->j_checkpoint_mutex &dd->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &journal->j_barrier &journal->j_checkpoint_mutex rcu_read_lock &dd->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &journal->j_barrier &journal->j_checkpoint_mutex rcu_read_lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &journal->j_barrier &journal->j_checkpoint_mutex rcu_read_lock tk_core.seq.seqcount irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &journal->j_barrier &journal->j_checkpoint_mutex rcu_read_lock &virtscsi_vq->vq_lock irq_context: 0 
sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &journal->j_barrier &journal->j_checkpoint_mutex rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &journal->j_barrier &journal->j_checkpoint_mutex &journal->j_list_lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &journal->j_barrier &journal->j_checkpoint_mutex &journal->j_list_lock pool_lock#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &journal->j_barrier &journal->j_checkpoint_mutex &obj_hash[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &journal->j_barrier &journal->j_checkpoint_mutex &base->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &journal->j_barrier &journal->j_checkpoint_mutex &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &journal->j_barrier &journal->j_checkpoint_mutex (&timer.timer) irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &journal->j_barrier &journal->j_checkpoint_mutex rcu_read_lock &obj_hash[i].lock pool_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &journal->j_wait_transaction_locked irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &journal->j_wait_transaction_locked &p->pi_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &journal->j_wait_transaction_locked &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &journal->j_wait_transaction_locked &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &journal->j_checkpoint_mutex &c->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &journal->j_state_lock tk_core.seq.seqcount irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &journal->j_state_lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &journal->j_state_lock &base->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &journal->j_state_lock &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#5 pgd_lock irq_context: 0 sb_writers#5 stock_lock irq_context: 0 sb_writers#5 key irq_context: 0 sb_writers#5 pcpu_lock irq_context: 0 sb_writers#5 percpu_counters_lock irq_context: 0 sb_writers#5 pcpu_lock stock_lock irq_context: 0 sb_writers#5 &cfs_rq->removed.lock irq_context: softirq rcu_callback &rsp->gp_wait &obj_hash[i].lock irq_context: softirq rcu_callback &rsp->gp_wait pool_lock#2 irq_context: 0 sb_writers#3 rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#3 rcu_read_lock &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &sbi->s_writepages_rwsem rcu_state.exp_mutex &rnp->exp_wq[0] irq_context: 0 &type->i_mutex_dir_key#5 fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 &journal->j_wait_transaction_locked irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 
&sbi->s_writepages_rwsem &journal->j_barrier &journal->j_checkpoint_mutex rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &journal->j_barrier &journal->j_checkpoint_mutex rcu_read_lock &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &journal->j_wait_transaction_locked &p->pi_lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal &journal->j_wait_transaction_locked irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &journal->j_barrier &journal->j_checkpoint_mutex &journal->j_list_lock key#14 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &sbi->s_orphan_lock mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &sbi->s_orphan_lock pool_lock#2 irq_context: 0 isotp_notifier_lock irq_context: 0 &so->wait irq_context: 0 &sb->s_type->i_mutex_key#10 isotp_notifier_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &sbi->s_writepages_rwsem rcu_state.exp_mutex &rnp->exp_wq[3] irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &journal->j_barrier &journal->j_checkpoint_mutex &c->lock irq_context: 0 &type->s_umount_key#49 irq_context: 0 sb_writers#8 &of->mutex kn->active#5 uevent_sock_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &ul->lock irq_context: 0 &sb->s_type->i_lock_key#23 rcu_read_lock &dentry->d_lock irq_context: 0 &sb->s_type->i_lock_key#23 &dentry->d_lock &dentry->d_lock/1 irq_context: 0 &sb->s_type->i_lock_key#23 &dentry->d_lock/1 irq_context: 0 (wq_completion)events_unbound (reaper_work).work &meta->lock irq_context: 0 (wq_completion)events_unbound (reaper_work).work kfence_freelist_lock irq_context: 0 &group->notification_waitq &ep->lock &ep->wq &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 sk_lock-AF_INET &net->xfrm.xfrm_policy_lock irq_context: 0 &group->mark_mutex &cfs_rq->removed.lock irq_context: 0 &group->mark_mutex &obj_hash[i].lock irq_context: 0 elock-AF_PACKET irq_context: 0 &audit_cmd_mutex.lock &rq->__lock irq_context: 0 &audit_cmd_mutex.lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (crypto_chain).rwsem remove_cache_srcu irq_context: 0 (crypto_chain).rwsem remove_cache_srcu quarantine_lock irq_context: 0 (crypto_chain).rwsem remove_cache_srcu &c->lock irq_context: 0 (crypto_chain).rwsem remove_cache_srcu &n->list_lock irq_context: 0 (crypto_chain).rwsem remove_cache_srcu &rq->__lock irq_context: 0 (crypto_chain).rwsem remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 (crypto_chain).rwsem remove_cache_srcu &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_data_sem &bgl->locks[i].lock quarantine_lock irq_context: 0 &audit_cmd_mutex.lock &n->list_lock irq_context: 0 &audit_cmd_mutex.lock &n->list_lock &c->lock irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#3 rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_INET elock-AF_INET irq_context: 0 sb_writers#8 fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 
&mm->mmap_lock &vma->vm_lock->lock &cfs_rq->removed.lock irq_context: 0 &mm->mmap_lock &vma->vm_lock->lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET &f->f_lock irq_context: 0 sk_lock-AF_INET &f->f_lock fasync_lock irq_context: 0 sk_lock-AF_INET &f->f_lock fasync_lock &new->fa_lock irq_context: 0 sk_lock-AF_INET &f->f_lock fasync_lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET &f->f_lock fasync_lock pool_lock#2 irq_context: 0 crypto_cfg_mutex irq_context: 0 sk_lock-AF_KCM &c->lock irq_context: 0 sk_lock-AF_KCM &mux->lock irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock remove_cache_srcu irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock remove_cache_srcu quarantine_lock irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock remove_cache_srcu &c->lock irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock remove_cache_srcu &n->list_lock irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock remove_cache_srcu &obj_hash[i].lock irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock remove_cache_srcu pool_lock#2 irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_KCM &____s->seqcount irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_KCM rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-kex-wg0#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock rcu_read_lock_bh &peer->keypairs.keypair_update_lock &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg0#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock rcu_read_lock_bh &peer->keypairs.keypair_update_lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg0#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &rq->__lock irq_context: 0 (wq_completion)wg-kex-wg0#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 kn->active#5 &rq->__lock &cfs_rq->removed.lock irq_context: 0 (crypto_chain).rwsem &n->list_lock irq_context: 0 (crypto_chain).rwsem &n->list_lock &c->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 jbd2_handle &journal->j_state_lock &journal->j_wait_commit irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 jbd2_handle &journal->j_state_lock &journal->j_wait_commit &p->pi_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 
&sb->s_type->i_mutex_key#8 jbd2_handle &journal->j_state_lock &journal->j_wait_commit &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 jbd2_handle &journal->j_state_lock &journal->j_wait_commit &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &net->ipv4.ra_mutex irq_context: 0 rtnl_mutex sk_lock-AF_INET _xmit_ETHER irq_context: 0 rtnl_mutex sk_lock-AF_INET _xmit_ETHER pool_lock#2 irq_context: 0 rtnl_mutex sk_lock-AF_INET &im->lock &c->lock irq_context: 0 &sb->s_type->i_mutex_key#10 rtnl_mutex &im->lock &base->lock irq_context: 0 &sb->s_type->i_mutex_key#10 rtnl_mutex &im->lock &base->lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 rtnl_mutex _xmit_ETHER irq_context: 0 &sb->s_type->i_mutex_key#10 rtnl_mutex _xmit_ETHER &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 rtnl_mutex _xmit_ETHER pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 rtnl_mutex _xmit_ETHER krc.lock irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock rcu_read_lock &r->producer_lock irq_context: 0 kn->active#5 &kernfs_locks->open_file_mutex[count] &cfs_rq->removed.lock irq_context: 0 kn->active#5 &kernfs_locks->open_file_mutex[count] &obj_hash[i].lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 pgd_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 stock_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 key irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 pcpu_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 percpu_counters_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 pcpu_lock stock_lock irq_context: 0 rcu_read_lock &vma->vm_lock->lock &cfs_rq->removed.lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 rcu_read_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &p->lock &of->mutex kn->active#5 rcu_node_0 irq_context: 0 sk_lock-AF_NETLINK stock_lock irq_context: 0 sk_lock-AF_NETLINK &f->f_lock irq_context: 0 sk_lock-AF_NETLINK &f->f_lock fasync_lock irq_context: 0 sk_lock-AF_NETLINK &f->f_lock fasync_lock &new->fa_lock irq_context: 0 sk_lock-AF_NETLINK &f->f_lock fasync_lock &obj_hash[i].lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &anon_vma->rwsem pool_lock#2 irq_context: 0 sb_writers#8 kn->active#5 &kernfs_locks->open_file_mutex[count] fs_reclaim &rq->__lock irq_context: 0 sb_writers#8 kn->active#5 &kernfs_locks->open_file_mutex[count] fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&p->wq) &meta->lock irq_context: 0 (wq_completion)events (work_completion)(&p->wq) kfence_freelist_lock irq_context: 0 sk_lock-AF_INET free_vmap_area_lock irq_context: 0 sk_lock-AF_INET vmap_area_lock irq_context: 0 sk_lock-AF_INET pcpu_alloc_mutex irq_context: 0 sk_lock-AF_INET pcpu_alloc_mutex pcpu_lock irq_context: 0 sk_lock-AF_INET init_mm.page_table_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex _xmit_TUNNEL6 irq_context: 0 vlan_ioctl_mutex rtnl_mutex _xmit_TUNNEL6 &c->lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex _xmit_TUNNEL6 pool_lock#2 irq_context: 0 vlan_ioctl_mutex rtnl_mutex lock kernfs_idr_lock &c->lock irq_context: 0 vlan_ioctl_mutex 
rtnl_mutex lock kernfs_idr_lock &n->list_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex lock kernfs_idr_lock &n->list_lock &c->lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 vlan_ioctl_mutex rtnl_mutex _xmit_ETHER &c->lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex fs_reclaim &rq->__lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 vlan_ioctl_mutex rtnl_mutex &idev->mc_lock &obj_hash[i].lock pool_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_INET pcpu_alloc_mutex &rq->__lock irq_context: 0 sk_lock-AF_INET pcpu_alloc_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq (&icsk->icsk_retransmit_timer) slock-AF_INET6 irq_context: softirq (&icsk->icsk_retransmit_timer) slock-AF_INET6 tk_core.seq.seqcount irq_context: softirq (&icsk->icsk_retransmit_timer) slock-AF_INET6 pool_lock#2 irq_context: softirq (&icsk->icsk_retransmit_timer) slock-AF_INET6 rcu_read_lock rcu_read_lock &____s->seqcount#7 irq_context: softirq (&icsk->icsk_retransmit_timer) slock-AF_INET6 rcu_read_lock rcu_read_lock &ct->lock irq_context: softirq (&icsk->icsk_retransmit_timer) slock-AF_INET6 rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: softirq (&icsk->icsk_retransmit_timer) slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: softirq (&icsk->icsk_retransmit_timer) slock-AF_INET6 &obj_hash[i].lock irq_context: softirq (&icsk->icsk_retransmit_timer) slock-AF_INET6 &base->lock irq_context: softirq (&icsk->icsk_retransmit_timer) slock-AF_INET6 &base->lock &obj_hash[i].lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6/1 &____s->seqcount irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6/1 rcu_read_lock pool_lock#2 irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6/1 &hashinfo->ehash_locks[i] irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6/1 &tcp_hashinfo.bhash[i].lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6/1 &tcp_hashinfo.bhash[i].lock stock_lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6/1 &tcp_hashinfo.bhash[i].lock &obj_hash[i].lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6/1 &tcp_hashinfo.bhash[i].lock pool_lock#2 irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6/1 &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6/1 &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock stock_lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6/1 &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock &obj_hash[i].lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6/1 &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock pool_lock#2 irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6/1 elock-AF_INET6 irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6/1 rcu_read_lock rcu_read_lock &____s->seqcount#7 irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6/1 rcu_read_lock rcu_read_lock &ct->lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6/1 rcu_read_lock 
rcu_read_lock &____s->seqcount#10 irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6/1 rcu_read_lock rcu_read_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6/1 rcu_read_lock rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: 0 &list->lock#43 irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 pcpu_lock stock_lock irq_context: 0 &xt[i].mutex pgd_lock irq_context: 0 &xt[i].mutex stock_lock irq_context: 0 &xt[i].mutex key irq_context: 0 &xt[i].mutex pcpu_lock irq_context: 0 &xt[i].mutex percpu_counters_lock irq_context: 0 &xt[i].mutex pcpu_lock stock_lock irq_context: 0 fs_reclaim pgd_lock irq_context: 0 fs_reclaim key irq_context: 0 fs_reclaim pcpu_lock irq_context: 0 fs_reclaim percpu_counters_lock irq_context: 0 cb_lock nlk_cb_mutex-GENERIC genl_mutex &ht->lock irq_context: 0 cb_lock nlk_cb_mutex-GENERIC genl_mutex rcu_read_lock &ht->lock irq_context: 0 cb_lock nlk_cb_mutex-GENERIC genl_mutex k-sk_lock-AF_TIPC irq_context: 0 cb_lock nlk_cb_mutex-GENERIC genl_mutex k-sk_lock-AF_TIPC k-slock-AF_TIPC irq_context: 0 cb_lock nlk_cb_mutex-GENERIC genl_mutex k-slock-AF_TIPC irq_context: 0 (wq_completion)events (work_completion)(&nlk->work) genl_mutex irq_context: 0 (wq_completion)events (work_completion)(&nlk->work) genl_mutex &ht->lock irq_context: 0 (wq_completion)events (work_completion)(&nlk->work) genl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&nlk->work) genl_mutex pool_lock#2 irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6 &n->list_lock irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6 &n->list_lock &c->lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle &sbi->s_orphan_lock rcu_read_lock &rcu_state.gp_wq irq_context: 0 sb_writers#4 sb_internal jbd2_handle &sbi->s_orphan_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle &sbi->s_orphan_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle &sbi->s_orphan_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 sb_internal jbd2_handle &sbi->s_orphan_lock rcu_read_lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle &sbi->s_orphan_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 jbd2_handle &p->pi_lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 jbd2_handle &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &____s->seqcount irq_context: 0 rcu_read_lock &list->lock#5 irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rhashtable_bucket irq_context: softirq rcu_read_lock rcu_read_lock &q->lock#2 irq_context: softirq rcu_read_lock rcu_read_lock &q->lock#2 &obj_hash[i].lock irq_context: softirq rcu_read_lock rcu_read_lock &q->lock#2 &base->lock irq_context: softirq rcu_read_lock rcu_read_lock &q->lock#2 &base->lock &obj_hash[i].lock irq_context: softirq rcu_read_lock rcu_read_lock &q->lock#2 rcu_read_lock rcu_read_lock rhashtable_bucket irq_context: softirq rcu_read_lock rcu_read_lock &q->lock#2 rcu_read_lock rcu_read_lock rcu_read_lock &pool->lock irq_context: softirq rcu_read_lock rcu_read_lock &q->lock#2 rcu_read_lock rcu_read_lock rcu_read_lock &pool->lock 
&obj_hash[i].lock irq_context: softirq rcu_read_lock rcu_read_lock &q->lock#2 rcu_read_lock rcu_read_lock rcu_read_lock &pool->lock pool_lock#2 irq_context: softirq rcu_read_lock rcu_read_lock &q->lock#2 rcu_read_lock rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: softirq rcu_read_lock rcu_read_lock &q->lock#2 rcu_read_lock rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: softirq rcu_read_lock rcu_read_lock &q->lock#2 rcu_read_lock rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq rcu_read_lock rcu_read_lock &q->lock#2 pool_lock#2 irq_context: 0 sb_writers#4 sb_internal &pcp->lock &zone->lock irq_context: 0 &xt[i].mutex fs_reclaim mmu_notifier_invalidate_range_start &cfs_rq->removed.lock irq_context: 0 &xt[i].mutex fs_reclaim mmu_notifier_invalidate_range_start &obj_hash[i].lock irq_context: softirq rcu_read_lock rcu_read_lock &q->lock#2 &c->lock irq_context: 0 rtnl_mutex sk_lock-AF_UNSPEC remove_cache_srcu irq_context: 0 rtnl_mutex sk_lock-AF_UNSPEC remove_cache_srcu quarantine_lock irq_context: 0 rtnl_mutex sk_lock-AF_UNSPEC remove_cache_srcu &c->lock irq_context: 0 rtnl_mutex sk_lock-AF_UNSPEC remove_cache_srcu &n->list_lock irq_context: 0 rtnl_mutex sk_lock-AF_UNSPEC remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 rtnl_mutex sk_lock-AF_UNSPEC remove_cache_srcu &obj_hash[i].lock irq_context: 0 rtnl_mutex sk_lock-AF_UNSPEC remove_cache_srcu rcu_read_lock rcu_node_0 irq_context: 0 rtnl_mutex sk_lock-AF_UNSPEC remove_cache_srcu rcu_read_lock &rq->__lock irq_context: 0 rtnl_mutex sk_lock-AF_UNSPEC remove_cache_srcu rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex sk_lock-AF_UNSPEC init_mm.page_table_lock irq_context: 0 rtnl_mutex sk_lock-AF_UNSPEC rcu_read_lock rcu_node_0 irq_context: 0 rtnl_mutex sk_lock-AF_UNSPEC rcu_read_lock &rq->__lock irq_context: 0 sk_lock-AF_KCM &n->list_lock irq_context: 0 sk_lock-AF_KCM &n->list_lock &c->lock irq_context: 0 sk_lock-AF_KCM &rq->__lock irq_context: 0 sk_lock-AF_KCM &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_KCM &obj_hash[i].lock irq_context: 0 &xa->xa_lock#12 &c->lock irq_context: 0 &sb->s_type->i_lock_key#16 irq_context: 0 &sb->s_type->i_lock_key#16 &dentry->d_lock irq_context: 0 &mm->mmap_lock &sb->s_type->i_mutex_key#21 irq_context: 0 &mm->mmap_lock &sb->s_type->i_mutex_key#21 tk_core.seq.seqcount irq_context: 0 &mm->mmap_lock &sb->s_type->i_mutex_key#21 fs_reclaim irq_context: 0 &mm->mmap_lock &sb->s_type->i_mutex_key#21 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &mm->mmap_lock &sb->s_type->i_mutex_key#21 pool_lock#2 irq_context: 0 &mm->mmap_lock &sb->s_type->i_mutex_key#21 &rq->__lock irq_context: 0 &mm->mmap_lock &sb->s_type->i_mutex_key#21 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock &hugetlbfs_i_mmap_rwsem_key irq_context: 0 &mm->mmap_lock &hugetlbfs_i_mmap_rwsem_key &obj_hash[i].lock irq_context: 0 &mm->mmap_lock &hugetlbfs_i_mmap_rwsem_key pool_lock#2 irq_context: 0 &mm->mmap_lock &vma_lock->rw_sema irq_context: 0 &mm->mmap_lock &vma_lock->rw_sema &hugetlbfs_i_mmap_rwsem_key irq_context: 0 &mm->mmap_lock &hugetlbfs_i_mmap_rwsem_key irq_context: 0 &mm->mmap_lock &hugetlbfs_i_mmap_rwsem_key &obj_hash[i].lock irq_context: 0 &mm->mmap_lock &hugetlbfs_i_mmap_rwsem_key pool_lock#2 irq_context: 0 &resv_map->lock irq_context: 0 &mm->mmap_lock &sb->s_type->i_mutex_key#21 sb_writers#14 mount_lock 
irq_context: 0 &mm->mmap_lock &sb->s_type->i_mutex_key#21 sb_writers#14 tk_core.seq.seqcount irq_context: 0 &mm->mmap_lock &sb->s_type->i_mutex_key#21 sb_writers#14 &sb->s_type->i_lock_key#16 irq_context: 0 &mm->mmap_lock &sb->s_type->i_mutex_key#21 sb_writers#14 &rq->__lock irq_context: 0 &mm->mmap_lock &sb->s_type->i_mutex_key#21 sb_writers#14 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock &sb->s_type->i_mutex_key#21 sb_writers#14 &wb->list_lock irq_context: 0 &mm->mmap_lock &sb->s_type->i_mutex_key#21 sb_writers#14 &wb->list_lock &sb->s_type->i_lock_key#16 irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &p->pi_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_data_sem remove_cache_srcu &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_data_sem remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_data_sem remove_cache_srcu rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_data_sem remove_cache_srcu rcu_read_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_data_sem remove_cache_srcu rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&tbl->gc_work)->work) &tbl->lock rlock-AF_NETLINK irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&tbl->gc_work)->work) &tbl->lock rcu_read_lock &ei->socket.wq.wait irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&tbl->gc_work)->work) &tbl->lock rcu_read_lock &ei->socket.wq.wait &p->pi_lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&tbl->gc_work)->work) &tbl->lock rcu_read_lock &ei->socket.wq.wait &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&tbl->gc_work)->work) &tbl->lock rcu_read_lock &ei->socket.wq.wait &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#3 rcu_node_0 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 jbd2_handle rcu_read_lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 jbd2_handle rcu_read_lock &obj_hash[i].lock irq_context: 0 sb_writers#8 &of->mutex kn->active#5 uevent_sock_mutex rcu_read_lock pgd_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#5 uevent_sock_mutex rcu_read_lock key irq_context: 0 sb_writers#8 &of->mutex kn->active#5 uevent_sock_mutex rcu_read_lock pcpu_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#5 uevent_sock_mutex rcu_read_lock percpu_counters_lock irq_context: 0 sk_lock-AF_INET (console_sem).lock irq_context: 0 sk_lock-AF_INET console_lock console_srcu console_owner_lock irq_context: 0 sk_lock-AF_INET console_lock console_srcu console_owner irq_context: 0 sk_lock-AF_INET console_lock console_srcu console_owner &port_lock_key irq_context: 0 sk_lock-AF_INET console_lock console_srcu console_owner console_owner_lock irq_context: 0 sb_writers#8 tomoyo_ss batched_entropy_u8.lock irq_context: 0 sb_writers#8 tomoyo_ss kfence_freelist_lock irq_context: 0 sb_writers#8 tomoyo_ss &meta->lock irq_context: 0 &mm->mmap_lock &anon_vma->rwsem &rq->__lock &cfs_rq->removed.lock 
irq_context: 0 &ep->mtx kernfs_idr_lock irq_context: 0 uevent_sock_mutex &____s->seqcount#2 irq_context: 0 uevent_sock_mutex &____s->seqcount irq_context: 0 gdp_mutex kernfs_idr_lock irq_context: 0 kn->active#5 &rcu_state.expedited_wq irq_context: 0 nf_sockopt_mutex &rq->__lock &cfs_rq->removed.lock irq_context: 0 sk_lock-AF_UNIX reuseport_lock irq_context: softirq &(&hwstats->traffic_dw)->timer rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &obj_hash[i].lock irq_context: softirq &(&hwstats->traffic_dw)->timer rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &base->lock irq_context: softirq &(&hwstats->traffic_dw)->timer rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &base->lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 slock-AF_INET6 fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 slock-AF_INET6 fill_pool_map-wait-type-override &c->lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 slock-AF_INET6 fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->orig_work)->work) &rq->__lock &cfs_rq->removed.lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 tomoyo_ss remove_cache_srcu pool_lock#2 irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 tomoyo_ss remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 sb_internal jbd2_handle &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 sb_writers#8 kn->active#5 &kernfs_locks->open_file_mutex[count] fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 sb_writers#8 kn->active#5 &kernfs_locks->open_file_mutex[count] fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq (&mp->timer) &br->multicast_lock &c->lock irq_context: 0 (wq_completion)events deferred_process_work rtnl_mutex quarantine_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_PHONET rcu_state.exp_mutex &rnp->exp_wq[2] irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &sbi->s_orphan_lock &lock->wait_lock irq_context: 0 &xt[i].mutex free_vmap_area_lock quarantine_lock irq_context: 0 rtnl_mutex mfc_unres_lock irq_context: 0 rtnl_mutex mfc_unres_lock &obj_hash[i].lock irq_context: 0 rtnl_mutex sk_lock-AF_INET rcu_read_lock pool_lock#2 irq_context: 0 rtnl_mutex sk_lock-AF_INET rcu_read_lock &dir->lock#2 irq_context: 0 rtnl_mutex sk_lock-AF_INET rcu_read_lock &ul->lock irq_context: 0 cb_lock genl_mutex net_dm_mutex irq_context: 0 cb_lock genl_mutex net_dm_mutex &obj_hash[i].lock irq_context: 0 cb_lock genl_mutex net_dm_mutex fs_reclaim irq_context: 0 cb_lock genl_mutex net_dm_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 cb_lock genl_mutex net_dm_mutex pool_lock#2 irq_context: 0 cb_lock genl_mutex net_dm_mutex &data->lock irq_context: 0 cb_lock genl_mutex net_dm_mutex &rq->__lock irq_context: 0 cb_lock genl_mutex net_dm_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock genl_mutex net_dm_mutex tracepoints_mutex irq_context: 0 cb_lock genl_mutex net_dm_mutex tracepoints_mutex fs_reclaim irq_context: 0 cb_lock genl_mutex net_dm_mutex tracepoints_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 cb_lock genl_mutex net_dm_mutex tracepoints_mutex pool_lock#2 irq_context: 0 cb_lock genl_mutex net_dm_mutex tracepoints_mutex cpu_hotplug_lock irq_context: 0 cb_lock genl_mutex net_dm_mutex 
tracepoints_mutex cpu_hotplug_lock static_call_mutex irq_context: 0 cb_lock genl_mutex net_dm_mutex tracepoints_mutex cpu_hotplug_lock static_call_mutex text_mutex irq_context: 0 cb_lock genl_mutex net_dm_mutex tracepoints_mutex cpu_hotplug_lock static_call_mutex text_mutex ptlock_ptr(page)#2 irq_context: 0 cb_lock genl_mutex net_dm_mutex tracepoints_mutex cpu_hotplug_lock static_call_mutex text_mutex &rq->__lock irq_context: 0 cb_lock genl_mutex net_dm_mutex tracepoints_mutex cpu_hotplug_lock jump_label_mutex irq_context: 0 cb_lock genl_mutex net_dm_mutex tracepoints_mutex cpu_hotplug_lock jump_label_mutex text_mutex irq_context: 0 cb_lock genl_mutex net_dm_mutex tracepoints_mutex cpu_hotplug_lock jump_label_mutex text_mutex ptlock_ptr(page)#2 irq_context: 0 cb_lock genl_mutex net_dm_mutex tracepoints_mutex tracepoint_srcu_srcu_usage.lock &ACCESS_PRIVATE(sdp, lock) irq_context: 0 cb_lock genl_mutex net_dm_mutex tracepoints_mutex tracepoint_srcu_srcu_usage.lock &obj_hash[i].lock irq_context: 0 cb_lock genl_mutex net_dm_mutex tracepoints_mutex tracepoint_srcu_srcu_usage.lock &base->lock irq_context: 0 cb_lock genl_mutex net_dm_mutex tracepoints_mutex tracepoint_srcu_srcu_usage.lock &base->lock &obj_hash[i].lock irq_context: 0 cb_lock genl_mutex net_dm_mutex tracepoints_mutex &rq->__lock irq_context: 0 cb_lock genl_mutex net_dm_mutex tracepoints_mutex &obj_hash[i].lock irq_context: 0 cb_lock genl_mutex net_dm_mutex &ACCESS_PRIVATE(sdp, lock) irq_context: 0 cb_lock genl_mutex net_dm_mutex tracepoint_srcu irq_context: 0 cb_lock genl_mutex net_dm_mutex &x->wait#3 irq_context: 0 cb_lock genl_mutex net_dm_mutex rcu_state.exp_mutex rcu_node_0 irq_context: 0 cb_lock genl_mutex net_dm_mutex rcu_state.exp_mutex &obj_hash[i].lock irq_context: 0 cb_lock genl_mutex net_dm_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock irq_context: 0 cb_lock genl_mutex net_dm_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 cb_lock genl_mutex net_dm_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 cb_lock genl_mutex net_dm_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 cb_lock genl_mutex net_dm_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock genl_mutex net_dm_mutex rcu_state.exp_mutex &rq->__lock irq_context: 0 cb_lock genl_mutex net_dm_mutex rcu_state.exp_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock genl_mutex net_dm_mutex rcu_state.exp_mutex &rnp->exp_wq[2] irq_context: 0 cb_lock genl_mutex net_dm_mutex (&data->send_timer) irq_context: 0 cb_lock genl_mutex net_dm_mutex &base->lock irq_context: 0 cb_lock genl_mutex net_dm_mutex (work_completion)(&data->dm_alert_work) irq_context: 0 cb_lock genl_mutex net_dm_mutex &c->lock irq_context: 0 cb_lock genl_mutex net_dm_mutex tracepoints_mutex &c->lock irq_context: softirq rcu_callback &data->lock irq_context: softirq rcu_callback &data->lock &obj_hash[i].lock irq_context: softirq rcu_callback &data->lock &base->lock irq_context: softirq rcu_callback &data->lock &base->lock &obj_hash[i].lock irq_context: 0 &data->lock irq_context: 0 &data->lock &obj_hash[i].lock irq_context: 0 &data->lock &base->lock irq_context: 0 &data->lock &base->lock &obj_hash[i].lock irq_context: softirq (&lapb->t1timer) &lapb->lock rcu_read_lock_bh &data->lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem 
fs_reclaim irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem pool_lock#2 irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem &x->wait#9 irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem &obj_hash[i].lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem (console_sem).lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem &pdata->netdev_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem &pdata->netdev_lock pool_lock#2 irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem &pdata->netdev_lock &dir->lock#2 irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem ndev_hash_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem crypto_alg_sem irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem fs_reclaim irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem &c->lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem pool_lock#2 irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem devices.xa_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem &rxe->usdev_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem &rxe->usdev_lock &pdata->netdev_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem &rxe->usdev_lock rtnl_mutex irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem &c->lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem rtnl_mutex irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem rtnl_mutex &rq->__lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle rcu_read_lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle rcu_read_lock &obj_hash[i].lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem rtnl_mutex net_rwsem irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem rtnl_mutex net_rwsem &table->lock#4 irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem rtnl_mutex net_rwsem &table->lock#4 fs_reclaim irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem rtnl_mutex net_rwsem &table->lock#4 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem rtnl_mutex net_rwsem &table->lock#4 pool_lock#2 irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem rtnl_mutex net_rwsem &table->lock#4 &obj_hash[i].lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem rtnl_mutex net_rwsem &table->lock#4 &table->rwlock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem rtnl_mutex net_rwsem &table->lock#4 &device->event_handler_rwsem irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem rtnl_mutex net_rwsem rcu_read_lock pool_lock#2 irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem rtnl_mutex net_rwsem &table->lock#4 &c->lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem rtnl_mutex net_rwsem &table->lock#4 &n->list_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem rtnl_mutex net_rwsem &table->lock#4 &n->list_lock &c->lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem rtnl_mutex net_rwsem &obj_hash[i].lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem rtnl_mutex net_rwsem pool_lock#2 irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem rtnl_mutex net_rwsem 
&ndev->lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem rtnl_mutex net_rwsem &ndev->lock &c->lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem rtnl_mutex net_rwsem &ndev->lock pool_lock#2 irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem &device->cache_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem rdmacg_mutex irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem &k->list_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem gdp_mutex irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem gdp_mutex &k->list_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem gdp_mutex fs_reclaim irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem gdp_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem gdp_mutex pool_lock#2 irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem gdp_mutex lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem gdp_mutex lock kernfs_idr_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem gdp_mutex lock kernfs_idr_lock pool_lock#2 irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem gdp_mutex &root->kernfs_rwsem irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem gdp_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem gdp_mutex kobj_ns_type_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem lock irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 &obj_hash[i].lock pool_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem lock kernfs_idr_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem &root->kernfs_rwsem irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem bus_type_sem irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem sysfs_symlink_target_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem &root->kernfs_rwsem irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem &dev->power.lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem dpm_list_mtx irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem subsys mutex#83 irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem subsys mutex#83 &k->k_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem &zone->lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem &____s->seqcount irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem &rq->__lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#9 &dentry->d_lock &wq#2 irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem rcu_read_lock rcu_node_0 irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem rcu_read_lock &rq->__lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem &____s->seqcount#2 irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem &n->list_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem &n->list_lock &c->lock irq_context: softirq rcu_read_lock &data->lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem &root->kernfs_rwsem &sem->wait_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem &root->kernfs_rwsem &rq->__lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem &root->kernfs_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem lock kernfs_idr_lock pool_lock#2 irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem lock kernfs_idr_lock &c->lock irq_context: 0 &rdma_nl_types[idx].sem 
link_ops_rwsem lock kernfs_idr_lock &n->list_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem lock kernfs_idr_lock &n->list_lock &c->lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh &data->lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem &cfs_rq->removed.lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem &pcp->lock &zone->lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem fs_reclaim &rq->__lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem &ei->i_es_lock batched_entropy_u8.lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem &ei->i_es_lock kfence_freelist_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem &sem->wait_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem &p->pi_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem &p->pi_lock &rq->__lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem &root->kernfs_rwsem rcu_read_lock &rq->__lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem &root->kernfs_rwsem rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_TUNNEL6#2 &data->lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem &root->kernfs_rwsem &root->kernfs_iattr_rwsem &sem->wait_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem &root->kernfs_rwsem &root->kernfs_iattr_rwsem &rq->__lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem &root->kernfs_rwsem &root->kernfs_iattr_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 &type->i_mutex_dir_key#4 &root->kernfs_rwsem rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem pool_lock#2 irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem &obj_hash[i].lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem &rq->__lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rcu_read_lock &pool->lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)infiniband irq_context: 0 (wq_completion)infiniband 
(work_completion)(&work->work)#2 irq_context: 0 (wq_completion)infiniband (work_completion)(&work->work)#2 fs_reclaim irq_context: 0 (wq_completion)infiniband (work_completion)(&work->work)#2 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)infiniband (work_completion)(&work->work)#2 pool_lock#2 irq_context: 0 (wq_completion)infiniband (work_completion)(&work->work)#2 &rxe->usdev_lock irq_context: 0 (wq_completion)infiniband (work_completion)(&work->work)#2 &rxe->usdev_lock &pdata->netdev_lock irq_context: 0 (wq_completion)infiniband (work_completion)(&work->work)#2 &rxe->usdev_lock rtnl_mutex irq_context: 0 (wq_completion)infiniband (work_completion)(&work->work)#2 &device->cache_lock irq_context: 0 (wq_completion)infiniband (work_completion)(&work->work)#2 &obj_hash[i].lock irq_context: 0 (wq_completion)infiniband (work_completion)(&work->work)#2 &device->event_handler_rwsem irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem (console_sem).lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem console_lock console_srcu console_owner_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem console_lock console_srcu console_owner irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem console_lock console_srcu console_owner &port_lock_key irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem console_lock console_srcu console_owner console_owner_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem &xa->xa_lock#15 irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem &xa->xa_lock#15 pool_lock#2 irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem fs_reclaim irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem &c->lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem pool_lock#2 irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem &xa->xa_lock#16 irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem &xa->xa_lock#16 pool_lock#2 irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem &xa->xa_lock#17 irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem crngs.lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem &n->list_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem &n->list_lock &c->lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem 
clients_rwsem &device->client_data_rwsem &rq->__lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem free_vmap_area_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem vmap_area_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem &____s->seqcount irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem init_mm.page_table_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem &pcp->lock &zone->lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem &obj_hash[i].lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem &cq->cq_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem stock_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem mmu_notifier_invalidate_range_start irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem &sb->s_type->i_lock_key#8 irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem &dir->lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem rcu_node_0 irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem rcu_read_lock rcu_node_0 irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem &qp->state_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem &xa->xa_lock#17 pool_lock#2 irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem cpu_hotplug_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem cpu_hotplug_lock wq_pool_mutex irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem cpu_hotplug_lock wq_pool_mutex fs_reclaim irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem cpu_hotplug_lock wq_pool_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem cpu_hotplug_lock wq_pool_mutex remove_cache_srcu irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem cpu_hotplug_lock wq_pool_mutex remove_cache_srcu quarantine_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem cpu_hotplug_lock wq_pool_mutex pool_lock#2 irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem cpu_hotplug_lock wq_pool_mutex &obj_hash[i].lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem cpu_hotplug_lock wq_pool_mutex &rq->__lock irq_context: 0 
&rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem cpu_hotplug_lock wq_pool_mutex &wq->mutex irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem cpu_hotplug_lock wq_pool_mutex &wq->mutex &pool->lock/1 irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem kthread_create_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem &p->pi_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem &p->pi_lock &rq->__lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem &x->wait irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem wq_pool_mutex irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem wq_pool_mutex &wq->mutex irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem ib_mad_port_list_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem &mad_queue->lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem &qp->rq.producer_lock irq_context: 0 (wq_completion)events_unbound connector_reaper_work &ACCESS_PRIVATE(ssp->srcu_sup, lock) irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem ib_mad_clients.xa_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem ib_mad_clients.xa_lock &c->lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem ib_mad_clients.xa_lock pool_lock#2 irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem &port_priv->reg_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem ib_agent_port_list_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem &xa->xa_lock#15 irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem &xa->xa_lock#15 &obj_hash[i].lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem &xa->xa_lock#15 pool_lock#2 irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem &xa->xa_lock#15 &____s->seqcount irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem &xa->xa_lock#15 rcu_read_lock pool_lock#2 irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem &xa->xa_lock#15 &obj_hash[i].lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem &rq->__lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem &xa->xa_lock#15 &c->lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem 
devices_rwsem clients_rwsem &device->client_data_rwsem lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem lock kernfs_idr_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem &root->kernfs_rwsem irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem &____s->seqcount#2 irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem lock kernfs_idr_lock &c->lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem lock kernfs_idr_lock pool_lock#2 irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem &obj_hash[i].lock pool_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem &port_priv->reg_lock pool_lock#2 irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem &cm.device_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem lock#7 irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem lock#7 fs_reclaim irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem lock#7 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem lock#7 pool_lock#2 irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem lock#7 crngs.lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem lock#7 &xa->xa_lock#17 irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem lock#7 &id_priv->qp_mutex irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem lock#7 &id_priv->lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem lock#7 &rq->__lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem lock#7 &xa->xa_lock#18 irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem lock#7 &xa->xa_lock#18 pool_lock#2 irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem lock#7 &cm_id_priv->lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem lock#7 &cm_id_priv->lock &cm.lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem lock#7 &xa->xa_lock#17 pool_lock#2 irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem umad_ida.xa_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem &x->wait#9 irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem chrdevs_lock irq_context: 0 
&rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem &k->list_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem gdp_mutex irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem gdp_mutex &rq->__lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem gdp_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem gdp_mutex &k->list_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem gdp_mutex fs_reclaim irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem gdp_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem gdp_mutex pool_lock#2 irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem gdp_mutex lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem gdp_mutex lock kernfs_idr_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem gdp_mutex &root->kernfs_rwsem irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem gdp_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem gdp_mutex &sem->wait_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem gdp_mutex &p->pi_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem gdp_mutex &p->pi_lock &rq->__lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem gdp_mutex &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem bus_type_sem irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem sysfs_symlink_target_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem &root->kernfs_rwsem irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem &dev->power.lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem dpm_list_mtx irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem dpm_list_mtx &rq->__lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem dpm_list_mtx &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem req_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem &x->wait#11 irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem uevent_sock_mutex 
irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem uevent_sock_mutex fs_reclaim irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem uevent_sock_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem uevent_sock_mutex pool_lock#2 irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem uevent_sock_mutex nl_table_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem uevent_sock_mutex &c->lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem uevent_sock_mutex rlock-AF_NETLINK irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock &cfs_rq->removed.lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock &rq->__lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem uevent_sock_mutex rcu_read_lock rcu_node_0 irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem uevent_sock_mutex rcu_read_lock &rq->__lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem uevent_sock_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem uevent_sock_mutex nl_table_wait.lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem uevent_sock_mutex &obj_hash[i].lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem subsys mutex#84 irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem subsys mutex#84 &k->k_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem rcu_read_lock pool_lock#2 irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem pcpu_alloc_mutex irq_context: 0 
&rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem pcpu_alloc_mutex pcpu_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem uverbs_ida.xa_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem rcu_read_lock &rq->__lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem subsys mutex#85 irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem subsys mutex#85 &k->k_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem subsys mutex#86 irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem subsys mutex#86 &k->k_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem (console_sem).lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem console_lock console_srcu console_owner_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem console_lock console_srcu console_owner irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem console_lock console_srcu console_owner &port_lock_key irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem console_lock console_srcu console_owner console_owner_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem rds_ib_devices_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem ib_nodev_conns_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem smc_ib_devices.mutex irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem &device->event_handler_rwsem irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem &pnettable->lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem rcu_read_lock &pool->lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&smcibdev->port_event_work) irq_context: 0 (wq_completion)events (work_completion)(&smcibdev->port_event_work) &rxe->usdev_lock irq_context: 0 (wq_completion)events (work_completion)(&smcibdev->port_event_work) &rxe->usdev_lock 
&pdata->netdev_lock irq_context: 0 (wq_completion)events (work_completion)(&smcibdev->port_event_work) &rxe->usdev_lock rtnl_mutex irq_context: 0 (wq_completion)events (work_completion)(&smcibdev->port_event_work) &table->rwlock irq_context: 0 (wq_completion)events (work_completion)(&smcibdev->port_event_work) smc_lgr_list.lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex fs_reclaim irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &xa->xa_lock#15 irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &xa->xa_lock#15 &c->lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &xa->xa_lock#15 pool_lock#2 irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &rq->__lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &c->lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &n->list_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &n->list_lock &c->lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex pool_lock#2 irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &x->wait#9 irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &obj_hash[i].lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &k->list_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex gdp_mutex irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex gdp_mutex &rq->__lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex gdp_mutex &k->list_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex lock kernfs_idr_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &root->kernfs_rwsem irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex bus_type_sem irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex sysfs_symlink_target_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &root->kernfs_rwsem irq_context: 0 
&rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &____s->seqcount#2 irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &____s->seqcount irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &dev->power.lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex dpm_list_mtx irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex uevent_sock_mutex irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex uevent_sock_mutex &rq->__lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex uevent_sock_mutex fs_reclaim irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex uevent_sock_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex uevent_sock_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex uevent_sock_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex uevent_sock_mutex remove_cache_srcu irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex uevent_sock_mutex remove_cache_srcu quarantine_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex uevent_sock_mutex remove_cache_srcu &c->lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex uevent_sock_mutex remove_cache_srcu &n->list_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex uevent_sock_mutex remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex uevent_sock_mutex remove_cache_srcu &obj_hash[i].lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex uevent_sock_mutex remove_cache_srcu &rq->__lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex uevent_sock_mutex pool_lock#2 irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex uevent_sock_mutex nl_table_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex uevent_sock_mutex &obj_hash[i].lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex uevent_sock_mutex nl_table_wait.lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex subsys mutex#83 irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex subsys mutex#83 &k->k_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem 
&device->compat_devs_mutex &rxe->usdev_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &rxe->usdev_lock &rq->__lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &rxe->usdev_lock &pdata->netdev_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &rxe->usdev_lock rtnl_mutex irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &zone->lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex lock kernfs_idr_lock pool_lock#2 irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex lock kernfs_idr_lock &c->lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex rcu_read_lock rcu_node_0 irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex rcu_read_lock &rq->__lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &root->kernfs_rwsem &rq->__lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex lock kernfs_idr_lock &n->list_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex lock kernfs_idr_lock &n->list_lock &c->lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem &rq->__lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex fs_reclaim &rq->__lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &pcp->lock &zone->lock irq_context: softirq (&data->send_timer) irq_context: softirq (&data->send_timer) rcu_read_lock &pool->lock irq_context: softirq (&data->send_timer) rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: softirq (&data->send_timer) rcu_read_lock &pool->lock &p->pi_lock irq_context: softirq (&hsr->announce_timer) rcu_read_lock &hsr->seqnr_lock &data->lock irq_context: softirq (&hsr->announce_timer) rcu_read_lock &hsr->seqnr_lock &data->lock &obj_hash[i].lock irq_context: softirq (&hsr->announce_timer) rcu_read_lock &hsr->seqnr_lock &data->lock &base->lock irq_context: softirq (&hsr->announce_timer) rcu_read_lock &hsr->seqnr_lock &data->lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&data->dm_alert_work) irq_context: 0 (wq_completion)events (work_completion)(&data->dm_alert_work) fs_reclaim irq_context: 0 (wq_completion)events (work_completion)(&data->dm_alert_work) fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events (work_completion)(&data->dm_alert_work) pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&data->dm_alert_work) &data->lock irq_context: 0 (wq_completion)events (work_completion)(&data->dm_alert_work) &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&data->dm_alert_work) nl_table_lock irq_context: 0 
(wq_completion)events (work_completion)(&data->dm_alert_work) nl_table_wait.lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: softirq (&data->send_timer) rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: softirq (&data->send_timer) rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex lock kernfs_idr_lock &____s->seqcount#2 irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &root->kernfs_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex uevent_sock_mutex &c->lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex uevent_sock_mutex &n->list_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex uevent_sock_mutex &n->list_lock &c->lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex lock kernfs_idr_lock &pcp->lock &zone->lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex lock kernfs_idr_lock &____s->seqcount irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex lock kernfs_idr_lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex rcu_node_0 irq_context: softirq rcu_read_lock &data->lock &obj_hash[i].lock irq_context: softirq rcu_read_lock &data->lock &base->lock irq_context: softirq rcu_read_lock &data->lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->bla.work)->work) rcu_read_lock &data->lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &rcu_state.expedited_wq irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &rcu_state.expedited_wq &p->pi_lock irq_context: 0 &sb->s_type->i_mutex_key#10 &data->lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &root->kernfs_rwsem &cfs_rq->removed.lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &root->kernfs_rwsem &obj_hash[i].lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex batched_entropy_u8.lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex kfence_freelist_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem uevent_sock_mutex irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem uevent_sock_mutex fs_reclaim irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem uevent_sock_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 
&rdma_nl_types[idx].sem link_ops_rwsem uevent_sock_mutex &c->lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem uevent_sock_mutex pool_lock#2 irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem uevent_sock_mutex nl_table_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem uevent_sock_mutex rlock-AF_NETLINK irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock &rq->__lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem uevent_sock_mutex rcu_read_lock rcu_node_0 irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem uevent_sock_mutex rcu_read_lock &rq->__lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem uevent_sock_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem uevent_sock_mutex nl_table_wait.lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock &data->lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem &pdata->netdev_lock &c->lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem devices.xa_lock pool_lock#2 irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem &rxe->usdev_lock (console_sem).lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem &rxe->usdev_lock console_lock console_srcu console_owner_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem &rxe->usdev_lock console_lock console_srcu console_owner irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem &rxe->usdev_lock console_lock console_srcu console_owner &port_lock_key irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem &rxe->usdev_lock console_lock console_srcu console_owner console_owner_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem &rxe->usdev_lock &rq->__lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem &rxe->usdev_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem rtnl_mutex net_rwsem rcu_node_0 irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem rtnl_mutex net_rwsem &rq->__lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem rtnl_mutex net_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem rtnl_mutex net_rwsem rcu_read_lock rcu_node_0 irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem rtnl_mutex net_rwsem &table->lock#4 &device->event_handler_rwsem &rq->__lock irq_context: 0 sk_lock-AF_CAIF elock-AF_CAIF irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_CAIF rcu_state.exp_mutex &rnp->exp_wq[1] irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem lock kernfs_idr_lock 
&____s->seqcount#2 irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem lock kernfs_idr_lock &____s->seqcount irq_context: 0 (wq_completion)events (work_completion)(&data->dm_alert_work) &c->lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem console_lock console_srcu console_owner_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem console_lock console_srcu console_owner irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem console_lock console_srcu console_owner &port_lock_key irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem console_lock console_srcu console_owner console_owner_lock irq_context: 0 sk_lock-AF_INET &tcp_hashinfo.bhash[i].lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh &data->lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem rcu_read_lock &rcu_state.gp_wq irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#8 &____s->seqcount irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem remove_cache_srcu irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem remove_cache_srcu quarantine_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem remove_cache_srcu &c->lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem remove_cache_srcu &n->list_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem remove_cache_srcu &rq->__lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem remove_cache_srcu &obj_hash[i].lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem remove_cache_srcu pool_lock#2 irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)infiniband (work_completion)(&work->work)#2 &c->lock irq_context: 0 (wq_completion)infiniband (work_completion)(&work->work)#2 &n->list_lock irq_context: 0 (wq_completion)infiniband (work_completion)(&work->work)#2 &n->list_lock &c->lock irq_context: 0 (wq_completion)infiniband (work_completion)(&work->work)#2 &rxe->usdev_lock (console_sem).lock irq_context: 0 (wq_completion)infiniband (work_completion)(&work->work)#2 &rxe->usdev_lock console_lock console_srcu console_owner_lock irq_context: 0 (wq_completion)infiniband (work_completion)(&work->work)#2 &rxe->usdev_lock console_lock console_srcu console_owner irq_context: 0 (wq_completion)infiniband (work_completion)(&work->work)#2 &rxe->usdev_lock console_lock console_srcu console_owner &port_lock_key irq_context: 0 (wq_completion)infiniband (work_completion)(&work->work)#2 &rxe->usdev_lock console_lock console_srcu console_owner console_owner_lock irq_context: 0 (wq_completion)infiniband (work_completion)(&work->work)#2 &rxe->usdev_lock &rq->__lock irq_context: 0 (wq_completion)infiniband (work_completion)(&work->work)#2 &rxe->usdev_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&data->dm_alert_work) batched_entropy_u8.lock irq_context: 0 (wq_completion)events (work_completion)(&data->dm_alert_work) kfence_freelist_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem 
&device->client_data_rwsem &xa->xa_lock#16 &c->lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem &xa->xa_lock#17 &c->lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem lock kernfs_idr_lock &____s->seqcount#2 irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem lock kernfs_idr_lock &____s->seqcount irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem &root->kernfs_rwsem &rq->__lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem &root->kernfs_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem &port_priv->reg_lock &c->lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem lock#7 &c->lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem lock#7 &n->list_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem lock#7 &n->list_lock &c->lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem lock#7 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem uevent_sock_mutex &rq->__lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem uevent_sock_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem uevent_sock_mutex rcu_read_lock &rcu_state.gp_wq irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem uevent_sock_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem uevent_sock_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem uevent_sock_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem fs_reclaim &rq->__lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem &cfs_rq->removed.lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &rxe->usdev_lock (console_sem).lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex 
&rxe->usdev_lock console_lock console_srcu console_owner_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &rxe->usdev_lock console_lock console_srcu console_owner irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &rxe->usdev_lock console_lock console_srcu console_owner &port_lock_key irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &rxe->usdev_lock console_lock console_srcu console_owner console_owner_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &rxe->usdev_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&smcibdev->port_event_work) &rxe->usdev_lock &lock->wait_lock irq_context: 0 (wq_completion)events (work_completion)(&smcibdev->port_event_work) &rxe->usdev_lock &pool->lock irq_context: 0 (wq_completion)events (work_completion)(&smcibdev->port_event_work) &rxe->usdev_lock &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&smcibdev->port_event_work) &rxe->usdev_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &lock->wait_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &p->pi_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &p->pi_lock &rq->__lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &sem->wait_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &root->kernfs_rwsem &sem->wait_lock irq_context: 0 (wq_completion)events (work_completion)(&smcibdev->port_event_work) &rxe->usdev_lock (console_sem).lock irq_context: 0 (wq_completion)events (work_completion)(&smcibdev->port_event_work) &rxe->usdev_lock console_lock console_srcu console_owner_lock irq_context: 0 (wq_completion)events (work_completion)(&smcibdev->port_event_work) &rxe->usdev_lock console_lock console_srcu console_owner irq_context: 0 (wq_completion)events (work_completion)(&smcibdev->port_event_work) &rxe->usdev_lock console_lock console_srcu console_owner &port_lock_key irq_context: 0 (wq_completion)events (work_completion)(&smcibdev->port_event_work) &rxe->usdev_lock console_lock console_srcu console_owner console_owner_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex remove_cache_srcu irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex remove_cache_srcu quarantine_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex rcu_read_lock &rcu_state.expedited_wq irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex rcu_read_lock 
&rcu_state.expedited_wq &p->pi_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &zone->lock &____s->seqcount irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 &data->lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex remove_cache_srcu &c->lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex remove_cache_srcu &n->list_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex remove_cache_srcu &obj_hash[i].lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex remove_cache_srcu pool_lock#2 irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem uevent_sock_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx rcu_state.exp_mutex &rnp->exp_wq[1] irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->dat.work)->work) rcu_node_0 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->dat.work)->work) &rcu_state.expedited_wq irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->dat.work)->work) &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->dat.work)->work) &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->dat.work)->work) &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock &anon_vma->rwsem &cfs_rq->removed.lock irq_context: 0 &mm->mmap_lock &anon_vma->rwsem &obj_hash[i].lock irq_context: 0 ebt_mutex &cfs_rq->removed.lock irq_context: 0 ebt_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&data->dm_alert_work) &n->list_lock irq_context: 0 (wq_completion)events (work_completion)(&data->dm_alert_work) &n->list_lock &c->lock irq_context: 0 (wq_completion)events (work_completion)(&data->dm_alert_work) &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&data->dm_alert_work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&data->dm_alert_work) &meta->lock irq_context: 0 (wq_completion)bond3 (work_completion)(&(&bond->ad_work)->work) &rq->__lock irq_context: 0 &xt[i].mutex &pcp->lock 
&zone->lock &____s->seqcount irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock &obj_hash[i].lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock &base->lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock &base->lock &obj_hash[i].lock irq_context: softirq (&data->send_timer) rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem rcu_read_lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem rcu_read_lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#8 remove_cache_srcu rcu_read_lock &rcu_state.gp_wq irq_context: 0 sb_writers#8 remove_cache_srcu rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 sb_writers#8 remove_cache_srcu rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#8 remove_cache_srcu rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)gid-cache-wq (work_completion)(&ndev_work->work) devices_rwsem &pdata->netdev_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem &pdata->netdev_lock ndev_hash_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem &pdata->netdev_lock &obj_hash[i].lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem &xa->xa_lock#17 irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem krc.lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem &xa->xa_lock#15 irq_context: 0 (wq_completion)gid-cache-wq (work_completion)(&ndev_work->work) devices_rwsem &table->lock#4 irq_context: 0 rtnl_mutex &newf->file_lock irq_context: 0 rtnl_mutex &sb->s_type->i_lock_key#4 irq_context: 0 rtnl_mutex &sb->s_type->i_lock_key#4 &dentry->d_lock irq_context: 0 rtnl_mutex tomoyo_ss irq_context: 0 rtnl_mutex tomoyo_ss mmu_notifier_invalidate_range_start irq_context: 0 rtnl_mutex tomoyo_ss &c->lock irq_context: 0 rtnl_mutex tomoyo_ss pool_lock#2 irq_context: 0 rtnl_mutex tomoyo_ss &obj_hash[i].lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem rcu_node_0 irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem &rcu_state.expedited_wq irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem &rcu_state.expedited_wq &p->pi_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sock_diag_mutex &c->lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock &data->lock irq_context: 0 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)events (work_completion)(&aux->work) &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)events (work_completion)(&aux->work) rcu_read_lock &rcu_state.gp_wq irq_context: 0 (wq_completion)events (work_completion)(&aux->work) rcu_read_lock 
&rcu_state.gp_wq &p->pi_lock irq_context: 0 (wq_completion)events (work_completion)(&aux->work) rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&aux->work) rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&aux->work) rcu_read_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)events (work_completion)(&aux->work) rcu_read_lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_TIPC &data->lock irq_context: 0 ipvs->sync_mutex &mm->mmap_lock irq_context: 0 &lo->lo_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &xt[i].mutex purge_vmap_area_lock quarantine_lock irq_context: 0 sk_lock-AF_BLUETOOTH-BTPROTO_HCI &data->lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_BLUETOOTH-BTPROTO_HCI &data->lock irq_context: 0 &net->xfrm.xfrm_cfg_mutex &data->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 &cfs_rq->removed.lock irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 &pcp->lock &zone->lock &____s->seqcount irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#40 rcu_read_lock &xa->xa_lock#6 &obj_hash[i].lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#40 rcu_read_lock &xa->xa_lock#6 &base->lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#40 rcu_read_lock &xa->xa_lock#6 &base->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex sk_lock-AF_INET &mm->mmap_lock fs_reclaim irq_context: 0 rtnl_mutex sk_lock-AF_INET &mm->mmap_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 rtnl_mutex sk_lock-AF_INET &mm->mmap_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 rtnl_mutex sk_lock-AF_INET &mm->mmap_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex sk_lock-AF_INET &mm->mmap_lock &____s->seqcount irq_context: 0 rtnl_mutex sk_lock-AF_INET &mm->mmap_lock stock_lock irq_context: 0 rtnl_mutex sk_lock-AF_INET &mm->mmap_lock &mm->page_table_lock irq_context: 0 rtnl_mutex sk_lock-AF_INET &mm->mmap_lock ptlock_ptr(page) irq_context: 0 rtnl_mutex sk_lock-AF_INET &mm->mmap_lock ptlock_ptr(page)#2 irq_context: 0 dup_mmap_sem &cfs_rq->removed.lock irq_context: 0 dup_mmap_sem &obj_hash[i].lock irq_context: 0 &smc->clcsock_release_lock rtnl_mutex k-sk_lock-AF_INET &mm->mmap_lock irq_context: softirq &(&bond->ad_work)->timer rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 &data->lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 &data->lock &obj_hash[i].lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 &data->lock &base->lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 &data->lock &base->lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock &n->list_lock irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock &n->list_lock &c->lock irq_context: 0 &sb->s_type->i_mutex_key#10 &data->lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 &data->lock &base->lock irq_context: 0 
&sb->s_type->i_mutex_key#10 &data->lock &base->lock &obj_hash[i].lock irq_context: softirq rcu_read_lock_bh rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: softirq rcu_read_lock_bh rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-kex-wg1#7 (work_completion)(&peer->transmit_handshake_work) &n->list_lock irq_context: 0 (wq_completion)wg-kex-wg1#7 (work_completion)(&peer->transmit_handshake_work) &n->list_lock &c->lock irq_context: 0 (wq_completion)wg-kex-wg1#7 (work_completion)(&peer->transmit_handshake_work) &rq->__lock irq_context: 0 (wq_completion)wg-kex-wg0#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &c->lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex shrinker_rwsem &rq->__lock irq_context: 0 &pipe->mutex/1 &mm->mmap_lock rcu_read_lock &rcu_state.gp_wq irq_context: 0 &pipe->mutex/1 &mm->mmap_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 &pipe->mutex/1 &mm->mmap_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 &pipe->mutex/1 &mm->mmap_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&data->dm_alert_work) quarantine_lock irq_context: 0 rcu_read_lock rcu_read_lock &data->lock irq_context: 0 rcu_read_lock rcu_read_lock &data->lock &obj_hash[i].lock irq_context: 0 rcu_read_lock rcu_read_lock &data->lock &base->lock irq_context: 0 rcu_read_lock rcu_read_lock &data->lock &base->lock &obj_hash[i].lock irq_context: 0 tomoyo_ss rcu_read_lock rcu_read_lock pgd_lock irq_context: 0 tomoyo_ss rcu_read_lock rcu_read_lock stock_lock irq_context: 0 tomoyo_ss rcu_read_lock rcu_read_lock key irq_context: 0 tomoyo_ss rcu_read_lock rcu_read_lock pcpu_lock irq_context: 0 tomoyo_ss rcu_read_lock rcu_read_lock percpu_counters_lock irq_context: 0 tomoyo_ss rcu_read_lock rcu_read_lock pcpu_lock stock_lock irq_context: 0 &fsnotify_mark_srcu rcu_read_lock rcu_node_0 irq_context: 0 rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_NETROM rcu_read_lock &c->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &u->bindlock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &u->bindlock &net->unx.table.locks[i] irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &u->bindlock &net->unx.table.locks[i] &net->unx.table.locks[i]/1 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &u->bindlock &net->unx.table.locks[i] &net->unx.table.locks[i]/1 &dentry->d_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &u->bindlock &bsd_socket_locks[i] irq_context: 0 &po->bind_lock irq_context: 0 clock-AF_PACKET irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock_bh &data->lock irq_context: 0 rtnl_mutex &data->lock irq_context: 0 rcu_read_lock &tbl->lock pool_lock#2 irq_context: 0 rcu_read_lock &tbl->lock batched_entropy_u32.lock irq_context: 0 rcu_read_lock &tbl->lock &obj_hash[i].lock irq_context: 0 rcu_read_lock rcu_read_lock_bh &data->lock irq_context: 0 &sb->s_type->i_mutex_key#10 unix_gc_lock rlock-AF_UNIX irq_context: 0 &sb->s_type->i_mutex_key#10 
&bsd_socket_locks[i] irq_context: 0 &sb->s_type->i_mutex_key#10 &dentry->d_lock irq_context: 0 &sb->s_type->i_mutex_key#10 &fsnotify_mark_srcu irq_context: 0 &sb->s_type->i_mutex_key#10 &sb->s_type->i_lock_key#22 irq_context: 0 &sb->s_type->i_mutex_key#10 &wb->list_lock irq_context: 0 &sb->s_type->i_mutex_key#10 &wb->list_lock &sb->s_type->i_lock_key#22 irq_context: 0 &sb->s_type->i_mutex_key#10 &s->s_inode_list_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sb_internal irq_context: 0 &sb->s_type->i_mutex_key#10 sb_internal mmu_notifier_invalidate_range_start irq_context: 0 &sb->s_type->i_mutex_key#10 sb_internal pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 sb_internal &journal->j_state_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sb_internal jbd2_handle irq_context: 0 &sb->s_type->i_mutex_key#10 sb_internal jbd2_handle &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 sb_internal jbd2_handle &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_INET &data->lock irq_context: 0 &sb->s_type->i_mutex_key#10 sb_internal jbd2_handle &mapping->private_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sb_internal jbd2_handle &ei->i_raw_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sb_internal jbd2_handle &sbi->s_orphan_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sb_internal jbd2_handle &sbi->s_orphan_lock &ei->i_raw_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sb_internal jbd2_handle &xa->xa_lock#6 irq_context: 0 &sb->s_type->i_mutex_key#10 sb_internal jbd2_handle &ei->i_es_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sb_internal jbd2_handle &journal->j_wait_updates irq_context: 0 &sb->s_type->i_mutex_key#10 sb_internal &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 inode_hash_lock irq_context: 0 &sb->s_type->i_mutex_key#10 inode_hash_lock &sb->s_type->i_lock_key#22 irq_context: 0 &sb->s_type->i_mutex_key#10 sb_internal jbd2_handle &ei->i_data_sem irq_context: 0 &sb->s_type->i_mutex_key#10 sb_internal jbd2_handle &ei->i_data_sem &ei->i_raw_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sb_internal jbd2_handle &ei->i_data_sem &ei->i_es_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sb_internal jbd2_handle &ei->i_data_sem &ei->i_es_lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sb_internal jbd2_handle &ei->i_data_sem &ei->i_es_lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 sb_internal jbd2_handle &ei->i_data_sem &ei->i_es_lock &sbi->s_es_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sb_internal jbd2_handle &ei->i_data_sem &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 sb_internal jbd2_handle &ei->i_data_sem mmu_notifier_invalidate_range_start irq_context: 0 &sb->s_type->i_mutex_key#10 sb_internal jbd2_handle &ei->i_data_sem pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 sb_internal jbd2_handle &ei->i_data_sem &mapping->private_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sb_internal jbd2_handle &ei->i_data_sem &ret->b_state_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sb_internal jbd2_handle &ei->i_data_sem &ret->b_state_lock &journal->j_list_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sb_internal jbd2_handle &ei->i_data_sem &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sb_internal jbd2_handle &ei->i_data_sem &journal->j_revoke_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sb_internal jbd2_handle &ei->i_data_sem &bgl->locks[i].lock &sbi->s_md_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sb_internal jbd2_handle &ei->i_data_sem &bgl->locks[i].lock 
&obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sb_internal jbd2_handle &ei->i_data_sem &bgl->locks[i].lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 sb_internal jbd2_handle &ei->i_data_sem &sb->s_type->i_lock_key#22 irq_context: 0 &sb->s_type->i_mutex_key#10 sb_internal jbd2_handle tk_core.seq.seqcount irq_context: 0 &sb->s_type->i_mutex_key#10 &fsnotify_mark_srcu &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 &fsnotify_mark_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_INET &tcp_hashinfo.bhash[i].lock batched_entropy_u8.lock irq_context: 0 sk_lock-AF_INET &tcp_hashinfo.bhash[i].lock kfence_freelist_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET &data->lock irq_context: 0 rtnl_mutex &pnettable->lock &rq->__lock irq_context: 0 rcu_state.barrier_mutex rcu_node_0 irq_context: 0 sk_lock-AF_INET (&tw->tw_timer) irq_context: 0 sk_lock-AF_INET rcu_read_lock fastopen_seqlock.seqcount irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 tomoyo_ss rcu_node_0 irq_context: 0 &po->bind_lock ptype_lock irq_context: 0 cb_lock genl_mutex calipso_doi_list_lock irq_context: 0 ppp_mutex &pn->all_ppp_mutex irq_context: 0 rtnl_mutex sk_lock-AF_UNSPEC &mm->mmap_lock ptlock_ptr(page)#2 irq_context: 0 rtnl_mutex sk_lock-AF_UNSPEC rcu_read_lock &pool->lock irq_context: 0 rtnl_mutex sk_lock-AF_UNSPEC rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex sk_lock-AF_UNSPEC rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 rtnl_mutex sk_lock-AF_UNSPEC rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 rtnl_mutex sk_lock-AF_UNSPEC rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 rtnl_mutex sk_lock-AF_UNSPEC rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 &po->bind_lock &obj_hash[i].lock irq_context: 0 rcu_read_lock &im->lock irq_context: 0 &wg->device_update_lock irq_context: 0 &wg->device_update_lock &wg->socket_update_lock irq_context: 0 &wg->device_update_lock rcu_state.exp_mutex rcu_node_0 irq_context: 0 &wg->device_update_lock rcu_state.exp_mutex &obj_hash[i].lock irq_context: 0 &wg->device_update_lock rcu_state.exp_mutex &rq->__lock irq_context: 0 &wg->device_update_lock rcu_state.exp_mutex rcu_read_lock &pool->lock irq_context: 0 &wg->device_update_lock rcu_state.exp_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 &wg->device_update_lock rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 &wg->device_update_lock rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 &wg->device_update_lock rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &wg->device_update_lock rcu_state.exp_mutex &rnp->exp_wq[1] irq_context: 0 &wg->device_update_lock rcu_state.exp_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &wg->device_update_lock &obj_hash[i].lock irq_context: 0 &wg->device_update_lock 
rcu_state.exp_mutex &rnp->exp_wq[2] irq_context: 0 &wg->device_update_lock &wq->mutex irq_context: 0 &wg->device_update_lock rcu_state.exp_mutex &rnp->exp_wq[0] irq_context: 0 &wg->device_update_lock pool_lock#2 irq_context: 0 &wg->device_update_lock &wq->mutex &pool->lock irq_context: 0 &wg->device_update_lock &wq->mutex &x->wait#10 irq_context: 0 &wg->device_update_lock wq_pool_mutex irq_context: 0 &wg->device_update_lock wq_pool_mutex &wq->mutex irq_context: 0 &wg->device_update_lock wq_pool_mutex &wq->mutex &pool->lock irq_context: 0 &wg->device_update_lock &rq->__lock irq_context: 0 &wg->device_update_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &wg->device_update_lock rcu_state.barrier_mutex.wait_lock irq_context: 0 &wg->device_update_lock &wq->mutex &pool->lock/1 irq_context: 0 &wg->device_update_lock wq_pool_mutex &wq->mutex &pool->lock/1 irq_context: 0 &wg->device_update_lock &pool->lock/1 irq_context: 0 &wg->device_update_lock &pool->lock/1 rcu_read_lock &pool->lock irq_context: 0 &wg->device_update_lock &pool->lock/1 rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 &wg->device_update_lock &pool->lock/1 rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 &wg->device_update_lock &pool->lock/1 rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 &wg->device_update_lock &pool->lock/1 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 &wg->device_update_lock &pool->lock/1 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 fill_pool_map-wait-type-override &n->list_lock irq_context: 0 &wg->device_update_lock wq_mayday_lock irq_context: 0 &wg->device_update_lock &p->pi_lock irq_context: 0 &wg->device_update_lock &p->pi_lock &rq->__lock irq_context: 0 &wg->device_update_lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &wg->device_update_lock &x->wait irq_context: 0 &wg->device_update_lock &rnp->exp_lock irq_context: 0 &wg->device_update_lock &rnp->exp_wq[2] irq_context: 0 &wg->device_update_lock rcu_state.exp_mutex.wait_lock irq_context: 0 &wg->device_update_lock rcu_state.exp_mutex &rnp->exp_wq[3] irq_context: 0 &wg->device_update_lock init_lock irq_context: 0 &wg->device_update_lock pcpu_lock irq_context: 0 &wg->device_update_lock &r->consumer_lock#2 irq_context: 0 &wg->device_update_lock rcu_read_lock pool_lock#2 irq_context: 0 &wg->device_update_lock rcu_state.barrier_mutex irq_context: 0 &wg->device_update_lock rcu_state.barrier_mutex rcu_state.barrier_lock irq_context: 0 &wg->device_update_lock rcu_state.barrier_mutex rcu_state.barrier_lock &obj_hash[i].lock irq_context: 0 &wg->device_update_lock rcu_state.barrier_mutex &x->wait#24 irq_context: 0 &wg->device_update_lock rcu_state.barrier_mutex &rq->__lock irq_context: 0 &wg->device_update_lock rcu_state.barrier_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &wg->device_update_lock rcu_state.barrier_mutex rcu_state.barrier_mutex.wait_lock irq_context: 0 &wg->device_update_lock &zone->lock irq_context: 0 &wg->device_update_lock &zone->lock &____s->seqcount irq_context: 0 &sb->s_type->i_mutex_key#10 fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6 sctp_assocs_id_lock &c->lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6 sctp_assocs_id_lock pool_lock#2 irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6 &data->lock irq_context: 0 rtnl_mutex cpu_hotplug_lock 
wq_pool_mutex fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 rtnl_mutex cpu_hotplug_lock wq_pool_mutex fill_pool_map-wait-type-override &rq->__lock irq_context: 0 rtnl_mutex cpu_hotplug_lock wq_pool_mutex fill_pool_map-wait-type-override &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex cpu_hotplug_lock wq_pool_mutex fill_pool_map-wait-type-override pool_lock irq_context: 0 rtnl_mutex cpu_hotplug_lock rcu_read_lock &pool->lock irq_context: 0 rtnl_mutex cpu_hotplug_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex cpu_hotplug_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 rtnl_mutex cpu_hotplug_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 rtnl_mutex cpu_hotplug_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex cpu_hotplug_lock (work_completion)(flush) irq_context: 0 rtnl_mutex &root->kernfs_rwsem kernfs_idr_lock fill_pool_map-wait-type-override &c->lock irq_context: 0 rtnl_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock fill_pool_map-wait-type-override &n->list_lock irq_context: 0 rtnl_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6 fill_pool_map-wait-type-override &____s->seqcount irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6 fill_pool_map-wait-type-override rcu_read_lock pool_lock#2 irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6 fill_pool_map-wait-type-override &obj_hash[i].lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6 rcu_read_lock rcu_read_lock &pool->lock fill_pool_map-wait-type-override pool_lock#2 irq_context: hardirq &vb->stop_update_lock rcu_read_lock &pool->lock fill_pool_map-wait-type-override pool_lock#2 irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6 rcu_read_lock rcu_read_lock &pool->lock fill_pool_map-wait-type-override pool_lock irq_context: hardirq &vb->stop_update_lock rcu_read_lock &pool->lock fill_pool_map-wait-type-override &c->lock irq_context: hardirq &vb->stop_update_lock rcu_read_lock &pool->lock fill_pool_map-wait-type-override pool_lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6 rcu_read_lock &____s->seqcount#2 irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6 rcu_read_lock &____s->seqcount irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 &data->lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 &data->lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 &data->lock &base->lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 &data->lock &base->lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_PACKET rcu_state.exp_mutex rcu_read_lock &pool->lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 sk_lock-AF_PACKET rcu_state.exp_mutex rcu_read_lock &pool->lock fill_pool_map-wait-type-override &c->lock irq_context: 0 sk_lock-AF_PACKET rcu_state.exp_mutex rcu_read_lock &pool->lock fill_pool_map-wait-type-override &n->list_lock irq_context: 0 sk_lock-AF_PACKET rcu_state.exp_mutex rcu_read_lock &pool->lock fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 sk_lock-AF_PACKET rcu_state.exp_mutex rcu_read_lock &pool->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 &wg->device_update_lock pool_lock irq_context: 0 &wg->device_update_lock &rnp->exp_wq[0] irq_context: 
0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem remove_cache_srcu &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&pwq->unbound_release_work) rcu_read_lock &rq->__lock irq_context: 0 &wg->device_update_lock &p->pi_lock &cfs_rq->removed.lock irq_context: 0 &wg->device_update_lock rcu_state.exp_mutex irq_context: 0 &wg->device_update_lock rcu_state.exp_mutex rcu_state.exp_mutex.wait_lock irq_context: 0 &wg->device_update_lock rcu_state.exp_mutex &rq->__lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&pwq->unbound_release_work) rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6 fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_KCM &data->lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 pgd_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 stock_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 key irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 pcpu_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 percpu_counters_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 pcpu_lock stock_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 fill_pool_map-wait-type-override &c->lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 fill_pool_map-wait-type-override pool_lock irq_context: softirq &(&nsim_dev->trap_data->trap_report_dw)->timer rcu_read_lock &pool->lock fill_pool_map-wait-type-override pool_lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6 &data->lock &obj_hash[i].lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6 &data->lock &base->lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6 &data->lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound (reaper_work).work fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)events_unbound (reaper_work).work fill_pool_map-wait-type-override pool_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 &p->pi_lock irq_context: 0 sk_lock-AF_INET6 &data->lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &sb->s_type->i_mutex_key#12 &sb->s_type->i_mutex_key#12/4 rcu_node_0 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss rcu_read_lock rcu_read_lock &rq->__lock &cfs_rq->removed.lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6 krc.lock &obj_hash[i].lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6 krc.lock &base->lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6 krc.lock &base->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex &tn->idrinfo->lock#3 irq_context: 0 rtnl_mutex &tn->idrinfo->lock#3 fs_reclaim irq_context: 0 rtnl_mutex &tn->idrinfo->lock#3 fs_reclaim 
mmu_notifier_invalidate_range_start irq_context: 0 rtnl_mutex &tn->idrinfo->lock#3 fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 rtnl_mutex &tn->idrinfo->lock#3 fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &tn->idrinfo->lock#3 &c->lock irq_context: 0 rtnl_mutex &tn->idrinfo->lock#3 &n->list_lock irq_context: 0 rtnl_mutex &tn->idrinfo->lock#3 &n->list_lock &c->lock irq_context: 0 rtnl_mutex &tn->idrinfo->lock#3 pool_lock#2 irq_context: 0 rtnl_mutex &tn->idrinfo->lock#3 &obj_hash[i].lock irq_context: 0 rtnl_mutex &tn->idrinfo->lock#3 &rq->__lock irq_context: 0 rtnl_mutex &tn->idrinfo->lock#3 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 batched_entropy_u8.lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 kfence_freelist_lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6 &meta->lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6 kfence_freelist_lock irq_context: 0 rtnl_mutex wq_pool_mutex &rq->__lock irq_context: 0 rtnl_mutex wq_pool_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex cpu_hotplug_lock wq_pool_mutex &n->list_lock irq_context: 0 rtnl_mutex cpu_hotplug_lock wq_pool_mutex &n->list_lock &c->lock irq_context: 0 &wg->device_update_lock rcu_node_0 irq_context: 0 sk_lock-AF_PPPOX chan_lock irq_context: 0 sk_lock-AF_PPPOX rcu_state.exp_mutex rcu_node_0 irq_context: 0 &wg->device_update_lock &rcu_state.expedited_wq irq_context: 0 &wg->device_update_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 &wg->device_update_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 &wg->device_update_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_PPPOX rcu_state.exp_mutex &obj_hash[i].lock irq_context: 0 sk_lock-AF_PPPOX rcu_state.exp_mutex rcu_read_lock &pool->lock irq_context: 0 sk_lock-AF_PPPOX rcu_state.exp_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_PPPOX rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 sk_lock-AF_PPPOX rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 sk_lock-AF_PPPOX rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_PPPOX rcu_state.exp_mutex &rq->__lock irq_context: 0 sk_lock-AF_PPPOX rcu_state.exp_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_PPPOX rcu_state.exp_mutex &rnp->exp_wq[1] irq_context: 0 sk_lock-AF_PPPOX &obj_hash[i].lock irq_context: 0 sk_lock-AF_PPPOX clock-AF_PPPOX irq_context: 0 sk_lock-AF_PPPOX rcu_state.exp_mutex &rnp->exp_wq[2] irq_context: 0 (wq_completion)mm_percpu_wq (work_completion)(work) lock#4 rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)mm_percpu_wq (work_completion)(work) lock#4 &obj_hash[i].lock irq_context: 0 &net->xfrm.xfrm_cfg_mutex &data->lock &obj_hash[i].lock irq_context: 0 &net->xfrm.xfrm_cfg_mutex &data->lock &base->lock irq_context: 0 &net->xfrm.xfrm_cfg_mutex &data->lock &base->lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_PACKET &po->bind_lock &match->lock irq_context: 0 sk_lock-AF_PACKET &po->bind_lock &match->lock ptype_lock irq_context: 0 sk_lock-AF_PACKET &po->pg_vec_lock wlock-AF_PACKET irq_context: 0 rcu_read_lock_bh &data->lock irq_context: 0 rcu_read_lock_bh dev->qdisc_tx_busylock 
?: &qdisc_tx_busylock _xmit_NETROM &data->lock irq_context: 0 rcu_read_lock_bh &nr_netdev_xmit_lock_key &data->lock irq_context: 0 rcu_read_lock_bh _xmit_X25#2 &lapbeth->up_lock &data->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 batched_entropy_u8.lock crngs.lock irq_context: 0 fs_reclaim rcu_node_0 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 rcu_read_lock &cfs_rq->removed.lock irq_context: 0 &f->f_lock fasync_lock &obj_hash[i].lock pool_lock irq_context: 0 sk_lock-AF_INET pgd_lock irq_context: 0 sk_lock-AF_INET key irq_context: 0 sk_lock-AF_INET pcpu_lock irq_context: 0 sk_lock-AF_INET percpu_counters_lock irq_context: 0 sk_lock-AF_INET pcpu_lock stock_lock irq_context: 0 &p->lock remove_cache_srcu rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 &p->lock remove_cache_srcu rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 &list->lock#44 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 &rcu_state.expedited_wq &p->pi_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss remove_cache_srcu rcu_read_lock &____s->seqcount irq_context: 0 cb_lock genl_mutex &pnettable->lock irq_context: 0 rtnl_mutex &bond->mode_lock irq_context: 0 &group->mark_mutex &fsnotify_mark_srcu &rq->__lock irq_context: 0 &group->mark_mutex &fsnotify_mark_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &type->i_mutex_dir_key#3 rcu_read_lock &dentry->d_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&map->work) cgroup_mutex irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_TIPC &list->lock#45 irq_context: 0 sk_lock-AF_TIPC &data->lock irq_context: 0 sk_lock-AF_TIPC k-slock-AF_TIPC &list->lock#45 irq_context: 0 cb_lock genl_mutex rcu_read_lock rcu_read_lock_bh rcu_read_lock &n->list_lock irq_context: 0 cb_lock genl_mutex rcu_read_lock rcu_read_lock_bh rcu_read_lock &n->list_lock &c->lock irq_context: 0 rtnl_mutex &bond->mode_lock &obj_hash[i].lock irq_context: 0 rtnl_mutex &bond->mode_lock pool_lock#2 irq_context: 0 rtnl_mutex &bond->mode_lock rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&data->dm_alert_work) &____s->seqcount#2 irq_context: 0 (wq_completion)events (work_completion)(&data->dm_alert_work) &____s->seqcount irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &root->kernfs_rwsem &p->pi_lock &cfs_rq->removed.lock irq_context: 0 &root->kernfs_rwsem &p->pi_lock &rq->__lock irq_context: 0 &root->kernfs_rwsem &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &mm->mmap_lock &rq->__lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 rcu_read_lock &p->pi_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 rcu_read_lock &p->pi_lock &rq->__lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 rcu_read_lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, 
cpu)->seq irq_context: softirq (&icsk->icsk_retransmit_timer) slock-AF_INET &data->lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 tomoyo_ss rcu_read_lock &rcu_state.expedited_wq irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 tomoyo_ss rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 tomoyo_ss rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 tomoyo_ss rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_TIPC k-slock-AF_TIPC &data->lock irq_context: 0 sk_lock-AF_TIPC k-slock-AF_TIPC &obj_hash[i].lock irq_context: 0 sk_lock-AF_TIPC k-slock-AF_TIPC pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_TIPC &data->lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_TIPC &data->lock &base->lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_TIPC &data->lock &base->lock &obj_hash[i].lock irq_context: 0 &xt[i].mutex &base->lock irq_context: 0 &xt[i].mutex &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events pcpu_balance_work pcpu_alloc_mutex batched_entropy_u8.lock irq_context: 0 (wq_completion)events pcpu_balance_work pcpu_alloc_mutex free_vmap_area_lock &obj_hash[i].lock irq_context: 0 (wq_completion)events pcpu_balance_work pcpu_alloc_mutex free_vmap_area_lock pool_lock#2 irq_context: 0 (wq_completion)events pcpu_balance_work pcpu_alloc_mutex &____s->seqcount#2 irq_context: 0 (wq_completion)events pcpu_balance_work pcpu_alloc_mutex &n->list_lock irq_context: 0 (wq_completion)events pcpu_balance_work pcpu_alloc_mutex &n->list_lock &c->lock irq_context: 0 cb_lock genl_mutex rcu_read_lock &tipc_net(net)->bclock irq_context: 0 cb_lock genl_mutex &data->lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 rcu_read_lock rcu_read_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &anon_vma->rwsem stock_lock rcu_read_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 nfnl_subsys_ctnetlink nlk_cb_mutex-NETFILTER &c->lock irq_context: 0 nfnl_subsys_ctnetlink rcu_read_lock pool_lock#2 irq_context: 0 nfnl_subsys_ctnetlink rcu_read_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 nfnl_subsys_ctnetlink rcu_read_lock rcu_read_lock_bh pool_lock#2 irq_context: 0 nfnl_subsys_ctnetlink rlock-AF_NETLINK irq_context: 0 nlk_cb_mutex-NETFILTER irq_context: 0 nlk_cb_mutex-NETFILTER pool_lock#2 irq_context: 0 nlk_cb_mutex-NETFILTER fs_reclaim irq_context: 0 nlk_cb_mutex-NETFILTER fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 nlk_cb_mutex-NETFILTER &c->lock irq_context: 0 nlk_cb_mutex-NETFILTER rlock-AF_NETLINK irq_context: 0 nlk_cb_mutex-NETFILTER &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET l2tp_ip_lock irq_context: 0 nfnl_subsys_ipset &n->list_lock irq_context: 0 nfnl_subsys_ipset &n->list_lock &c->lock irq_context: 0 nfnl_subsys_ipset &rq->__lock irq_context: 0 nfnl_subsys_ipset &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &nft_net->commit_mutex (console_sem).lock irq_context: 0 &nft_net->commit_mutex console_lock console_srcu console_owner_lock irq_context: 0 &nft_net->commit_mutex console_lock console_srcu console_owner irq_context: 0 &nft_net->commit_mutex console_lock console_srcu console_owner &port_lock_key irq_context: 0 &nft_net->commit_mutex console_lock console_srcu console_owner console_owner_lock irq_context: 0 
&root->kernfs_rwsem pgd_lock irq_context: 0 &root->kernfs_rwsem stock_lock irq_context: 0 &root->kernfs_rwsem rcu_read_lock pool_lock#2 irq_context: 0 &root->kernfs_rwsem key irq_context: 0 &root->kernfs_rwsem pcpu_lock irq_context: 0 &root->kernfs_rwsem percpu_counters_lock irq_context: 0 &root->kernfs_rwsem pcpu_lock stock_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 rcu_read_lock &rcu_state.gp_wq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&w->w) nfc_devlist_mutex &genl_data->genl_data_mutex rcu_node_0 irq_context: 0 &kernfs_locks->open_file_mutex[count] &cfs_rq->removed.lock irq_context: 0 rtnl_mutex &data->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex &data->lock &base->lock irq_context: 0 rtnl_mutex &data->lock &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 rcu_read_lock &obj_hash[i].lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh &data->lock &obj_hash[i].lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh &data->lock &base->lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh &data->lock &base->lock &obj_hash[i].lock irq_context: softirq (&ndev->rs_timer) &ndev->lock batched_entropy_u32.lock crngs.lock base_crng.lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->bla.work)->work) rcu_read_lock &data->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->bla.work)->work) rcu_read_lock &data->lock &base->lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->bla.work)->work) rcu_read_lock &data->lock &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 jbd2_handle &journal->j_state_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 lock#4 &obj_hash[i].lock irq_context: softirq (&lapb->t1timer) &lapb->lock &____s->seqcount#2 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex rcu_read_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_CAIF rcu_state.exp_mutex &rnp->exp_wq[3] irq_context: 0 rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock rcu_read_lock pool_lock#2 irq_context: 0 rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock rcu_read_lock &obj_hash[i].lock irq_context: 0 rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock &data->lock irq_context: 0 rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock &obj_hash[i].lock irq_context: 0 rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock pool_lock#2 irq_context: 0 rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock rcu_read_lock &c->lock irq_context: 0 rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)mld 
(work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)gid-cache-wq (work_completion)(&work->work) devices_rwsem &pdata->netdev_lock irq_context: 0 rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock rcu_read_lock &n->list_lock irq_context: 0 rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock rcu_read_lock &n->list_lock &c->lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &type->s_umount_key#31 rcu_read_lock &c->lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &type->s_umount_key#31 rcu_read_lock pool_lock#2 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &type->s_umount_key#31 rcu_read_lock &wb->work_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &type->s_umount_key#31 rcu_read_lock &wb->work_lock &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &type->s_umount_key#31 rcu_read_lock &wb->work_lock &base->lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &type->s_umount_key#31 rcu_read_lock &wb->work_lock &base->lock &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &type->s_umount_key#31 rcu_read_lock &wb->work_lock rcu_read_lock &pool->lock/1 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &type->s_umount_key#31 rcu_read_lock &wb->work_lock rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &type->s_umount_key#31 rcu_read_lock &wb->work_lock rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &type->s_umount_key#31 rcu_read_lock &wb->work_lock rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &type->s_umount_key#31 rcu_read_lock &wb->work_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &type->s_umount_key#31 rcu_read_lock &wb->work_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &type->s_umount_key#31 &bdi->wb_waitq irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &type->s_umount_key#31 &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &type->s_umount_key#31 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ei->i_prealloc_lock &pa->pa_lock#2 irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &pa->pa_lock#2 irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem jbd2_handle &mapping->i_mmap_rwsem mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem jbd2_handle &mapping->i_mmap_rwsem ptlock_ptr(page)#2 irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem jbd2_handle &c->lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem &folio_wait_table[i] irq_context: 0 
(wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem rcu_read_lock &pool->lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem rcu_node_0 irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem &rq->__lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem &journal->j_state_lock &journal->j_wait_transaction_locked irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem jbd2_handle irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem &journal->j_state_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem &journal->j_state_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem &journal->j_state_lock &obj_hash[i].lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem &journal->j_state_lock &base->lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem &journal->j_state_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ret->b_state_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &____s->seqcount irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem bit_wait_table + i irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &rq->__lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &mapping->private_lock irq_context: 0 (wq_completion)writeback 
(work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ei->i_es_lock &obj_hash[i].lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem jbd2_handle lock#4 &lruvec->lru_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &sbi->s_md_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &bgl->locks[i].lock &sbi->s_md_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &bgl->locks[i].lock &ei->i_prealloc_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &rq->__lock irq_context: softirq &fq->mq_flush_lock bit_wait_table + i &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)wg-crypt-wg0#4 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem lock#4 &lruvec->lru_lock irq_context: 0 &mm->mmap_lock sb_pagefaults &wb->list_lock irq_context: 0 &mm->mmap_lock sb_pagefaults &wb->list_lock &sb->s_type->i_lock_key#22 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle rcu_read_lock &memcg->move_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle rcu_read_lock &xa->xa_lock#6 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &mapping->private_lock rcu_read_lock &memcg->move_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem &ei->i_prealloc_lock &pa->pa_lock#2 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem &c->lock irq_context: 0 rtnl_mutex rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &mapping->i_mmap_rwsem mmu_notifier_invalidate_range_start irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &mapping->i_mmap_rwsem ptlock_ptr(page)#2 irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &mapping->private_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &mapping->private_lock rcu_read_lock &memcg->move_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_read_lock &memcg->move_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_read_lock &xa->xa_lock#6 irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_read_lock &xa->xa_lock#6 &s->s_inode_wblist_lock irq_context: 0 &nft_net->commit_mutex rcu_read_lock rcu_node_0 irq_context: 0 &nft_net->commit_mutex rcu_read_lock &rcu_state.expedited_wq irq_context: 0 &nft_net->commit_mutex rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 &nft_net->commit_mutex rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 &nft_net->commit_mutex rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock 
&per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#8 &ei->i_data_sem mmu_notifier_invalidate_range_start irq_context: 0 &sb->s_type->i_mutex_key#8 &ei->i_data_sem pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#8 &ei->i_data_sem &ei->i_es_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &ei->i_data_sem &ei->i_es_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &ei->i_data_sem &ei->i_es_lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#8 &ei->i_data_sem &obj_hash[i].lock irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock rcu_read_lock_bh &data->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &data->lock irq_context: 0 fanout_mutex &n->list_lock irq_context: 0 fanout_mutex &n->list_lock &c->lock irq_context: 0 fanout_mutex &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 fs_reclaim &rq->__lock irq_context: 0 &f->f_pos_lock (console_sem).lock irq_context: 0 &f->f_pos_lock console_owner_lock irq_context: 0 &f->f_pos_lock console_owner irq_context: 0 &f->f_pos_lock console_lock console_srcu console_owner_lock irq_context: 0 &f->f_pos_lock console_lock console_srcu console_owner irq_context: 0 &f->f_pos_lock console_lock console_srcu console_owner &port_lock_key irq_context: 0 &f->f_pos_lock console_lock console_srcu console_owner console_owner_lock irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock &obj_hash[i].lock irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock &base->lock irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock &base->lock &obj_hash[i].lock irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock rcu_read_lock_bh rcu_read_lock &meta->lock irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock rcu_read_lock_bh rcu_read_lock kfence_freelist_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 tomoyo_ss console_owner_lock irq_context: 0 tomoyo_ss console_owner irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 tomoyo_ss rcu_read_lock &cfs_rq->removed.lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 tomoyo_ss rcu_read_lock &obj_hash[i].lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 tomoyo_ss rcu_read_lock pool_lock#2 irq_context: softirq (&hsr->announce_timer) rcu_read_lock &hsr->seqnr_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &n->list_lock irq_context: softirq (&hsr->announce_timer) rcu_read_lock &hsr->seqnr_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &n->list_lock &c->lock irq_context: softirq (&hsr->announce_timer) rcu_read_lock &hsr->seqnr_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 &xt[i].mutex &mm->mmap_lock &obj_hash[i].lock pool_lock irq_context: softirq (&icsk->icsk_retransmit_timer) slock-AF_INET &c->lock irq_context: softirq (&icsk->icsk_retransmit_timer) slock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 irq_context: softirq (&icsk->icsk_retransmit_timer) slock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock &c->lock irq_context: softirq (&icsk->icsk_retransmit_timer) slock-AF_INET rcu_read_lock rcu_read_lock 
rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock &____s->seqcount#2 irq_context: softirq (&icsk->icsk_retransmit_timer) slock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock &____s->seqcount irq_context: softirq (&icsk->icsk_retransmit_timer) slock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock pool_lock#2 irq_context: softirq (&icsk->icsk_retransmit_timer) slock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock tk_core.seq.seqcount irq_context: softirq (&icsk->icsk_retransmit_timer) slock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock &obj_hash[i].lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#2 irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock &base->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ei->i_data_sem rcu_read_lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ei->i_data_sem rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 &vma->vm_lock->lock mmu_notifier_invalidate_range_start &cfs_rq->removed.lock irq_context: 0 &vma->vm_lock->lock mmu_notifier_invalidate_range_start &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 jbd2_handle &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock &obj_hash[i].lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock &base->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#3 oom_adj_mutex rcu_read_lock &p->alloc_lock &p->pi_lock irq_context: 0 sb_writers#3 oom_adj_mutex rcu_read_lock &p->alloc_lock &p->pi_lock &rq->__lock irq_context: 0 sb_writers#3 oom_adj_mutex 
rcu_read_lock &p->alloc_lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_unbound (reaper_work).work fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)events_unbound (reaper_work).work fill_pool_map-wait-type-override &n->list_lock irq_context: 0 (wq_completion)events_unbound (reaper_work).work fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 (wq_completion)events_unbound connector_reaper_work fill_pool_map-wait-type-override &n->list_lock irq_context: 0 (wq_completion)events_unbound connector_reaper_work fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal &base->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 quarantine_lock irq_context: 0 sb_writers#8 kn->active#5 fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 sb_writers#4 sb_internal remove_cache_srcu &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 jbd2_handle rcu_read_lock &rcu_state.gp_wq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 jbd2_handle rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 jbd2_handle rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 jbd2_handle rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &base->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &base->lock &obj_hash[i].lock irq_context: 0 devnet_rename_sem irq_context: 0 rcu_read_lock rcu_read_lock_bh &data->lock &obj_hash[i].lock irq_context: 0 rcu_read_lock rcu_read_lock_bh &data->lock &base->lock irq_context: 0 rcu_read_lock rcu_read_lock_bh &data->lock &base->lock &obj_hash[i].lock irq_context: 0 kn->active#5 fs_reclaim &cfs_rq->removed.lock irq_context: 0 kn->active#5 fs_reclaim &obj_hash[i].lock irq_context: 0 sb_writers#8 kn->active#5 remove_cache_srcu &cfs_rq->removed.lock irq_context: 0 tomoyo_ss mmu_notifier_invalidate_range_start &cfs_rq->removed.lock irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_read_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 remove_cache_srcu rcu_read_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 tomoyo_ss pgd_lock irq_context: 0 tomoyo_ss stock_lock irq_context: 0 tomoyo_ss key irq_context: 0 tomoyo_ss pcpu_lock irq_context: 0 tomoyo_ss percpu_counters_lock irq_context: 0 tomoyo_ss pcpu_lock stock_lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &anon_vma->rwsem rcu_read_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &pipe->mutex/1 &mm->mmap_lock rcu_read_lock &cfs_rq->removed.lock irq_context: 0 &pipe->mutex/1 &mm->mmap_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 kn->active#5 rcu_read_lock &cfs_rq->removed.lock irq_context: 0 kn->active#5 rcu_read_lock &obj_hash[i].lock irq_context: 0 kn->active#5 rcu_read_lock &rcu_state.gp_wq irq_context: 0 kn->active#5 rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 kn->active#5 rcu_read_lock &rcu_state.gp_wq &p->pi_lock 
&rq->__lock irq_context: 0 kn->active#5 rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 kn->active#5 rcu_read_lock &rcu_state.expedited_wq irq_context: 0 kn->active#5 rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 kn->active#5 rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 kn->active#5 rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &p->lock rcu_read_lock &cfs_rq->removed.lock irq_context: 0 &p->lock rcu_read_lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &rq->__lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &rq->__lock &base->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &rq->__lock &base->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex pcpu_lock stock_lock irq_context: 0 sk_lock-AF_INET &dccp_hashinfo.bhash[i].lock irq_context: 0 sk_lock-AF_INET &dccp_hashinfo.bhash[i].lock &dccp_hashinfo.bhash2[i].lock irq_context: 0 sk_lock-AF_INET &dccp_hashinfo.bhash[i].lock &dccp_hashinfo.bhash2[i].lock stock_lock irq_context: 0 sk_lock-AF_INET &dccp_hashinfo.bhash[i].lock &dccp_hashinfo.bhash2[i].lock &____s->seqcount#2 irq_context: 0 sk_lock-AF_INET &dccp_hashinfo.bhash[i].lock &dccp_hashinfo.bhash2[i].lock &____s->seqcount irq_context: 0 sk_lock-AF_INET &dccp_hashinfo.bhash[i].lock &dccp_hashinfo.bhash2[i].lock pool_lock#2 irq_context: 0 sk_lock-AF_INET &dccp_hashinfo.bhash[i].lock &dccp_hashinfo.bhash2[i].lock &c->lock irq_context: 0 sk_lock-AF_INET &dccp_hashinfo.bhash[i].lock &dccp_hashinfo.bhash2[i].lock clock-AF_INET irq_context: 0 &sb->s_type->i_mutex_key#10 slock-AF_INET &dccp_hashinfo.bhash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 slock-AF_INET &dccp_hashinfo.bhash[i].lock stock_lock irq_context: 0 &sb->s_type->i_mutex_key#10 slock-AF_INET &dccp_hashinfo.bhash[i].lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 slock-AF_INET &dccp_hashinfo.bhash[i].lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 slock-AF_INET &dccp_hashinfo.bhash[i].lock &dccp_hashinfo.bhash2[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 slock-AF_INET &dccp_hashinfo.bhash[i].lock &dccp_hashinfo.bhash2[i].lock stock_lock irq_context: 0 &sb->s_type->i_mutex_key#10 slock-AF_INET &dccp_hashinfo.bhash[i].lock &dccp_hashinfo.bhash2[i].lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 slock-AF_INET &dccp_hashinfo.bhash[i].lock &dccp_hashinfo.bhash2[i].lock pool_lock#2 irq_context: 0 &type->i_mutex_dir_key#3 fs_reclaim &cfs_rq->removed.lock irq_context: 0 &type->i_mutex_dir_key#3 fs_reclaim &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex batched_entropy_u8.lock crngs.lock irq_context: 0 &iint->mutex remove_cache_srcu irq_context: 0 &iint->mutex remove_cache_srcu quarantine_lock irq_context: 0 &iint->mutex remove_cache_srcu &c->lock irq_context: 0 &iint->mutex remove_cache_srcu &n->list_lock irq_context: 0 &iint->mutex remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 &iint->mutex remove_cache_srcu &obj_hash[i].lock irq_context: 0 &p->lock &of->mutex kn->active#5 remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_data_sem &ei->i_es_lock quarantine_lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 pool_lock irq_context: 0 &pipe->mutex/1 
&mm->mmap_lock &cfs_rq->removed.lock irq_context: 0 &pipe->mutex/1 &mm->mmap_lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem rcu_read_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock &base->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem quarantine_lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &mm->context.lock &rq->__lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &mm->context.lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET &h->lhash2[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET &dccp_hashinfo.bhash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET &dccp_hashinfo.bhash[i].lock stock_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET &dccp_hashinfo.bhash[i].lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET &dccp_hashinfo.bhash[i].lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET &dccp_hashinfo.bhash[i].lock &dccp_hashinfo.bhash2[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET &dccp_hashinfo.bhash[i].lock &dccp_hashinfo.bhash2[i].lock stock_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET &dccp_hashinfo.bhash[i].lock &dccp_hashinfo.bhash2[i].lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET &dccp_hashinfo.bhash[i].lock &dccp_hashinfo.bhash2[i].lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET &queue->rskq_lock irq_context: 0 rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 fs_reclaim &cfs_rq->removed.lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 fs_reclaim &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh &data->lock irq_context: 0 &type->i_mutex_dir_key#4 &mm->mmap_lock mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#4 sb_internal rcu_read_lock &rcu_state.gp_wq irq_context: 0 sb_writers#4 sb_internal rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 sb_writers#4 sb_internal rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 sb_internal rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &journal->j_state_lock &journal->j_wait_commit &p->pi_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &journal->j_state_lock &journal->j_wait_commit &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &journal->j_state_lock &journal->j_wait_commit &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem &ei->i_es_lock key#7 
irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem &ei->i_es_lock key#8 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &cfs_rq->removed.lock irq_context: 0 tomoyo_ss remove_cache_srcu rcu_read_lock &cfs_rq->removed.lock irq_context: 0 tomoyo_ss remove_cache_srcu rcu_read_lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &p->pi_lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 sk_lock-AF_PACKET &po->bind_lock rcu_read_lock batched_entropy_u8.lock irq_context: 0 sk_lock-AF_PACKET &po->bind_lock rcu_read_lock kfence_freelist_lock irq_context: 0 &group->mark_mutex remove_cache_srcu &meta->lock irq_context: 0 &group->mark_mutex remove_cache_srcu kfence_freelist_lock irq_context: 0 &sb->s_type->i_mutex_key#10 &c->lock irq_context: 0 &fsnotify_mark_srcu &obj_hash[i].lock pool_lock irq_context: 0 loop_validate_mutex loop_validate_mutex.wait_lock irq_context: 0 loop_validate_mutex.wait_lock irq_context: 0 &type->i_mutex_dir_key#3 remove_cache_srcu rcu_read_lock &rq->__lock irq_context: 0 &type->i_mutex_dir_key#3 remove_cache_srcu rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &type->i_mutex_dir_key#4 &root->kernfs_rwsem pgd_lock irq_context: 0 &type->i_mutex_dir_key#4 &root->kernfs_rwsem stock_lock irq_context: 0 &type->i_mutex_dir_key#4 &root->kernfs_rwsem rcu_read_lock pool_lock#2 irq_context: 0 &type->i_mutex_dir_key#4 &root->kernfs_rwsem key irq_context: 0 &type->i_mutex_dir_key#4 &root->kernfs_rwsem pcpu_lock irq_context: 0 &type->i_mutex_dir_key#4 &root->kernfs_rwsem percpu_counters_lock irq_context: 0 &type->i_mutex_dir_key#4 &root->kernfs_rwsem pcpu_lock stock_lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&(&ssp->srcu_sup->work)->work) rcu_node_0 irq_context: 0 (wq_completion)rcu_gp (work_completion)(&(&ssp->srcu_sup->work)->work) &rcu_state.expedited_wq irq_context: 0 (wq_completion)rcu_gp (work_completion)(&(&ssp->srcu_sup->work)->work) &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&(&ssp->srcu_sup->work)->work) &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&(&ssp->srcu_sup->work)->work) &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 sb_internal jbd2_handle rcu_read_lock &rcu_state.expedited_wq irq_context: 0 sb_writers#4 sb_internal jbd2_handle rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rcu_state.barrier_mutex &rq->__lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 jbd2_handle &obj_hash[i].lock pool_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 jbd2_handle fill_pool_map-wait-type-override &c->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 jbd2_handle fill_pool_map-wait-type-override &n->list_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 jbd2_handle 
fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 jbd2_handle fill_pool_map-wait-type-override pool_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &ei->i_es_lock &n->list_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &ei->i_es_lock &n->list_lock &c->lock irq_context: 0 &p->lock &of->mutex &rq->__lock &cfs_rq->removed.lock irq_context: 0 &type->i_mutex_dir_key#3 sb_writers#4 &cfs_rq->removed.lock irq_context: 0 cb_lock genl_mutex batched_entropy_u8.lock irq_context: 0 cb_lock genl_mutex kfence_freelist_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &sbi->s_orphan_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &sbi->s_orphan_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal &rq->__lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem quarantine_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss rcu_read_lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss rcu_read_lock &obj_hash[i].lock irq_context: 0 sb_writers#8 &of->mutex kn->active#5 uevent_sock_mutex fs_reclaim &cfs_rq->removed.lock irq_context: 0 sb_writers#8 &of->mutex kn->active#5 uevent_sock_mutex fs_reclaim &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 &n->list_lock irq_context: 0 tomoyo_ss rcu_read_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &mm->mmap_lock &anon_vma->rwsem stock_lock rcu_read_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &ei->i_es_lock rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#4 tomoyo_ss remove_cache_srcu &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle rcu_read_lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle rcu_read_lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 rcu_read_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &type->s_umount_key#48 &obj_hash[i].lock pool_lock irq_context: 0 sb_writers#4 sb_internal &rq->__lock &obj_hash[i].lock irq_context: 0 sb_writers#4 sb_internal &rq->__lock &base->lock irq_context: 0 sb_writers#4 sb_internal &rq->__lock &base->lock &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &xa->xa_lock#15 irq_context: 0 &hdev->req_lock &wq->mutex &rq->__lock irq_context: 0 &hdev->req_lock &wq->mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &hdev->req_lock &hdev->lock hci_cb_list_lock rcu_state.exp_mutex &rnp->exp_wq[2] irq_context: 0 &hdev->req_lock &hdev->lock rcu_read_lock &pool->lock/1 irq_context: 0 &hdev->req_lock &hdev->lock rcu_state.exp_mutex &rnp->exp_wq[3] irq_context: 0 &hdev->req_lock &hdev->lock hci_cb_list_lock rcu_state.exp_mutex &rnp->exp_wq[0] irq_context: 0 &hdev->req_lock &hdev->lock rcu_state.exp_mutex &rnp->exp_wq[1] irq_context: 0 &hdev->req_lock &hdev->lock &obj_hash[i].lock pool_lock irq_context: 0 &hdev->req_lock &data->lock irq_context: 0 &hdev->lock 
&data->lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex pool_lock#2 irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &x->wait#9 irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &k->list_lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex gdp_mutex irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex gdp_mutex &k->list_lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex lock kernfs_idr_lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &root->kernfs_rwsem irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex bus_type_sem irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex sysfs_symlink_target_lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex lock kernfs_idr_lock &c->lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex lock kernfs_idr_lock pool_lock#2 irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &c->lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &root->kernfs_rwsem irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &dev->power.lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex dpm_list_mtx irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex uevent_sock_mutex irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex uevent_sock_mutex fs_reclaim irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex uevent_sock_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex uevent_sock_mutex pool_lock#2 irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex uevent_sock_mutex nl_table_lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex uevent_sock_mutex &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex uevent_sock_mutex nl_table_wait.lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex subsys mutex#83 irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex subsys mutex#83 &k->k_lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &rxe->usdev_lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &rxe->usdev_lock &pdata->netdev_lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &rxe->usdev_lock rtnl_mutex irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &zone->lock irq_context: 0 pernet_ops_rwsem devices_rwsem 
rdma_nets_rwsem &device->compat_devs_mutex &____s->seqcount irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &n->list_lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &n->list_lock &c->lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &____s->seqcount#2 irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &pcp->lock &zone->lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &pcp->lock &zone->lock &____s->seqcount irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &rxe->usdev_lock (console_sem).lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &rxe->usdev_lock console_lock console_srcu console_owner_lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &rxe->usdev_lock console_lock console_srcu console_owner irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &rxe->usdev_lock console_lock console_srcu console_owner &port_lock_key irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &rxe->usdev_lock console_lock console_srcu console_owner console_owner_lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex lock kernfs_idr_lock &n->list_lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex lock kernfs_idr_lock &n->list_lock &c->lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &rq->__lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &root->kernfs_rwsem &rq->__lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &data->lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex rcu_read_lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->tt.work)->work) fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->tt.work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond0#8 irq_context: 0 (wq_completion)bond0#8 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond0#8 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond0#8 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond0#8 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex team->team_lock_key#8 irq_context: 0 rtnl_mutex team->team_lock_key#8 fs_reclaim irq_context: 0 rtnl_mutex team->team_lock_key#8 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 rtnl_mutex team->team_lock_key#8 netpoll_srcu irq_context: 0 rtnl_mutex team->team_lock_key#8 net_rwsem irq_context: 0 rtnl_mutex team->team_lock_key#8 net_rwsem &list->lock#2 irq_context: 0 rtnl_mutex team->team_lock_key#8 &tn->lock irq_context: 0 rtnl_mutex team->team_lock_key#8 _xmit_ETHER irq_context: 0 rtnl_mutex team->team_lock_key#8 &dir->lock#2 irq_context: 0 rtnl_mutex 
team->team_lock_key#8 input_pool.lock irq_context: 0 rtnl_mutex team->team_lock_key#8 rcu_read_lock &ndev->lock irq_context: 0 rtnl_mutex team->team_lock_key#8 &c->lock irq_context: 0 rtnl_mutex team->team_lock_key#8 &n->list_lock irq_context: 0 rtnl_mutex team->team_lock_key#8 &n->list_lock &c->lock irq_context: 0 rtnl_mutex team->team_lock_key#8 &obj_hash[i].lock irq_context: 0 rtnl_mutex team->team_lock_key#8 nl_table_lock irq_context: 0 rtnl_mutex team->team_lock_key#8 nl_table_wait.lock irq_context: 0 rtnl_mutex team->team_lock_key#8 rcu_read_lock &pool->lock/1 irq_context: 0 rtnl_mutex team->team_lock_key#8 rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 rtnl_mutex team->team_lock_key#8 rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 rtnl_mutex team->team_lock_key#8 rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 rtnl_mutex team->team_lock_key#8 rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex team->team_lock_key#8 &rq->__lock irq_context: 0 rtnl_mutex team->team_lock_key#8 &in_dev->mc_tomb_lock irq_context: 0 rtnl_mutex team->team_lock_key#8 &im->lock irq_context: 0 rtnl_mutex team->team_lock_key#8 cbs_list_lock irq_context: 0 rtnl_mutex team->team_lock_key#8 &ndev->lock irq_context: 0 rtnl_mutex team->team_lock_key#8 sysfs_symlink_target_lock irq_context: 0 rtnl_mutex team->team_lock_key#8 lock irq_context: 0 rtnl_mutex team->team_lock_key#8 lock kernfs_idr_lock irq_context: 0 rtnl_mutex team->team_lock_key#8 &root->kernfs_rwsem irq_context: 0 rtnl_mutex team->team_lock_key#8 &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 rtnl_mutex team->team_lock_key#8 &____s->seqcount#2 irq_context: 0 rtnl_mutex team->team_lock_key#8 &____s->seqcount irq_context: 0 rtnl_mutex team->team_lock_key#8 pool_lock#2 irq_context: 0 rtnl_mutex team->team_lock_key#8 lweventlist_lock irq_context: 0 rtnl_mutex team->team_lock_key#8 lweventlist_lock &c->lock irq_context: 0 rtnl_mutex team->team_lock_key#8 lweventlist_lock &n->list_lock irq_context: 0 rtnl_mutex team->team_lock_key#8 lweventlist_lock &n->list_lock &c->lock irq_context: 0 rtnl_mutex team->team_lock_key#8 lweventlist_lock &dir->lock#2 irq_context: 0 rtnl_mutex team->team_lock_key#8 (console_sem).lock irq_context: 0 rtnl_mutex team->team_lock_key#8 console_lock console_srcu console_owner_lock irq_context: 0 rtnl_mutex team->team_lock_key#8 console_lock console_srcu console_owner irq_context: 0 rtnl_mutex team->team_lock_key#8 console_lock console_srcu console_owner &port_lock_key irq_context: 0 rtnl_mutex team->team_lock_key#8 console_lock console_srcu console_owner console_owner_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh &data->lock irq_context: softirq (&tcp_orphan_timer) fill_pool_map-wait-type-override pool_lock#2 irq_context: softirq (&tcp_orphan_timer) fill_pool_map-wait-type-override &c->lock irq_context: softirq (&tcp_orphan_timer) fill_pool_map-wait-type-override pool_lock irq_context: 0 rtnl_mutex lock kernfs_idr_lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 rtnl_mutex rcu_state.exp_mutex fill_pool_map-wait-type-override &c->lock irq_context: 0 rtnl_mutex &idev->mc_lock &dev_addr_list_lock_key#3 &n->list_lock irq_context: 0 rtnl_mutex &idev->mc_lock &dev_addr_list_lock_key#3 &n->list_lock &c->lock irq_context: 0 (wq_completion)wg-crypt-wg0#8 (work_completion)(&({ do { const void *__vpp_verify = 
(typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-crypt-wg0#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock irq_context: 0 (wq_completion)wg-crypt-wg0#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg0#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 (wq_completion)wg-crypt-wg0#8 (work_completion)(&peer->transmit_packet_work) irq_context: 0 (wq_completion)wg-crypt-wg0#8 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg0#8 (work_completion)(&peer->transmit_packet_work) &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg0#8 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock irq_context: 0 (wq_completion)wg-crypt-wg0#8 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#7 irq_context: 0 (wq_completion)wg-crypt-wg0#8 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)wg-crypt-wg0#8 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-crypt-wg0#8 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: 0 (wq_completion)wg-crypt-wg1#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg2#15 irq_context: 0 (wq_completion)wg-kex-wg2#15 (work_completion)(&peer->transmit_handshake_work) irq_context: 0 (wq_completion)wg-kex-wg2#15 (work_completion)(&peer->transmit_handshake_work) tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg2#15 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock irq_context: 0 (wq_completion)wg-kex-wg2#15 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock irq_context: 0 (wq_completion)wg-kex-wg2#15 
(work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock crngs.lock irq_context: 0 (wq_completion)wg-kex-wg2#15 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg2#15 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg2#15 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock rcu_read_lock_bh batched_entropy_u32.lock irq_context: 0 (wq_completion)wg-kex-wg2#15 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock rcu_read_lock_bh &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg2#15 (work_completion)(&peer->transmit_handshake_work) &cookie->lock irq_context: 0 (wq_completion)wg-kex-wg2#15 (work_completion)(&peer->transmit_handshake_work) &cookie->lock irq_context: 0 (wq_completion)wg-kex-wg2#15 (work_completion)(&peer->transmit_handshake_work) rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg2#15 (work_completion)(&peer->transmit_handshake_work) rcu_read_lock_bh &base->lock irq_context: 0 (wq_completion)wg-kex-wg2#15 (work_completion)(&peer->transmit_handshake_work) rcu_read_lock_bh &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg2#15 (work_completion)(&peer->transmit_handshake_work) &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg2#15 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock irq_context: 0 (wq_completion)wg-kex-wg2#15 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#7 irq_context: 0 (wq_completion)wg-kex-wg2#15 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)wg-kex-wg2#15 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg2#15 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: 0 (wq_completion)wg-kex-wg2#15 (work_completion)(&peer->transmit_handshake_work) batched_entropy_u8.lock irq_context: 0 (wq_completion)wg-kex-wg2#15 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)wg-kex-wg2#15 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh batched_entropy_u32.lock irq_context: 0 (wq_completion)wg-kex-wg1#16 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg1#16 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock 
tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg1#16 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock crngs.lock irq_context: 0 (wq_completion)wg-kex-wg1#16 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg1#16 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock rcu_read_lock_bh batched_entropy_u32.lock irq_context: 0 (wq_completion)wg-kex-wg1#16 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock rcu_read_lock_bh &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg1#16 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &cookie->lock irq_context: 0 (wq_completion)wg-kex-wg1#16 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &cookie->lock irq_context: 0 (wq_completion)wg-kex-wg1#16 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock irq_context: 0 (wq_completion)wg-kex-wg1#16 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 (wq_completion)wg-kex-wg1#16 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) 
*)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)wg-kex-wg1#16 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &ul->lock irq_context: 0 (wq_completion)wg-kex-wg1#16 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg1#16 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#7 irq_context: 0 (wq_completion)wg-kex-wg1#16 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#10 irq_context: 0 misc_mtx rfkill_global_mutex &data->mtx &n->list_lock &c->lock irq_context: 0 (wq_completion)wg-kex-wg1#16 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg1#16 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: 0 (wq_completion)wg-kex-wg2#16 irq_context: 0 (wq_completion)wg-kex-wg2#16 &rq->__lock irq_context: 0 (wq_completion)wg-kex-wg2#16 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) irq_context: 0 (wq_completion)wg-kex-wg2#16 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) 
*)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &r->consumer_lock#2 irq_context: 0 (wq_completion)wg-kex-wg2#16 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock irq_context: 0 (wq_completion)wg-kex-wg2#16 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock irq_context: 0 (wq_completion)wg-kex-wg2#16 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock irq_context: 0 (wq_completion)wg-kex-wg2#16 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock irq_context: 0 (wq_completion)wg-kex-wg2#16 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock irq_context: 0 (wq_completion)wg-kex-wg2#16 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock fs_reclaim irq_context: 0 (wq_completion)wg-kex-wg2#16 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)wg-kex-wg2#16 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg2#16 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + 
(((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock rcu_read_lock_bh &peer->keypairs.keypair_update_lock irq_context: 0 (wq_completion)wg-kex-wg2#16 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock rcu_read_lock_bh &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg2#16 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg2#16 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh &base->lock irq_context: 0 (wq_completion)wg-kex-wg2#16 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg2#16 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg2#16 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &base->lock irq_context: 0 (wq_completion)wg-kex-wg2#16 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg2#16 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg2#16 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + 
(((__per_cpu_offset[(cpu)])))); }); })->work) &list->lock#17 irq_context: 0 (wq_completion)wg-kex-wg2#16 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh &r->producer_lock#2 irq_context: 0 (wq_completion)wg-kex-wg2#16 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh rcu_read_lock &pool->lock irq_context: 0 (wq_completion)wg-kex-wg2#16 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg2#16 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 (wq_completion)wg-kex-wg2#16 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &rq->__lock irq_context: 0 (wq_completion)wg-kex-wg2#16 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)wg-kex-wg2#16 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-crypt-wg2#8 irq_context: 0 (wq_completion)wg-crypt-wg2#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) irq_context: 0 (wq_completion)wg-crypt-wg2#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : 
"=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &r->consumer_lock#2 irq_context: 0 (wq_completion)wg-crypt-wg2#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock irq_context: 0 (wq_completion)wg-crypt-wg2#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg2#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 (wq_completion)wg-crypt-wg2#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)wg-crypt-wg2#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-crypt-wg2#8 (work_completion)(&peer->transmit_packet_work) irq_context: 0 (wq_completion)wg-crypt-wg2#8 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg2#8 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh &base->lock irq_context: 0 (wq_completion)wg-crypt-wg2#8 (work_completion)(&peer->transmit_packet_work) &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg2#8 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock irq_context: 0 (wq_completion)wg-crypt-wg2#8 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh batched_entropy_u32.lock irq_context: 0 (wq_completion)wg-crypt-wg2#8 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#7 irq_context: 0 (wq_completion)wg-crypt-wg2#8 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)wg-crypt-wg2#8 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-crypt-wg2#8 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock 
rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: 0 (wq_completion)wg-crypt-wg2#8 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-crypt-wg2#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) tk_core.seq.seqcount irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx fill_pool_map-wait-type-override &n->list_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 fill_pool_map-wait-type-override &n->list_lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 (wq_completion)phy21 irq_context: 0 (wq_completion)phy21 &rq->__lock irq_context: 0 (wq_completion)phy21 (work_completion)(&local->reconfig_filter) irq_context: 0 (wq_completion)phy21 (work_completion)(&local->reconfig_filter) &local->filter_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->mtx &local->chanctx_mtx &n->list_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->mtx &local->chanctx_mtx &n->list_lock &c->lock irq_context: 0 (wq_completion)events wireless_nlevent_work net_rwsem &data->lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex fill_pool_map-wait-type-override &c->lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx uevent_sock_mutex &____s->seqcount#2 irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx uevent_sock_mutex &____s->seqcount irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx fill_pool_map-wait-type-override &c->lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx fill_pool_map-wait-type-override &n->list_lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 (wq_completion)phy22 irq_context: 0 (wq_completion)phy22 (work_completion)(&local->reconfig_filter) irq_context: 0 (wq_completion)phy22 (work_completion)(&local->reconfig_filter) &local->filter_lock irq_context: softirq rcu_read_lock &local->rx_path_lock &data->lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex net_rwsem &data->lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 &n->list_lock &c->lock irq_context: 0 &type->s_umount_key#46/1 &pcp->lock &zone->lock irq_context: 0 &type->s_umount_key#46/1 &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &type->s_umount_key#46/1 &sb->s_type->i_mutex_key#20 &pcp->lock &zone->lock irq_context: 0 &type->s_umount_key#46/1 &sb->s_type->i_mutex_key#20 &pcp->lock &zone->lock &____s->seqcount irq_context: 0 sb_writers#10 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem rcu_state.exp_mutex &rnp->exp_wq[3] irq_context: 0 sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem cpuset_rwsem rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 sb_writers#11 &of->mutex cgroup_mutex 
cpu_hotplug_lock cgroup_threadgroup_rwsem cpuset_rwsem rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem cpuset_rwsem rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem cpuset_rwsem rcu_state.exp_mutex &rnp->exp_wq[0] irq_context: 0 kn->active#57 &kernfs_locks->open_file_mutex[count] &c->lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx rcu_read_lock &sta->lock krc.lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx rcu_read_lock &sta->lock krc.lock &base->lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx rcu_read_lock &sta->lock krc.lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &data->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock &data->lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#8 &nsim_trap_data->trap_lock quarantine_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock &data->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 &data->lock irq_context: 0 &fsnotify_mark_srcu fs_reclaim &rq->__lock irq_context: 0 &fsnotify_mark_srcu fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 &data->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock fill_pool_map-wait-type-override &c->lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock &data->lock &obj_hash[i].lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock &data->lock &base->lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock &data->lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &hsr->seqnr_lock &data->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &hsr->seqnr_lock &data->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &data->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) 
&idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &hsr->seqnr_lock rcu_read_lock &____s->seqcount#2 irq_context: 0 (wq_completion)wg-crypt-wg0#8 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh &base->lock irq_context: 0 (wq_completion)wg-crypt-wg0#8 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh &base->lock &obj_hash[i].lock irq_context: softirq (&icsk->icsk_retransmit_timer) slock-AF_INET &lruvec->lru_lock irq_context: softirq (&icsk->icsk_retransmit_timer) slock-AF_INET rcu_read_lock pool_lock#2 irq_context: softirq (&tw->tw_timer) &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock &meta->lock irq_context: softirq (&tw->tw_timer) &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock kfence_freelist_lock irq_context: softirq (&pool->idle_timer) rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: softirq (&icsk->icsk_retransmit_timer) slock-AF_INET &hashinfo->ehash_locks[i] irq_context: softirq (&pool->idle_timer) rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond0#8 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond0#8 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond0#8 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: softirq (&icsk->icsk_retransmit_timer) slock-AF_INET &tcp_hashinfo.bhash[i].lock irq_context: softirq (&icsk->icsk_retransmit_timer) slock-AF_INET &tcp_hashinfo.bhash[i].lock stock_lock irq_context: softirq (&icsk->icsk_retransmit_timer) slock-AF_INET &tcp_hashinfo.bhash[i].lock &obj_hash[i].lock irq_context: softirq (&icsk->icsk_retransmit_timer) slock-AF_INET &tcp_hashinfo.bhash[i].lock pool_lock#2 irq_context: softirq (&icsk->icsk_retransmit_timer) slock-AF_INET &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock irq_context: softirq (&icsk->icsk_retransmit_timer) slock-AF_INET &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock stock_lock irq_context: softirq (&icsk->icsk_retransmit_timer) slock-AF_INET &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock &obj_hash[i].lock irq_context: softirq (&icsk->icsk_retransmit_timer) slock-AF_INET &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock pool_lock#2 irq_context: softirq (&icsk->icsk_retransmit_timer) slock-AF_INET elock-AF_INET irq_context: softirq (&icsk->icsk_retransmit_timer) pool_lock#2 irq_context: softirq (&icsk->icsk_retransmit_timer) &dir->lock irq_context: softirq (&icsk->icsk_retransmit_timer) &obj_hash[i].lock irq_context: softirq (&icsk->icsk_retransmit_timer) &dir->lock &obj_hash[i].lock irq_context: softirq (&icsk->icsk_retransmit_timer) &dir->lock pool_lock#2 irq_context: softirq (&icsk->icsk_retransmit_timer) rcu_read_lock &pool->lock/1 irq_context: softirq (&icsk->icsk_retransmit_timer) rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: softirq (&icsk->icsk_retransmit_timer) rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: softirq (&icsk->icsk_retransmit_timer) rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: softirq (&icsk->icsk_retransmit_timer) rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq (&icsk->icsk_retransmit_timer) stock_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &wg->device_update_lock rcu_state.exp_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq 
irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &wg->device_update_lock rcu_state.exp_mutex.wait_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &wg->device_update_lock &p->pi_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &wg->device_update_lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &wg->device_update_lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &wg->device_update_lock &rnp->exp_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &wg->device_update_lock &rnp->exp_wq[3] irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &wg->device_update_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem nf_hook_mutex &____s->seqcount#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem nf_hook_mutex &____s->seqcount irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem ovs_mutex krc.lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ei->i_data_sem remove_cache_srcu pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_read_lock &tb->tb6_lock &____s->seqcount#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_read_lock &tb->tb6_lock &____s->seqcount irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &n->list_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_unbound (work_completion)(&pool->idle_cull_work) wq_pool_attach_mutex &p->pi_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &data->lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &data->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &data->lock &base->lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &data->lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &tbl->lock &____s->seqcount#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 jbd2_handle &rcu_state.expedited_wq irq_context: 0 key#25 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex kernfs_idr_lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex kernfs_idr_lock fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex (inetaddr_chain).rwsem &____s->seqcount#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex (inetaddr_chain).rwsem &pcp->lock &zone->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex (inetaddr_chain).rwsem &pcp->lock &zone->lock &____s->seqcount irq_context: 0 (wq_completion)netns 
net_cleanup_work pernet_ops_rwsem rtnl_mutex (inetaddr_chain).rwsem &____s->seqcount irq_context: 0 (wq_completion)wg-crypt-wg0#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)wg-crypt-wg0#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rcu_state.expedited_wq irq_context: 0 &sb->s_type->i_mutex_key#10 &net->sctp.addr_wq_lock slock-AF_INET/1 key#25 irq_context: 0 &fsnotify_mark_srcu &group->notification_waitq &p->pi_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex (inetaddr_chain).rwsem rcu_read_lock &pool->lock/1 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 &sb->s_type->i_mutex_key#10 &net->sctp.addr_wq_lock slock-AF_INET/1 fill_pool_map-wait-type-override &c->lock irq_context: 0 &sb->s_type->i_mutex_key#10 &net->sctp.addr_wq_lock slock-AF_INET/1 fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 &net->sctp.addr_wq_lock slock-AF_INET/1 fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem ipvs->sync_mutex &cfs_rq->removed.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem ipvs->sync_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem ipvs->sync_mutex pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx rcu_state.exp_mutex &cfs_rq->removed.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx rcu_state.exp_mutex pool_lock#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &rcu_state.expedited_wq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx &ul->lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx &____s->seqcount#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx &____s->seqcount irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx kernfs_idr_lock &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx kernfs_idr_lock pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx &sem->wait_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx &p->pi_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx &p->pi_lock &rq->__lock 
irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx &root->kernfs_rwsem &sem->wait_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx &root->kernfs_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#7 rcu_node_0 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &dev->mutex batched_entropy_u8.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &dev->mutex kfence_freelist_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &dev->mutex &meta->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx &root->kernfs_rwsem kernfs_idr_lock &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx &root->kernfs_rwsem kernfs_idr_lock pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx gdp_mutex &sem->wait_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx gdp_mutex &p->pi_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx gdp_mutex &p->pi_lock &rq->__lock irq_context: 0 sb_writers#8 &of->mutex kn->active#51 nsim_bus_dev_list_lock uevent_sock_mutex &c->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#51 nsim_bus_dev_list_lock uevent_sock_mutex &____s->seqcount#2 irq_context: 0 sb_writers#8 &of->mutex kn->active#51 nsim_bus_dev_list_lock uevent_sock_mutex &____s->seqcount irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &n->list_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &n->list_lock &c->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#8 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#8 crngs.lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#8 fs_reclaim irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#8 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#8 devlinks.xa_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#8 &c->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#8 &n->list_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#8 &n->list_lock &c->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#8 &obj_hash[i].lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#8 nl_table_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#8 nl_table_wait.lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#8 &xa->xa_lock#14 irq_context: 0 sb_writers#8 
&of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#8 &data->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#8 pcpu_alloc_mutex irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#8 pcpu_alloc_mutex pcpu_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#8 &____s->seqcount#2 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#8 &____s->seqcount irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#8 &base->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#8 &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#8 pin_fs_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#8 &sb->s_type->i_mutex_key#3 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#8 &sb->s_type->i_mutex_key#3 rename_lock.seqcount irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#8 &sb->s_type->i_mutex_key#3 fs_reclaim irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#8 &sb->s_type->i_mutex_key#3 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#8 &sb->s_type->i_mutex_key#3 &dentry->d_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#8 &sb->s_type->i_mutex_key#3 rcu_read_lock rename_lock.seqcount irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#8 &sb->s_type->i_mutex_key#3 &dentry->d_lock &wq irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#8 &sb->s_type->i_mutex_key#3 &c->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#8 &sb->s_type->i_mutex_key#3 &n->list_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#8 &sb->s_type->i_mutex_key#3 &n->list_lock &c->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#8 &sb->s_type->i_mutex_key#3 mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#8 &sb->s_type->i_mutex_key#3 &sb->s_type->i_lock_key#7 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#8 &sb->s_type->i_mutex_key#3 &s->s_inode_list_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#8 &sb->s_type->i_mutex_key#3 tk_core.seq.seqcount irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#8 &sb->s_type->i_mutex_key#3 &sb->s_type->i_lock_key#7 &dentry->d_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#8 &rq->__lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 
nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#8 batched_entropy_u32.lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#8 rtnl_mutex irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#8 rtnl_mutex &(&net->nexthop.notifier_chain)->rwsem irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#8 rcu_read_lock &data->fib_event_queue_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#8 rcu_read_lock rcu_read_lock &pool->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#8 rcu_read_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#8 rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#8 rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#8 rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#8 rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#8 rcu_read_lock &rq->__lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#8 rcu_read_lock &c->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#8 rcu_read_lock &tb->tb6_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#8 rcu_read_lock &tb->tb6_lock &net->ipv6.fib6_walker_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#8 rcu_read_lock &tb->tb6_lock &data->fib_event_queue_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#8 rcu_read_lock &obj_hash[i].lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#8 &(&fn_net->fib_chain)->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#8 &sb->s_type->i_mutex_key#3 &____s->seqcount#2 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#8 &sb->s_type->i_mutex_key#3 &____s->seqcount irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#8 &sb->s_type->i_mutex_key#3 pool_lock#2 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#8 &devlink_port->type_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#8 stack_depot_init_mutex irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#8 rtnl_mutex bpf_devs_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#8 rtnl_mutex bpf_devs_lock fs_reclaim irq_context: 0 sb_writers#8 
&of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#8 rtnl_mutex bpf_devs_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#8 rtnl_mutex bpf_devs_lock rcu_read_lock rhashtable_bucket irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#8 rtnl_mutex pin_fs_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#8 rtnl_mutex &sb->s_type->i_mutex_key#3 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#8 rtnl_mutex &sb->s_type->i_mutex_key#3 rename_lock.seqcount irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#8 rtnl_mutex &sb->s_type->i_mutex_key#3 fs_reclaim irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#8 rtnl_mutex &sb->s_type->i_mutex_key#3 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#8 rtnl_mutex &sb->s_type->i_mutex_key#3 &c->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#8 rtnl_mutex &sb->s_type->i_mutex_key#3 &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&(&devlink->rwork)->work) rcu_state.exp_mutex &rnp->exp_wq[2] irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#8 rtnl_mutex &sb->s_type->i_mutex_key#3 &dentry->d_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#8 rtnl_mutex &sb->s_type->i_mutex_key#3 rcu_node_0 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#8 rtnl_mutex &sb->s_type->i_mutex_key#3 &rcu_state.expedited_wq irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#8 rtnl_mutex &sb->s_type->i_mutex_key#3 &rcu_state.expedited_wq &p->pi_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#8 rtnl_mutex &sb->s_type->i_mutex_key#3 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#8 rtnl_mutex &sb->s_type->i_mutex_key#3 rcu_read_lock rename_lock.seqcount irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#8 rtnl_mutex &sb->s_type->i_mutex_key#3 &dentry->d_lock &wq irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#8 rtnl_mutex &sb->s_type->i_mutex_key#3 mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#8 rtnl_mutex &sb->s_type->i_mutex_key#3 &sb->s_type->i_lock_key#7 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#8 rtnl_mutex &sb->s_type->i_mutex_key#3 &s->s_inode_list_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#8 rtnl_mutex &sb->s_type->i_mutex_key#3 tk_core.seq.seqcount irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex 
&devlink->lock_key#8 rtnl_mutex &sb->s_type->i_mutex_key#3 &sb->s_type->i_lock_key#7 &dentry->d_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#8 rtnl_mutex &sb->s_type->i_mutex_key#3 &____s->seqcount#2 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#8 rtnl_mutex &sb->s_type->i_mutex_key#3 &____s->seqcount irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#8 rtnl_mutex &sb->s_type->i_mutex_key#3 pool_lock#2 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#8 rtnl_mutex &sb->s_type->i_mutex_key#3 &n->list_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#8 rtnl_mutex &sb->s_type->i_mutex_key#3 &n->list_lock &c->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#8 rtnl_mutex &____s->seqcount irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#8 rtnl_mutex &obj_hash[i].lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#8 rtnl_mutex fs_reclaim irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#8 rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#8 rtnl_mutex &base->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#8 rtnl_mutex &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#8 rtnl_mutex (work_completion)(&(&devlink_port->type_warn_dw)->work) irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#8 rtnl_mutex &devlink_port->type_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#8 rtnl_mutex &c->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#8 rtnl_mutex nl_table_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#8 rtnl_mutex nl_table_wait.lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#8 rtnl_mutex net_rwsem irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#8 rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#8 rtnl_mutex &tn->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#8 rtnl_mutex &x->wait#9 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#8 rtnl_mutex &k->list_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#8 rtnl_mutex gdp_mutex irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#8 rtnl_mutex gdp_mutex &k->list_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#8 
rtnl_mutex gdp_mutex fs_reclaim irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#8 rtnl_mutex gdp_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#8 rtnl_mutex gdp_mutex lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#8 rtnl_mutex gdp_mutex lock kernfs_idr_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#8 rtnl_mutex gdp_mutex &root->kernfs_rwsem irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#8 rtnl_mutex gdp_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#8 rtnl_mutex gdp_mutex kobj_ns_type_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#8 rtnl_mutex &____s->seqcount#2 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#8 rtnl_mutex lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#8 rtnl_mutex lock kernfs_idr_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#8 rtnl_mutex &root->kernfs_rwsem irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#8 rtnl_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#8 rtnl_mutex bus_type_sem irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#8 rtnl_mutex sysfs_symlink_target_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#8 rtnl_mutex &pcp->lock &zone->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#8 rtnl_mutex &pcp->lock &zone->lock &____s->seqcount irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#8 rtnl_mutex &root->kernfs_rwsem irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#8 rtnl_mutex &dev->power.lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#8 rtnl_mutex dpm_list_mtx irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#8 rtnl_mutex uevent_sock_mutex irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#8 rtnl_mutex uevent_sock_mutex fs_reclaim irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#8 rtnl_mutex uevent_sock_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#8 rtnl_mutex uevent_sock_mutex nl_table_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#8 rtnl_mutex uevent_sock_mutex &obj_hash[i].lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex 
&devlink->lock_key#8 rtnl_mutex uevent_sock_mutex nl_table_wait.lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#8 rtnl_mutex &k->k_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#8 rtnl_mutex subsys mutex#17 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#8 rtnl_mutex subsys mutex#17 &k->k_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#8 rtnl_mutex &dir->lock#2 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#8 rtnl_mutex &n->list_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#8 rtnl_mutex &n->list_lock &c->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#8 rtnl_mutex dev_hotplug_mutex irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#8 rtnl_mutex dev_hotplug_mutex &dev->power.lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#8 rtnl_mutex dev_base_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#8 rtnl_mutex input_pool.lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#8 rtnl_mutex rcu_read_lock &pool->lock/1 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#8 rtnl_mutex rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#8 rtnl_mutex rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#8 rtnl_mutex batched_entropy_u32.lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#8 rtnl_mutex &rq->__lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#8 rtnl_mutex &tbl->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#8 rtnl_mutex sysctl_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#8 rtnl_mutex pool_lock#2 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#8 rtnl_mutex failover_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#8 rtnl_mutex pcpu_alloc_mutex irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#8 rtnl_mutex pcpu_alloc_mutex pcpu_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#8 rtnl_mutex proc_subdir_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#8 rtnl_mutex proc_inum_ida.xa_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#8 rtnl_mutex proc_subdir_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#8 
rtnl_mutex &idev->mc_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#8 rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#8 rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#8 rtnl_mutex &idev->mc_lock &obj_hash[i].lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#8 rtnl_mutex &idev->mc_lock _xmit_ETHER irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#8 rtnl_mutex &pnettable->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#8 rtnl_mutex smc_ib_devices.mutex irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#8 rtnl_mutex &vn->sock_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#8 rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#8 rtnl_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#8 rtnl_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#8 rtnl_mutex &obj_hash[i].lock pool_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#8 rtnl_mutex uevent_sock_mutex &c->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#8 rtnl_mutex uevent_sock_mutex &____s->seqcount#2 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#8 rtnl_mutex uevent_sock_mutex &____s->seqcount irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#8 rtnl_mutex uevent_sock_mutex pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx gdp_mutex &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &root->kernfs_rwsem &cfs_rq->removed.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &device->compat_devs_mutex irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &device->compat_devs_mutex &xa->xa_lock#15 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &root->kernfs_rwsem &____s->seqcount irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &root->kernfs_rwsem rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &sem->wait_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &root->kernfs_rwsem &sem->wait_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &root->kernfs_rwsem &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &root->kernfs_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work 
pernet_ops_rwsem &root->kernfs_rwsem rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &root->kernfs_rwsem rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &root->kernfs_rwsem rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem subsys mutex#83 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem subsys mutex#83 &k->k_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem subsys mutex#83 &k->k_lock klist_remove_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &device->unregistration_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &root->kernfs_rwsem kernfs_idr_lock &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &root->kernfs_rwsem &root->kernfs_iattr_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &root->kernfs_rwsem quarantine_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &root->kernfs_rwsem &sem->wait_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem dev_pm_qos_sysfs_mtx &root->kernfs_rwsem &sem->wait_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem dev_pm_qos_sysfs_mtx &sem->wait_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem dev_pm_qos_sysfs_mtx &p->pi_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem dev_pm_qos_sysfs_mtx &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem dev_pm_qos_sysfs_mtx &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem dev_pm_qos_sysfs_mtx &root->kernfs_rwsem &sem->wait_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem dev_pm_qos_sysfs_mtx &root->kernfs_rwsem &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem dev_pm_qos_sysfs_mtx &root->kernfs_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &root->kernfs_rwsem &sem->wait_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &br->hash_lock &____s->seqcount#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &br->hash_lock &____s->seqcount irq_context: 0 (wq_completion)wg-crypt-wg0#8 (work_completion)(&peer->transmit_packet_work) batched_entropy_u8.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &data->lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock &data->lock &obj_hash[i].lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock &data->lock &base->lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock &data->lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg2#8 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg2#8 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)wg-crypt-wg2#8 
(work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &ul->lock irq_context: 0 (wq_completion)wg-crypt-wg2#8 (work_completion)(&peer->transmit_packet_work) batched_entropy_u8.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &br->lock &c->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &br->lock &n->list_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &br->lock &n->list_lock &c->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &bridge_netdev_addr_lock_key irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &bridge_netdev_addr_lock_key &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &bridge_netdev_addr_lock_key krc.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &idev->mc_lock &bridge_netdev_addr_lock_key irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &idev->mc_lock &bridge_netdev_addr_lock_key &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &idev->mc_lock &bridge_netdev_addr_lock_key krc.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &dev_addr_list_lock_key/1 _xmit_ETHER &obj_hash[i].lock pool_lock irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock &in_dev->mc_tomb_lock &data->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &ul->lock#2 pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &ul->lock#2 &dir->lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &idev->mc_lock &bridge_netdev_addr_lock_key &obj_hash[i].lock pool_lock irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_read_lock rcu_read_lock &cfs_rq->removed.lock irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_read_lock rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &ul->lock#2 &c->lock irq_context: 0 (wq_completion)events (work_completion)(&(&krcp->monitor_work)->work) rcu_callback &rcu_state.expedited_wq irq_context: 0 (wq_completion)events (work_completion)(&(&krcp->monitor_work)->work) rcu_callback &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)events (work_completion)(&(&krcp->monitor_work)->work) rcu_callback &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&(&krcp->monitor_work)->work) rcu_callback &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-crypt-wg0#8 (work_completion)(&peer->transmit_packet_work) &rq->__lock irq_context: 0 (wq_completion)wg-crypt-wg0#8 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)wg-crypt-wg0#8 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)wg-crypt-wg0#8 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &n->lock irq_context: 0 (wq_completion)wg-crypt-wg0#8 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock 
rcu_read_lock_bh rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex (work_completion)(&q->work) irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &block->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &block->cb_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &block->cb_lock flow_indr_block_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &block->cb_lock flow_indr_block_lock &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &block->cb_lock flow_indr_block_lock pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &br->multicast_lock rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &br->multicast_lock rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq &(&bat_priv->dat.work)->timer rcu_read_lock &pool->lock/1 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &chain->filter_chain_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &chain->filter_chain_lock &block->proto_destroy_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &chain->filter_chain_lock &block->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &block->cb_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &block->cb_lock &tp->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &block->proto_destroy_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &block->lock fs_reclaim irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &block->lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &block->lock pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &block->lock &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &block->lock nl_table_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &block->lock nl_table_wait.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &block->lock &c->lock irq_context: 0 (wq_completion)wg-crypt-wg1#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)wg-crypt-wg1#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 
(wq_completion)wg-crypt-wg1#8 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh &base->lock irq_context: 0 (wq_completion)wg-crypt-wg1#8 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg1#8 (work_completion)(&peer->transmit_packet_work) &base->lock irq_context: 0 (wq_completion)wg-crypt-wg1#8 (work_completion)(&peer->transmit_packet_work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg1#8 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg1#8 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh batched_entropy_u32.lock irq_context: 0 (wq_completion)wg-crypt-wg1#8 (work_completion)(&peer->transmit_packet_work) batched_entropy_u8.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_state.exp_mutex fill_pool_map-wait-type-override &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_state.exp_mutex fill_pool_map-wait-type-override &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_state.barrier_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_state.barrier_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)gid-cache-wq (work_completion)(&ndev_work->work) devices_rwsem &table->lock#4 &rq->__lock irq_context: 0 (wq_completion)gid-cache-wq (work_completion)(&ndev_work->work) devices_rwsem &table->lock#4 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex (inetaddr_chain).rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex (inetaddr_chain).rwsem rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex (inetaddr_chain).rwsem rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex remove_cache_srcu rcu_read_lock &____s->seqcount irq_context: 0 &p->lock &of->mutex kn->active#5 rcu_read_lock &rcu_state.gp_wq irq_context: 0 &p->lock &of->mutex kn->active#5 rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 &p->lock &of->mutex kn->active#5 rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 &p->lock &of->mutex kn->active#5 rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 rtnl_mutex krc.lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 rtnl_mutex krc.lock &base->lock irq_context: 0 &sb->s_type->i_mutex_key#10 rtnl_mutex krc.lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#8 &nsim_trap_data->trap_lock &base->lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#8 &nsim_trap_data->trap_lock &base->lock &obj_hash[i].lock irq_context: 0 &group->mark_mutex pgd_lock irq_context: 0 &group->mark_mutex stock_lock irq_context: 0 &group->mark_mutex rcu_read_lock pool_lock#2 irq_context: 0 &group->mark_mutex key irq_context: 0 
&group->mark_mutex pcpu_lock irq_context: 0 &group->mark_mutex percpu_counters_lock irq_context: 0 &group->mark_mutex pcpu_lock stock_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &info->lock key#9 irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &sb->s_type->i_mutex_key#12 &sb->s_type->i_mutex_key#12/4 &cfs_rq->removed.lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &sb->s_type->i_mutex_key#12 &sb->s_type->i_mutex_key#12/4 &obj_hash[i].lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &sb->s_type->i_mutex_key#12 &sb->s_type->i_mutex_key#12/4 pool_lock#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem mmu_notifier_invalidate_range_start &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem mmu_notifier_invalidate_range_start &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_read_lock &ei->socket.wq.wait &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)events (work_completion)(&data->dm_alert_work) remove_cache_srcu irq_context: 0 (wq_completion)events (work_completion)(&data->dm_alert_work) remove_cache_srcu quarantine_lock irq_context: 0 (wq_completion)events (work_completion)(&data->dm_alert_work) remove_cache_srcu &c->lock irq_context: 0 (wq_completion)events (work_completion)(&data->dm_alert_work) remove_cache_srcu &n->list_lock irq_context: 0 (wq_completion)events (work_completion)(&data->dm_alert_work) remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&data->dm_alert_work) remove_cache_srcu &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 jbd2_handle &sbi->s_orphan_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lg->lg_mutex irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lg->lg_mutex &ei->i_prealloc_lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lg->lg_mutex mmu_notifier_invalidate_range_start irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lg->lg_mutex mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lg->lg_mutex mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lg->lg_mutex &mapping->private_lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lg->lg_mutex &pa->pa_lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lg->lg_mutex &lg->lg_prealloc_lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &c->lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lg->lg_mutex &ret->b_state_lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lg->lg_mutex bit_wait_table + i irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lg->lg_mutex &rq->__lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lg->lg_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ret->b_state_lock 
irq_context: 0 (wq_completion)mm_percpu_wq (work_completion)(&barr->work) &x->wait#10 &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem quarantine_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem &bgl->locks[i].lock &pa->pa_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem &bgl->locks[i].lock &lg->lg_prealloc_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem &bgl->locks[i].lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem &bgl->locks[i].lock pool_lock#2 irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lg->lg_mutex &obj_hash[i].lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lg->lg_mutex pool_lock#2 irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle &ei->i_data_sem &ret->b_state_lock bit_wait_table + i irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 batched_entropy_u32.lock crngs.lock base_crng.lock irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 batched_entropy_u8.lock crngs.lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lg->lg_mutex rcu_read_lock &pa->pa_lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&gc_work->dwork)->work) rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_lock_key#22 bit_wait_table + i irq_context: 0 sb_writers#4 sb_writers#4 rcu_node_0 irq_context: 0 cb_lock &cfs_rq->removed.lock irq_context: 0 cb_lock &obj_hash[i].lock pool_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 pool_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &ei->i_es_lock &____s->seqcount#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &ei->i_es_lock &____s->seqcount irq_context: softirq rcu_read_lock hwsim_radio_lock batched_entropy_u8.lock irq_context: softirq rcu_read_lock hwsim_radio_lock kfence_freelist_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &meta->lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx kfence_freelist_lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh &hsr->seqnr_lock &data->lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem &ret->b_state_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem &ret->b_state_lock &journal->j_list_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &sbi->s_orphan_lock &ret->b_state_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &sbi->s_orphan_lock &ret->b_state_lock &journal->j_list_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 rcu_read_lock rcu_read_lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg1#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void 
*)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_node_0 irq_context: 0 wlock-AF_PPPOX irq_context: 0 (wq_completion)wg-crypt-wg1#4 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)wg-crypt-wg1#4 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &n->lock irq_context: 0 (wq_completion)wg-crypt-wg1#4 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#9 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &ei->i_es_lock key#7 irq_context: softirq (&mp->timer) &br->multicast_lock &n->list_lock irq_context: softirq (&mp->timer) &br->multicast_lock &n->list_lock &c->lock irq_context: 0 &type->i_mutex_dir_key#3 sb_writers#4 jbd2_handle &ret->b_state_lock bit_wait_table + i irq_context: 0 (wq_completion)wg-crypt-wg1#8 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 &type->i_mutex_dir_key#3 sb_writers#4 &pcp->lock &zone->lock &____s->seqcount irq_context: 0 (wq_completion)events (work_completion)(&data->dm_alert_work) remove_cache_srcu pool_lock#2 irq_context: 0 (wq_completion)events_freezable (work_completion)(&vb->update_balloon_stats_work) cpu_hotplug_lock &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ret->b_state_lock bit_wait_table + i irq_context: 0 sb_writers#4 tomoyo_ss mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 sb_writers#4 tomoyo_ss mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &iint->mutex ima_extend_list_mutex remove_cache_srcu irq_context: 0 &iint->mutex ima_extend_list_mutex remove_cache_srcu quarantine_lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &n->list_lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &n->list_lock &c->lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem batched_entropy_u8.lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem kfence_freelist_lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lg->lg_mutex &ret->b_state_lock &journal->j_list_lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lg->lg_mutex key#3 irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &meta->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock batched_entropy_u8.lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock kfence_freelist_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem remove_cache_srcu &c->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem remove_cache_srcu &n->list_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem remove_cache_srcu &obj_hash[i].lock 
irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem remove_cache_srcu &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &sbi->s_orphan_lock rcu_read_lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &sbi->s_orphan_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &sbi->s_orphan_lock rcu_read_lock &rcu_state.gp_wq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &sbi->s_orphan_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &sbi->s_orphan_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &sbi->s_orphan_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 stock_lock rcu_read_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem &bgl->locks[i].lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_data_sem &bgl->locks[i].lock &meta->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_data_sem &bgl->locks[i].lock kfence_freelist_lock irq_context: 0 rtnl_mutex &root->kernfs_rwsem &obj_hash[i].lock pool_lock irq_context: 0 rtnl_mutex &root->kernfs_rwsem rcu_read_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 rtnl_mutex &root->kernfs_rwsem rcu_read_lock &cfs_rq->removed.lock irq_context: 0 rtnl_mutex &root->kernfs_rwsem rcu_read_lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_CAN rcu_state.exp_mutex rcu_read_lock &pool->lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_CAN rcu_state.exp_mutex rcu_read_lock &pool->lock fill_pool_map-wait-type-override &c->lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_CAN rcu_state.exp_mutex rcu_read_lock &pool->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 cgroup_threadgroup_rwsem &rcu_state.expedited_wq irq_context: 0 cgroup_threadgroup_rwsem &rcu_state.expedited_wq &p->pi_lock irq_context: 0 cgroup_threadgroup_rwsem &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 cgroup_threadgroup_rwsem &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &ei->i_es_lock rcu_read_lock &p->pi_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &ei->i_es_lock rcu_read_lock &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &ei->i_es_lock rcu_read_lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &rcu_state.expedited_wq &p->pi_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem 
&rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 sb_internal jbd2_handle &rcu_state.expedited_wq irq_context: 0 sb_writers#4 sb_internal jbd2_handle &rcu_state.expedited_wq &p->pi_lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)kblockd (work_completion)(&(&q->requeue_work)->work) rcu_read_lock &rcu_state.expedited_wq irq_context: 0 (wq_completion)kblockd (work_completion)(&(&q->requeue_work)->work) rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)kblockd (work_completion)(&(&q->requeue_work)->work) rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)kblockd (work_completion)(&(&q->requeue_work)->work) rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem rcu_read_lock &rcu_state.expedited_wq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &rq->__lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &rq->__lock &base->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &rq->__lock &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle remove_cache_srcu irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle remove_cache_srcu quarantine_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 tomoyo_ss rcu_read_lock &rcu_state.gp_wq irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 tomoyo_ss rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 tomoyo_ss rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 tomoyo_ss rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock batched_entropy_u8.lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock kfence_freelist_lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &obj_hash[i].lock pool_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 rcu_read_lock batched_entropy_u32.lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock quarantine_lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &____s->seqcount irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock 
rcu_read_lock rcu_read_lock pool_lock#2 irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 &xt[i].mutex fill_pool_map-wait-type-override &c->lock irq_context: 0 &xt[i].mutex fill_pool_map-wait-type-override &n->list_lock irq_context: 0 &xt[i].mutex fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 &xt[i].mutex fill_pool_map-wait-type-override pool_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 sctp_assocs_id_lock &obj_hash[i].lock pool_lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock &nf_nat_locks[i] irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex &bat_priv->tvlv.container_list_lock &____s->seqcount irq_context: 0 sk_lock-AF_INET6 &rq->__lock &cfs_rq->removed.lock irq_context: 0 rtnl_mutex cpu_hotplug_lock &x->wait#10 irq_context: 0 rtnl_mutex cpu_hotplug_lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 rcu_read_lock batched_entropy_u32.lock crngs.lock irq_context: 0 rtnl_mutex &ht->mutex &meta->lock irq_context: 0 rtnl_mutex &ht->mutex kfence_freelist_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 init_task.mems_allowed_seq.seqcount irq_context: 0 (wq_completion)wg-crypt-wg1#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &rq->__lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle &meta->lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle kfence_freelist_lock irq_context: 0 rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 rcu_read_lock &nf_nat_locks[i] irq_context: 0 rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 &obj_hash[i].lock irq_context: 0 rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &rdev->bss_lock fill_pool_map-wait-type-override &n->list_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &rdev->bss_lock fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 sk_lock-AF_INET6 fill_pool_map-wait-type-override &cfs_rq->removed.lock irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6 rcu_read_lock &dir->lock#2 irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6 rcu_read_lock &ul->lock irq_context: softirq rcu_read_lock rcu_read_lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &sbi->s_orphan_lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &sbi->s_orphan_lock pool_lock#2 irq_context: 0 remove_cache_srcu rcu_read_lock pgd_lock irq_context: 0 remove_cache_srcu rcu_read_lock stock_lock irq_context: 0 remove_cache_srcu rcu_read_lock key irq_context: 0 remove_cache_srcu rcu_read_lock pcpu_lock irq_context: 0 remove_cache_srcu rcu_read_lock 
percpu_counters_lock irq_context: 0 remove_cache_srcu rcu_read_lock pcpu_lock stock_lock irq_context: 0 &group->mark_mutex rcu_node_0 irq_context: 0 &type->i_mutex_dir_key#3 sb_writers#4 remove_cache_srcu &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal rcu_node_0 irq_context: 0 fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &cfs_rq->removed.lock irq_context: 0 &f->f_pos_lock sb_writers#3 &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#3 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock &anon_vma->rwsem rcu_read_lock &cfs_rq->removed.lock irq_context: 0 &mm->mmap_lock &anon_vma->rwsem rcu_read_lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem fill_pool_map-wait-type-override pool_lock irq_context: 0 nfnl_subsys_ipset ip_set_ref_lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ei->i_data_sem &pcp->lock &zone->lock irq_context: 0 rtnl_mutex remove_cache_srcu &cfs_rq->removed.lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&tbl->gc_work)->work) &tbl->lock batched_entropy_u32.lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&tbl->gc_work)->work) &tbl->lock batched_entropy_u32.lock crngs.lock irq_context: 0 syslog_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock &im->lock &data->lock irq_context: 0 rtnl_mutex &idev->mc_lock &cfs_rq->removed.lock irq_context: softirq &(&bat_priv->mcast.work)->timer rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)events (work_completion)(&data->dm_alert_work) remove_cache_srcu &pcp->lock &zone->lock irq_context: 0 (wq_completion)events (work_completion)(&data->dm_alert_work) remove_cache_srcu &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &journal->j_barrier &journal->j_checkpoint_mutex quarantine_lock irq_context: 0 (wq_completion)kblockd (work_completion)(&(&hctx->run_work)->work) rcu_read_lock &pcp->lock &zone->lock irq_context: 0 kn->active#5 &kernfs_locks->open_file_mutex[count] remove_cache_srcu &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &journal->j_wait_transaction_locked irq_context: 0 &journal->j_barrier &journal->j_checkpoint_mutex &obj_hash[i].lock pool_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &sbi->s_orphan_lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &sbi->s_orphan_lock &obj_hash[i].lock irq_context: 0 rcu_read_lock &f->f_owner.lock irq_context: 0 sb_writers#4 tomoyo_ss rcu_read_lock rcu_read_lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 tomoyo_ss rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 sb_writers#8 &type->i_mutex_dir_key#4 rcu_read_lock &rcu_state.gp_wq irq_context: 0 sb_writers#8 &type->i_mutex_dir_key#4 rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 sb_writers#8 &type->i_mutex_dir_key#4 rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#8 &type->i_mutex_dir_key#4 rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond2 (work_completion)(&(&bond->mii_work)->work) &rq->__lock irq_context: 0 &kernfs_locks->open_file_mutex[count] 
rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_state.exp_mutex pgd_lock irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_state.exp_mutex stock_lock irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_state.exp_mutex rcu_read_lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_state.exp_mutex key irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_state.exp_mutex pcpu_lock irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_state.exp_mutex percpu_counters_lock irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_state.exp_mutex pcpu_lock stock_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 remove_cache_srcu &rq->__lock &cfs_rq->removed.lock irq_context: 0 &type->i_mutex_dir_key#3 remove_cache_srcu &cfs_rq->removed.lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&(&conn->disc_work)->work) irq_context: 0 (wq_completion)hci4#2 (work_completion)(&(&conn->disc_work)->work) &list->lock#9 irq_context: 0 (wq_completion)hci4#2 (work_completion)(&(&conn->disc_work)->work) rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)hci4#2 (work_completion)(&(&conn->disc_work)->work) rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&(&conn->disc_work)->work) rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock quarantine_lock irq_context: 0 sb_writers#4 sb_writers#4 &sb->s_type->i_lock_key#22 irq_context: 0 sb_writers#4 sb_writers#4 &wb->list_lock irq_context: 0 sb_writers#4 sb_writers#4 &wb->list_lock &sb->s_type->i_lock_key#22 irq_context: 0 &xt[i].mutex &mm->mmap_lock rcu_node_0 irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 fs_reclaim pool_lock#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &folio_wait_table[i] &p->pi_lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle rcu_read_lock &rcu_state.gp_wq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 purge_vmap_area_lock quarantine_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle rcu_read_lock &rcu_state.expedited_wq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock genl_mutex rtnl_mutex team->team_lock_key#7 irq_context: 0 cb_lock genl_mutex rtnl_mutex team->team_lock_key#7 fs_reclaim irq_context: 0 cb_lock genl_mutex rtnl_mutex team->team_lock_key#7 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 cb_lock genl_mutex rtnl_mutex team->team_lock_key#7 &c->lock irq_context: 0 cb_lock genl_mutex rtnl_mutex team->team_lock_key#7 pool_lock#2 irq_context: 0 cb_lock genl_mutex rtnl_mutex team->team_lock_key#7 &____s->seqcount irq_context: 0 cb_lock genl_mutex rtnl_mutex team->team_lock_key#7 rcu_read_lock 
pool_lock#2 irq_context: 0 cb_lock genl_mutex rtnl_mutex team->team_lock_key#7 &obj_hash[i].lock irq_context: 0 cb_lock genl_mutex rtnl_mutex team->team_lock_key#7 nl_table_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex team->team_lock_key#7 nl_table_wait.lock irq_context: 0 (wq_completion)bond1 (work_completion)(&(&bond->ad_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rcu_state.barrier_mutex &cfs_rq->removed.lock irq_context: 0 rcu_state.barrier_mutex &obj_hash[i].lock irq_context: 0 rcu_state.barrier_mutex pool_lock#2 irq_context: 0 &fsnotify_mark_srcu pgd_lock irq_context: 0 &fsnotify_mark_srcu stock_lock irq_context: 0 &fsnotify_mark_srcu rcu_read_lock pool_lock#2 irq_context: 0 &fsnotify_mark_srcu key irq_context: 0 &fsnotify_mark_srcu pcpu_lock irq_context: 0 &fsnotify_mark_srcu percpu_counters_lock irq_context: 0 &fsnotify_mark_srcu pcpu_lock stock_lock irq_context: 0 loop_validate_mutex &cfs_rq->removed.lock irq_context: 0 loop_validate_mutex &obj_hash[i].lock irq_context: 0 sb_writers#4 tomoyo_ss rcu_read_lock rename_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem &ei->i_es_lock key#8 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock fill_pool_map-wait-type-override &n->list_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem remove_cache_srcu &cfs_rq->removed.lock irq_context: 0 rtnl_mutex rcu_read_lock fill_pool_map-wait-type-override &n->list_lock irq_context: 0 rtnl_mutex rcu_read_lock fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 rtnl_mutex rcu_read_lock fill_pool_map-wait-type-override rcu_node_0 irq_context: 0 rtnl_mutex rcu_read_lock fill_pool_map-wait-type-override &rq->__lock irq_context: 0 rtnl_mutex rcu_read_lock fill_pool_map-wait-type-override &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal quarantine_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock pool_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem &cfs_rq->removed.lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_PPPOX chan_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_PPPOX rcu_state.exp_mutex &rnp->exp_wq[3] irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &cfs_rq->removed.lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_PPPOX rcu_state.exp_mutex &rnp->exp_wq[2] irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_PPPOX rcu_state.exp_mutex &rnp->exp_wq[1] irq_context: 0 kn->active#63 fs_reclaim irq_context: 0 kn->active#63 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#63 &c->lock irq_context: 0 kn->active#63 &rq->__lock irq_context: 0 kn->active#63 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 kn->active#63 stock_lock irq_context: 0 kn->active#63 &kernfs_locks->open_file_mutex[count] irq_context: 0 kn->active#63 &kernfs_locks->open_file_mutex[count] fs_reclaim irq_context: 0 kn->active#63 &kernfs_locks->open_file_mutex[count] fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#63 &kernfs_locks->open_file_mutex[count] fs_reclaim 
mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 kn->active#63 &kernfs_locks->open_file_mutex[count] fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &kernfs_locks->open_file_mutex[count] &____s->seqcount irq_context: 0 &kernfs_locks->open_file_mutex[count] rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &journal->j_wait_updates &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ret->b_state_lock bit_wait_table + i &p->pi_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ret->b_state_lock bit_wait_table + i &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ret->b_state_lock bit_wait_table + i &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rcu_read_lock &sighand->siglock &p->pi_lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ei->i_data_sem &bgl->locks[i].lock &meta->lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ei->i_data_sem &bgl->locks[i].lock kfence_freelist_lock irq_context: softirq (&ifibss->timer) rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_CAIF &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_CAIF &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock genl_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 &mm->mmap_lock &vma->vm_lock->lock rcu_node_0 irq_context: 0 &mm->mmap_lock &vma->vm_lock->lock &rcu_state.expedited_wq irq_context: 0 &mm->mmap_lock &vma->vm_lock->lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 &mm->mmap_lock &vma->vm_lock->lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 &mm->mmap_lock &vma->vm_lock->lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 vlan_ioctl_mutex rtnl_mutex _xmit_ETHER &n->list_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex _xmit_ETHER &n->list_lock &c->lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &idev->mc_lock &vlan_netdev_addr_lock_key &c->lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_PHONET &rnp->exp_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_PHONET rcu_state.exp_mutex irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_PHONET rcu_state.exp_mutex rcu_state.exp_mutex.wait_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_PHONET rcu_state.exp_mutex &rnp->exp_wq[3] irq_context: 0 (wq_completion)wg-crypt-wg2#8 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)wg-crypt-wg2#8 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg2#8 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh batched_entropy_u32.lock crngs.lock irq_context: 0 (wq_completion)wg-crypt-wg2#8 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)wg-crypt-wg2#8 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh batched_entropy_u32.lock crngs.lock base_crng.lock irq_context: 0 (wq_completion)wg-crypt-wg2#8 
(work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)wg-crypt-wg2#8 (work_completion)(&peer->transmit_packet_work) batched_entropy_u8.lock crngs.lock irq_context: 0 (wq_completion)wg-crypt-wg2#8 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-kex-wg2#15 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock crngs.lock base_crng.lock irq_context: 0 (wq_completion)wg-kex-wg2#15 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock rcu_read_lock_bh batched_entropy_u32.lock crngs.lock irq_context: 0 (wq_completion)wg-kex-wg2#15 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg2#15 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &ul->lock irq_context: 0 (wq_completion)wg-kex-wg2#15 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg2#15 (work_completion)(&peer->transmit_handshake_work) batched_entropy_u8.lock crngs.lock irq_context: 0 (wq_completion)wg-kex-wg2#16 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock &c->lock irq_context: 0 (wq_completion)wg-kex-wg2#16 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock &n->list_lock irq_context: 0 (wq_completion)wg-kex-wg2#16 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock &n->list_lock &c->lock irq_context: 0 (wq_completion)wg-kex-wg2#16 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &c->lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_PHONET rcu_state.exp_mutex &rnp->exp_wq[0] irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_PHONET rcu_state.exp_mutex.wait_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_PHONET &p->pi_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_PHONET &p->pi_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_PHONET &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_PHONET &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 tomoyo_ss rcu_read_lock 
&rcu_state.expedited_wq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 tomoyo_ss rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 tomoyo_ss rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 tomoyo_ss rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 &sb->s_type->i_mutex_key#10 fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 bit_wait_table + i irq_context: softirq &ret->b_uptodate_lock bit_wait_table + i &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 rtnl_mutex &pnettable->lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem &base->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &base->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &base->lock &obj_hash[i].lock irq_context: 0 &iint->mutex ima_extend_list_mutex fs_reclaim &rq->__lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &vma->vm_lock->lock &cfs_rq->removed.lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &vma->vm_lock->lock &obj_hash[i].lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &vma->vm_lock->lock pool_lock#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &rq->__lock irq_context: 0 &root->kernfs_iattr_rwsem pool_lock#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem &rcu_state.expedited_wq &p->pi_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ei->i_data_sem rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ei->i_data_sem rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ei->i_data_sem rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 file_rwsem &cfs_rq->removed.lock irq_context: 0 file_rwsem &obj_hash[i].lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &rcu_state.expedited_wq irq_context: 0 &vma->vm_lock->lock fs_reclaim mmu_notifier_invalidate_range_start &cfs_rq->removed.lock irq_context: 0 &vma->vm_lock->lock fs_reclaim mmu_notifier_invalidate_range_start &obj_hash[i].lock irq_context: 0 key#27 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET key#27 irq_context: 0 
(wq_completion)bat_events (work_completion)(&(&bat_priv->tt.work)->work) &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->tt.work)->work) &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock lock#4 rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock lock#4 &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg1#8 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh batched_entropy_u32.lock crngs.lock irq_context: softirq &(&bond->ad_work)->timer rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override &c->lock irq_context: softirq &(&bond->ad_work)->timer rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override &n->list_lock irq_context: softirq &(&bond->ad_work)->timer rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: softirq &(&bond->ad_work)->timer rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override pool_lock irq_context: 0 sk_lock-AF_INET fs_reclaim rcu_node_0 irq_context: 0 sk_lock-AF_INET fs_reclaim &rcu_state.expedited_wq irq_context: 0 sk_lock-AF_INET fs_reclaim &rcu_state.expedited_wq &p->pi_lock irq_context: 0 sk_lock-AF_INET fs_reclaim &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 sk_lock-AF_INET fs_reclaim &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock &lock->wait_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock &pool->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss remove_cache_srcu &cfs_rq->removed.lock irq_context: 0 sb_writers#4 remove_cache_srcu &rq->__lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem remove_cache_srcu pool_lock#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem remove_cache_srcu rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem remove_cache_srcu rcu_read_lock &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem remove_cache_srcu rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)rcu_gp (work_completion)(&(&ssp->srcu_sup->work)->work) &ssp->srcu_sup->srcu_gp_mutex &rq->__lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss remove_cache_srcu rcu_read_lock &rcu_state.gp_wq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss remove_cache_srcu rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss remove_cache_srcu rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss remove_cache_srcu rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 &net->sctp.addr_wq_lock slock-AF_INET6/1 key#25 irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ei->i_data_sem mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 sb_writers#4 sb_internal 
jbd2_handle &ei->i_data_sem mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 jbd2_handle &sbi->s_orphan_lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 jbd2_handle &sbi->s_orphan_lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 jbd2_handle &sbi->s_orphan_lock pool_lock#2 irq_context: 0 rtnl_mutex cpu_hotplug_lock xps_map_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem &obj_hash[i].lock pool_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ret->b_state_lock bit_wait_table + i &p->pi_lock irq_context: 0 rcu_read_lock fill_pool_map-wait-type-override rcu_node_0 irq_context: 0 rcu_read_lock fill_pool_map-wait-type-override &rq->__lock irq_context: 0 rcu_read_lock fill_pool_map-wait-type-override &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &ei->i_es_lock key#2 irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ei->i_es_lock key#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 jbd2_handle &ret->b_state_lock bit_wait_table + i &p->pi_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 jbd2_handle &ret->b_state_lock bit_wait_table + i &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 jbd2_handle &ret->b_state_lock bit_wait_table + i &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ret->b_state_lock bit_wait_table + i &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ret->b_state_lock bit_wait_table + i &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ei->i_data_sem remove_cache_srcu rcu_read_lock &rq->__lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ei->i_data_sem remove_cache_srcu rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &lg->lg_mutex &bgl->locks[i].lock irq_context: 0 &xt[i].mutex purge_vmap_area_lock &meta->lock irq_context: 0 &xt[i].mutex purge_vmap_area_lock kfence_freelist_lock irq_context: 0 sk_lock-AF_INET6 &data->lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET6 &data->lock &base->lock irq_context: 0 sk_lock-AF_INET6 &data->lock &base->lock &obj_hash[i].lock irq_context: 0 ebt_mutex rcu_read_lock &rq->__lock irq_context: 0 ebt_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 jbd2_handle &p->pi_lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 jbd2_handle &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 rtnl_mutex &br->hash_lock quarantine_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock 
fill_pool_map-wait-type-override &n->list_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 &child->perf_event_mutex &rq->__lock &cfs_rq->removed.lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &mapping->private_lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle tk_core.seq.seqcount irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &dd->lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &dd->lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &obj_hash[i].lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock pool_lock#2 irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock tk_core.seq.seqcount irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &virtscsi_vq->vq_lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &c->lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle batched_entropy_u8.lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle kfence_freelist_lock irq_context: 0 &sbi->s_writepages_rwsem &____s->seqcount#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock rcu_read_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->nc.work)->work) &base->lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->nc.work)->work) &base->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 sb_writers#5 fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 sb_writers#5 fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 bit_wait_table + i irq_context: 0 rtnl_mutex &bridge_netdev_addr_lock_key batched_entropy_u8.lock irq_context: 0 rtnl_mutex &bridge_netdev_addr_lock_key kfence_freelist_lock irq_context: 0 ebt_mutex &mm->mmap_lock &cfs_rq->removed.lock irq_context: 0 ebt_mutex &mm->mmap_lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg1#15 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock rcu_read_lock_bh batched_entropy_u32.lock crngs.lock irq_context: 0 (wq_completion)wg-kex-wg0#16 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock &c->lock irq_context: 0 (wq_completion)wg-kex-wg0#16 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock rcu_read_lock_bh &peer->keypairs.keypair_update_lock &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg0#16 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock 
rcu_read_lock_bh &peer->keypairs.keypair_update_lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg1#16 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock rcu_read_lock_bh &peer->keypairs.keypair_update_lock &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg1#16 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock rcu_read_lock_bh &peer->keypairs.keypair_update_lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 jbd2_handle &cfs_rq->removed.lock irq_context: 0 (wq_completion)events (work_completion)(&pool->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&pool->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &xs->mutex rcu_node_0 irq_context: 0 rtnl_mutex &xs->mutex &rq->__lock irq_context: 0 rtnl_mutex &xs->mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&umem->work) rcu_read_lock &pool->lock irq_context: 0 (wq_completion)events (work_completion)(&umem->work) rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&umem->work) &pcp->lock &zone->lock irq_context: 0 (wq_completion)events (work_completion)(&umem->work) &pcp->lock &zone->lock &____s->seqcount irq_context: 0 (wq_completion)events (work_completion)(&umem->work) &cfs_rq->removed.lock irq_context: 0 (wq_completion)events drain_vmap_work vmap_purge_lock &rcu_state.expedited_wq irq_context: 0 (wq_completion)events drain_vmap_work vmap_purge_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)events drain_vmap_work vmap_purge_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events drain_vmap_work vmap_purge_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&pool->work) rcu_read_lock &rcu_state.expedited_wq irq_context: 0 (wq_completion)events (work_completion)(&pool->work) rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)events (work_completion)(&pool->work) rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&pool->work) rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &xs->mutex fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#10 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&pool->work) &pcp->lock &zone->lock irq_context: 0 (wq_completion)events (work_completion)(&pool->work) &pcp->lock &zone->lock &____s->seqcount irq_context: 0 rtnl_mutex uevent_sock_mutex &rcu_state.expedited_wq irq_context: 0 (wq_completion)events (work_completion)(&pool->work) rcu_node_0 irq_context: 0 
(wq_completion)events (work_completion)(&pool->work) &rcu_state.expedited_wq irq_context: 0 (wq_completion)events (work_completion)(&pool->work) &rcu_state.expedited_wq &p->pi_lock irq_context: 0 cb_lock genl_mutex rcu_read_lock &ei->socket.wq.wait irq_context: 0 cb_lock genl_mutex rcu_read_lock &ei->socket.wq.wait &p->pi_lock irq_context: 0 cb_lock genl_mutex rcu_read_lock &ei->socket.wq.wait &p->pi_lock &cfs_rq->removed.lock irq_context: 0 cb_lock genl_mutex rcu_read_lock &ei->socket.wq.wait &p->pi_lock &rq->__lock irq_context: 0 cb_lock genl_mutex rcu_read_lock &ei->socket.wq.wait &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-crypt-wg1#10 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 (wq_completion)wg-crypt-wg1#10 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &n->list_lock irq_context: 0 (wq_completion)wg-crypt-wg1#10 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)wg-crypt-wg1#10 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 rtnl_mutex &xs->mutex &n->list_lock irq_context: 0 rtnl_mutex &xs->mutex &n->list_lock &c->lock irq_context: 0 &sb->s_type->i_mutex_key#10 &xs->mutex rcu_state.exp_mutex &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)wg-crypt-wg1#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &c->lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle rcu_read_lock &cfs_rq->removed.lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg2#10 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &ep->mtx gdp_mutex rcu_read_lock rcu_node_0 irq_context: 0 &ep->mtx gdp_mutex rcu_read_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 &u->peer_wait &ei->socket.wq.wait &p->pi_lock irq_context: 0 &sb->s_type->i_mutex_key#10 &u->peer_wait &ei->socket.wq.wait &p->pi_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 &u->peer_wait &ei->socket.wq.wait &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 &u->peer_wait &ei->socket.wq.wait &ep->lock &ep->wq irq_context: 0 &sb->s_type->i_mutex_key#10 &u->peer_wait &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock irq_context: 0 &sb->s_type->i_mutex_key#10 &u->peer_wait &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 &u->peer_wait &ei->socket.wq.wait &ep->lock 
&ep->wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 &u->peer_wait &ei->socket.wq.wait &ep->poll_wait/1 irq_context: 0 &u->lock &ei->socket.wq.wait &ep->poll_wait/1 irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_read_lock &ei->socket.wq.wait &ep->poll_wait/1 irq_context: 0 sk_lock-AF_NETROM &data->lock irq_context: 0 (wq_completion)wg-crypt-wg0#8 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock batched_entropy_u8.lock irq_context: 0 (wq_completion)wg-crypt-wg0#8 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock kfence_freelist_lock irq_context: 0 (wq_completion)wg-crypt-wg0#8 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &meta->lock irq_context: 0 &xs->mutex &mm->mmap_lock ptlock_ptr(page)#2 rcu_read_lock &p->pi_lock irq_context: 0 &xs->mutex &mm->mmap_lock ptlock_ptr(page)#2 rcu_read_lock &p->pi_lock &rq->__lock irq_context: 0 &xs->mutex &mm->mmap_lock ptlock_ptr(page)#2 rcu_read_lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &xs->mutex &mm->mmap_lock &rcu_state.expedited_wq irq_context: 0 &xs->mutex &mm->mmap_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 &xs->mutex &mm->mmap_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 &xs->mutex &mm->mmap_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &xs->mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 &xs->mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 &xs->mutex rcu_state.exp_mutex &cfs_rq->removed.lock irq_context: 0 &sb->s_type->i_mutex_key#10 &xs->mutex rcu_state.exp_mutex pool_lock#2 irq_context: softirq net/netrom/nr_loopback.c:18 slock-AF_NETROM &data->lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex uevent_sock_mutex &rq->__lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex uevent_sock_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq net/netrom/nr_loopback.c:18 &data->lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &xs->mutex &rcu_state.expedited_wq irq_context: 0 &xs->mutex &rcu_state.expedited_wq &p->pi_lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &root->kernfs_rwsem rcu_node_0 irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &root->kernfs_rwsem &rcu_state.expedited_wq irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex 
&root->kernfs_rwsem &rcu_state.expedited_wq &p->pi_lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &root->kernfs_rwsem &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &root->kernfs_rwsem &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex rcu_read_lock &rcu_state.expedited_wq irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_state.exp_mutex rcu_read_lock &pool->lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_state.exp_mutex rcu_read_lock &pool->lock fill_pool_map-wait-type-override &c->lock irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_state.exp_mutex rcu_read_lock &pool->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex fs_reclaim mmu_notifier_invalidate_range_start rcu_node_0 irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex fs_reclaim mmu_notifier_invalidate_range_start &rcu_state.expedited_wq irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex fs_reclaim mmu_notifier_invalidate_range_start &rcu_state.expedited_wq &p->pi_lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex fs_reclaim mmu_notifier_invalidate_range_start &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex fs_reclaim mmu_notifier_invalidate_range_start &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&pool->work) rcu_read_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)events (work_completion)(&pool->work) rcu_read_lock &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex remove_cache_srcu rcu_read_lock rcu_node_0 irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex remove_cache_srcu rcu_read_lock &rq->__lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex remove_cache_srcu rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex remove_cache_srcu rcu_read_lock &rcu_state.gp_wq irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex remove_cache_srcu rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex remove_cache_srcu rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex remove_cache_srcu rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex remove_cache_srcu &____s->seqcount irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex remove_cache_srcu &rq->__lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem devices_rwsem 
rdma_nets_rwsem &device->compat_devs_mutex &rxe->usdev_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock batched_entropy_u8.lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock kfence_freelist_lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex remove_cache_srcu rcu_node_0 irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex remove_cache_srcu &rcu_state.expedited_wq irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex remove_cache_srcu &rcu_state.expedited_wq &p->pi_lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex remove_cache_srcu &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex remove_cache_srcu &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &child->perf_event_mutex rcu_node_0 irq_context: 0 &child->perf_event_mutex &rcu_state.expedited_wq irq_context: 0 &child->perf_event_mutex &rcu_state.expedited_wq &p->pi_lock irq_context: 0 &child->perf_event_mutex &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 &child->perf_event_mutex &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &root->kernfs_rwsem &rq->__lock &cfs_rq->removed.lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &root->kernfs_rwsem rcu_read_lock &rq->__lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &root->kernfs_rwsem rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &zone->lock &____s->seqcount irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex rcu_read_lock &rcu_state.gp_wq irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &xs->mutex remove_cache_srcu irq_context: 0 &xs->mutex remove_cache_srcu quarantine_lock irq_context: 0 &xs->mutex remove_cache_srcu &c->lock irq_context: 0 &xs->mutex remove_cache_srcu &n->list_lock irq_context: 0 &xs->mutex remove_cache_srcu &rq->__lock irq_context: 0 &xs->mutex remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &xs->mutex remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 &xs->mutex remove_cache_srcu &obj_hash[i].lock irq_context: 0 &xs->mutex remove_cache_srcu pool_lock#2 irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex lock kernfs_idr_lock &____s->seqcount#2 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_NETROM &data->lock irq_context: 0 uevent_sock_mutex.wait_lock 
irq_context: 0 pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex defrag6_mutex rcu_read_lock rcu_node_0 irq_context: 0 pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex defrag6_mutex rcu_read_lock &rq->__lock irq_context: 0 pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex defrag6_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-kex-wg2#17 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg2#17 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)wg-kex-wg2#18 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock remove_cache_srcu &rq->__lock irq_context: 0 (wq_completion)wg-kex-wg2#18 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg2#18 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)wg-kex-wg2#18 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &n->lock irq_context: 0 (wq_completion)wg-kex-wg2#18 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)wg-kex-wg2#18 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh pool_lock#2 irq_context: 0 pernet_ops_rwsem &device->compat_devs_mutex irq_context: 0 pernet_ops_rwsem &device->compat_devs_mutex &xa->xa_lock#15 irq_context: 0 pernet_ops_rwsem &root->kernfs_rwsem irq_context: 0 
pernet_ops_rwsem &root->kernfs_rwsem kernfs_idr_lock &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem &root->kernfs_rwsem kernfs_idr_lock pool_lock#2 irq_context: 0 pernet_ops_rwsem &root->kernfs_rwsem rcu_read_lock rcu_node_0 irq_context: 0 pernet_ops_rwsem &root->kernfs_rwsem rcu_read_lock &rq->__lock irq_context: 0 pernet_ops_rwsem &root->kernfs_rwsem rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem &zone->lock irq_context: 0 pernet_ops_rwsem &zone->lock &____s->seqcount irq_context: 0 pernet_ops_rwsem &root->kernfs_rwsem quarantine_lock irq_context: 0 pernet_ops_rwsem &root->kernfs_rwsem &____s->seqcount irq_context: 0 pernet_ops_rwsem dev_pm_qos_sysfs_mtx irq_context: 0 pernet_ops_rwsem dev_pm_qos_sysfs_mtx &root->kernfs_rwsem irq_context: 0 pernet_ops_rwsem dev_pm_qos_sysfs_mtx &root->kernfs_rwsem irq_context: 0 pernet_ops_rwsem dev_pm_qos_sysfs_mtx dev_pm_qos_mtx irq_context: 0 pernet_ops_rwsem subsys mutex#83 irq_context: 0 pernet_ops_rwsem subsys mutex#83 &k->k_lock irq_context: 0 pernet_ops_rwsem subsys mutex#83 &k->k_lock klist_remove_lock irq_context: 0 pernet_ops_rwsem &x->wait#9 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem rcu_read_lock &obj_hash[i].lock pool_lock irq_context: 0 pernet_ops_rwsem dpm_list_mtx irq_context: 0 pernet_ops_rwsem &dev->power.lock irq_context: 0 pernet_ops_rwsem deferred_probe_mutex irq_context: 0 pernet_ops_rwsem device_links_lock irq_context: 0 pernet_ops_rwsem uevent_sock_mutex mmu_notifier_invalidate_range_start irq_context: 0 pernet_ops_rwsem gdp_mutex irq_context: 0 pernet_ops_rwsem &device->unregistration_lock irq_context: 0 pernet_ops_rwsem &root->kernfs_rwsem &root->kernfs_iattr_rwsem &rq->__lock irq_context: 0 pernet_ops_rwsem &root->kernfs_rwsem &root->kernfs_iattr_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem &root->kernfs_rwsem kernfs_idr_lock &obj_hash[i].lock pool_lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock batched_entropy_u32.lock crngs.lock irq_context: 0 (wq_completion)wg-kex-wg1#18 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock remove_cache_srcu irq_context: 0 (wq_completion)wg-kex-wg1#18 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock remove_cache_srcu quarantine_lock irq_context: 0 (wq_completion)wg-kex-wg1#18 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock remove_cache_srcu &c->lock irq_context: 0 (wq_completion)wg-kex-wg1#18 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : 
"0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock remove_cache_srcu &n->list_lock irq_context: 0 (wq_completion)wg-kex-wg1#18 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock remove_cache_srcu &obj_hash[i].lock irq_context: 0 sk_lock-AF_NETROM &c->lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)events (work_completion)(&aux->work)#2 irq_context: 0 (wq_completion)events (work_completion)(&aux->work)#2 &aux->poke_mutex irq_context: 0 (wq_completion)events (work_completion)(&aux->work)#2 &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&aux->work)#2 &aux->poke_mutex &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&aux->work)#2 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex lock kernfs_idr_lock &pcp->lock &zone->lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex lock kernfs_idr_lock &____s->seqcount irq_context: 0 (wq_completion)events (work_completion)(&aux->work)#2 &aux->poke_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 rcu_read_lock &rcu_state.gp_wq &p->pi_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)events (work_completion)(&aux->work)#2 &cfs_rq->removed.lock irq_context: 0 (wq_completion)events (work_completion)(&aux->work)#2 &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&aux->work)#2 map_idr_lock irq_context: 0 (wq_completion)events (work_completion)(&aux->work)#2 map_idr_lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&aux->work)#2 map_idr_lock pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&aux->work)#2 rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)events (work_completion)(&aux->work)#2 rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&aux->work)#2 rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)events drain_vmap_work vmap_purge_lock free_vmap_area_lock &meta->lock irq_context: 0 (wq_completion)events drain_vmap_work vmap_purge_lock free_vmap_area_lock kfence_freelist_lock irq_context: 0 (wq_completion)events (work_completion)(&umem->work) rcu_node_0 irq_context: 0 (wq_completion)events (work_completion)(&umem->work) rcu_read_lock &rcu_state.gp_wq irq_context: 0 (wq_completion)events (work_completion)(&umem->work) rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 (wq_completion)events (work_completion)(&umem->work) rcu_read_lock &rcu_state.gp_wq &p->pi_lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &sbi->s_orphan_lock rcu_read_lock &rcu_state.expedited_wq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &sbi->s_orphan_lock rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &sbi->s_orphan_lock rcu_read_lock &rcu_state.expedited_wq 
&p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &sbi->s_orphan_lock rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&umem->work) rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&umem->work) rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_NETROM &n->list_lock &c->lock irq_context: 0 pernet_ops_rwsem kernfs_idr_lock &obj_hash[i].lock pool_lock irq_context: softirq (&sk->sk_timer)#2 &base->lock irq_context: softirq (&sk->sk_timer)#2 &base->lock &obj_hash[i].lock irq_context: softirq (&sk->sk_timer)#2 &data->lock irq_context: softirq (&sk->sk_timer)#2 pool_lock#2 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->nc.work)->work) &base->lock fill_pool_map-wait-type-override &n->list_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->nc.work)->work) &base->lock fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 tomoyo_ss fill_pool_map-wait-type-override &c->lock irq_context: 0 tomoyo_ss fill_pool_map-wait-type-override pool_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 key#27 irq_context: 0 &u->iolock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &cfs_rq->removed.lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 &data->lock &obj_hash[i].lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 &data->lock &base->lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 &data->lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&aux->work)#2 rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&aux->work)#2 rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_unbound (work_completion)(&map->work) rcu_read_lock rcu_read_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 sk_lock-AF_NETLINK &rcu_state.expedited_wq irq_context: 0 sk_lock-AF_NETLINK &rcu_state.expedited_wq &p->pi_lock irq_context: 0 sk_lock-AF_NETLINK &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 sk_lock-AF_NETLINK &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &u->iolock &mm->mmap_lock &cfs_rq->removed.lock irq_context: 0 rtnl_mutex team->team_lock_key#9 pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&umem->work) rcu_read_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &nft_net->commit_mutex rcu_read_lock rcu_read_lock &pool->lock irq_context: 0 &nft_net->commit_mutex rcu_read_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 &nft_net->commit_mutex rcu_read_lock rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 &nft_net->commit_mutex rcu_read_lock &pool->lock irq_context: 0 rtnl_mutex dev_pm_qos_sysfs_mtx &root->kernfs_rwsem &sem->wait_lock irq_context: 0 rtnl_mutex dev_pm_qos_sysfs_mtx &sem->wait_lock irq_context: 0 rtnl_mutex dev_pm_qos_sysfs_mtx &p->pi_lock irq_context: 0 sb_writers#4 
rcu_read_lock &rcu_state.gp_wq &p->pi_lock &cfs_rq->removed.lock irq_context: softirq (&peer->timer_retransmit_handshake) &obj_hash[i].lock irq_context: softirq (&peer->timer_retransmit_handshake) &list->lock#17 irq_context: 0 (wq_completion)wg-crypt-wg2#10 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &n->list_lock irq_context: 0 (wq_completion)wg-crypt-wg2#10 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)events (work_completion)(&umem->work) &rcu_state.expedited_wq irq_context: 0 (wq_completion)events (work_completion)(&umem->work) &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)events (work_completion)(&umem->work) &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&umem->work) &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex dev_pm_qos_sysfs_mtx dev_pm_qos_mtx &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_read_lock &rcu_state.gp_wq &p->pi_lock &cfs_rq->removed.lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock &rcu_state.gp_wq &p->pi_lock &cfs_rq->removed.lock irq_context: 0 &xs->mutex &mm->mmap_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &cfs_rq->removed.lock irq_context: 0 &u->iolock &mm->mmap_lock rcu_read_lock &rcu_state.expedited_wq irq_context: 0 &u->iolock &mm->mmap_lock rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 &u->iolock &mm->mmap_lock rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 &u->iolock &mm->mmap_lock rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex nf_hook_mutex remove_cache_srcu irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex nf_hook_mutex remove_cache_srcu quarantine_lock irq_context: 0 &u->iolock &mm->mmap_lock rcu_read_lock &cfs_rq->removed.lock irq_context: 0 &u->iolock &mm->mmap_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg1#17 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock &rq->__lock irq_context: 0 (wq_completion)wg-kex-wg2#18 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &n->list_lock irq_context: 0 (wq_completion)wg-kex-wg2#18 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &n->list_lock &c->lock irq_context: softirq &(&forw_packet_aggr->delayed_work)->timer rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &base->lock &obj_hash[i].lock irq_context: 0 epnested_mutex &ep->mtx lock kernfs_idr_lock &n->list_lock irq_context: 0 (wq_completion)events (work_completion)(&(&krcp->krw_arr[i].rcu_work)->work) 
rcu_callback &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)events (work_completion)(&pwq->unbound_release_work) &rq->__lock &cfs_rq->removed.lock irq_context: 0 pernet_ops_rwsem &root->kernfs_rwsem rcu_node_0 irq_context: softirq (&sk->sk_timer)#2 &data->lock &obj_hash[i].lock irq_context: softirq (&sk->sk_timer)#2 &data->lock &base->lock irq_context: softirq (&sk->sk_timer)#2 &data->lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg1#18 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock remove_cache_srcu pool_lock#2 irq_context: 0 (wq_completion)wg-kex-wg1#18 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)events free_ipc_work rcu_state.exp_mutex &cfs_rq->removed.lock irq_context: 0 (wq_completion)events free_ipc_work rcu_state.exp_mutex pool_lock#2 irq_context: 0 pernet_ops_rwsem rcu_read_lock &rcu_state.gp_wq &p->pi_lock &cfs_rq->removed.lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &mm->mmap_lock rcu_read_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex lock kernfs_idr_lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &mapping->i_mmap_rwsem ptlock_ptr(page)#2 &mapping->private_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &mapping->i_mmap_rwsem ptlock_ptr(page)#2 &mapping->private_lock rcu_read_lock &memcg->move_lock irq_context: 0 &pipe->mutex/1 nfnl_subsys_ctnetlink (console_sem).lock irq_context: 0 &pipe->mutex/1 nfnl_subsys_ctnetlink console_lock console_srcu console_owner_lock irq_context: 0 &pipe->mutex/1 nfnl_subsys_ctnetlink console_lock console_srcu console_owner irq_context: 0 &pipe->mutex/1 nfnl_subsys_ctnetlink console_lock console_srcu console_owner &port_lock_key irq_context: 0 &pipe->mutex/1 nfnl_subsys_ctnetlink console_lock console_srcu console_owner console_owner_lock irq_context: 0 &pipe->mutex/1 nfnl_subsys_ctnetlink &cfs_rq->removed.lock irq_context: 0 &pipe->mutex/1 nfnl_subsys_ctnetlink &obj_hash[i].lock irq_context: 0 &pipe->mutex/1 nfnl_subsys_ctnetlink console_owner_lock irq_context: 0 &pipe->mutex/1 nfnl_subsys_ctnetlink console_owner irq_context: 0 &pipe->mutex/1 &data->lock irq_context: 0 pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq (&sk->sk_timer)#2 slock-AF_NETROM &obj_hash[i].lock irq_context: softirq (&sk->sk_timer)#2 slock-AF_NETROM &base->lock irq_context: softirq (&sk->sk_timer)#2 slock-AF_NETROM &base->lock &obj_hash[i].lock irq_context: 0 &xs->mutex &mm->mmap_lock rcu_read_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 &xs->mutex &mm->mmap_lock &obj_hash[i].lock pool_lock irq_context: 0 &pipe->mutex/1 rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 
&pipe->mutex/1 rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 &pipe->mutex/1 rcu_read_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 &pipe->mutex/1 rcu_read_lock rcu_read_lock_bh rcu_read_lock &n->list_lock irq_context: 0 &pipe->mutex/1 rcu_read_lock rcu_read_lock_bh rcu_read_lock &n->list_lock &c->lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex uevent_sock_mutex &____s->seqcount#2 irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex uevent_sock_mutex &____s->seqcount irq_context: 0 (wq_completion)wg-crypt-wg0#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &rq->__lock irq_context: 0 (wq_completion)wg-kex-wg1#17 (work_completion)(&peer->transmit_handshake_work) &rq->__lock irq_context: 0 &xs->mutex &mm->mmap_lock ptlock_ptr(page)#2 lock#4 rcu_read_lock pool_lock#2 irq_context: 0 &xs->mutex &mm->mmap_lock ptlock_ptr(page)#2 lock#4 &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 pernet_ops_rwsem &rnp->exp_wq[3] irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock stock_lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock &tbl->lock init_task.mems_allowed_seq.seqcount irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock init_task.mems_allowed_seq.seqcount irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock &c->lock irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock pool_lock#2 irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 rcu_read_lock rcu_node_0 irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 rcu_read_lock &rcu_state.gp_wq irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 rcu_read_lock &rq->__lock irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 &____s->seqcount#8 irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 &tcp_hashinfo.bhash[i].lock irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 &tcp_hashinfo.bhash[i].lock stock_lock irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 &tcp_hashinfo.bhash[i].lock pool_lock#2 irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock stock_lock irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 
&tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock pool_lock#2 irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock batched_entropy_u8.lock irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock &hashinfo->ehash_locks[i] irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 batched_entropy_u32.lock irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 tk_core.seq.seqcount irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 fs_reclaim irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 pool_lock#2 irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &____s->seqcount#7 irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 rcu_read_lock rcu_read_lock pool_lock#2 irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &c->lock irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &ct->lock irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock &____s->seqcount#7 irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &nf_nat_locks[i] irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &nf_conntrack_locks[i] irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 batched_entropy_u8.lock irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &nf_conntrack_locks[i]/1 irq_context: 0 (wq_completion)wg-kex-wg1#18 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh &obj_hash[i].lock pool_lock irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &n->lock irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &n->lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &n->lock &base->lock irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &n->lock &base->lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &n->lock &c->lock irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &n->lock pool_lock#2 irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 rcu_read_lock rcu_read_lock stock_lock irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &ul->lock#2 irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &c->lock irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 rcu_read_lock 
rcu_read_lock rcu_read_lock rcu_read_lock &n->list_lock irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &n->list_lock &c->lock irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock pool_lock#2 irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &table->lock#3 irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &list->lock#5 irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh pool_lock#2 irq_context: softirq rcu_read_lock rcu_read_lock &tbl->lock irq_context: softirq rcu_read_lock rcu_read_lock &n->lock &(&n->ha_lock)->lock irq_context: softirq rcu_read_lock rcu_read_lock &n->lock &(&n->ha_lock)->lock &____s->seqcount#9 irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock lock#8 irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock id_table_lock irq_context: softirq rcu_read_lock rcu_read_lock &n->lock irq_context: softirq rcu_read_lock rcu_read_lock &n->lock &____s->seqcount#9 irq_context: softirq rcu_read_lock rcu_read_lock &ul->lock#2 irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &n->lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &n->lock &obj_hash[i].lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &n->lock &base->lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &n->lock &base->lock &obj_hash[i].lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh pool_lock#2 irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &list->lock#5 irq_context: softirq rcu_read_lock 
rcu_read_lock rcu_read_lock rcu_read_lock &n->lock &(&n->ha_lock)->lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &n->lock &(&n->ha_lock)->lock &____s->seqcount#9 irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &table->lock#3 irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &list->lock#5 irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock lock#8 irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock id_table_lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &n->lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &n->lock &____s->seqcount#9 irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock nl_table_lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock nl_table_wait.lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock pool_lock#2 irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &list->lock#5 irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock k-slock-AF_INET6/1 irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 &base->lock irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 &base->lock &obj_hash[i].lock irq_context: 0 nf_sockopt_mutex &obj_hash[i].lock pool_lock irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem &anon_vma->rwsem mmu_notifier_invalidate_range_start irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem &anon_vma->rwsem ptlock_ptr(page) irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 slock-AF_INET6 irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 rcu_read_lock tcp_metrics_lock irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 rcu_read_lock tcp_metrics_lock pool_lock#2 irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &table->lock#3 irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 rcu_read_lock rcu_read_lock 
rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &list->lock#5 irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6 &tcp_hashinfo.bhash[i].lock irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6 &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6 &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock pool_lock#2 irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6 &hashinfo->ehash_locks[i] irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6 tk_core.seq.seqcount irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6 (&req->rsk_timer) irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6 &base->lock irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6 &base->lock &obj_hash[i].lock irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6 &queue->rskq_lock irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6 rcu_read_lock tcp_metrics_lock irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6 rcu_read_lock tcp_metrics_lock pool_lock#2 irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6 k-clock-AF_INET6 irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6 k-clock-AF_INET6 rcu_read_lock &pool->lock/1 irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6 k-clock-AF_INET6 rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6 k-clock-AF_INET6 rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6 k-clock-AF_INET6 rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6 k-clock-AF_INET6 rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6 k-clock-AF_INET6 rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex &bat_priv->tvlv.container_list_lock quarantine_lock irq_context: softirq (&peer->timer_persistent_keepalive) init_task.mems_allowed_seq.seqcount irq_context: 0 &mm->mmap_lock fill_pool_map-wait-type-override &rq->__lock irq_context: 0 &mm->mmap_lock fill_pool_map-wait-type-override &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) irq_context: 0 sk_lock-AF_INET6 slock-AF_INET6 rcu_read_lock &ei->socket.wq.wait irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) fs_reclaim irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) pool_lock#2 irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) &sb->s_type->i_lock_key#8 irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) k-sk_lock-AF_INET6 irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) k-sk_lock-AF_INET6 k-slock-AF_INET6 irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) k-sk_lock-AF_INET6 &queue->rskq_lock irq_context: 0 (wq_completion)krdsd 
(work_completion)(&rtn->rds_tcp_accept_w) k-slock-AF_INET6 irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) &obj_hash[i].lock irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) k-sk_lock-AF_INET6 clock-AF_INET6 irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) k-sk_lock-AF_INET6 &obj_hash[i].lock irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) k-sk_lock-AF_INET6 &base->lock irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) k-sk_lock-AF_INET6 &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) k-sk_lock-AF_INET6 pool_lock#2 irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) k-sk_lock-AF_INET6 &dir->lock irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) k-sk_lock-AF_INET6 fs_reclaim irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) k-sk_lock-AF_INET6 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) &c->lock irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) &n->list_lock irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) &n->list_lock &c->lock irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) &____s->seqcount irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) rds_cong_lock irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) rds_trans_sem irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) rds_trans_sem fs_reclaim irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->tt.work)->work) &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) rds_trans_sem fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) rds_trans_sem pool_lock#2 irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) rds_trans_sem crngs.lock irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) rds_trans_sem &id_priv->handler_mutex irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) rds_trans_sem &id_priv->handler_mutex &rq->__lock irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) rds_trans_sem &id_priv->handler_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) rds_trans_sem &id_priv->handler_mutex &id_priv->lock irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) rds_trans_sem id_table_lock irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) rds_trans_sem &x->wait#27 irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) rds_trans_sem &obj_hash[i].lock irq_context: 0 (wq_completion)events &cfs_rq->removed.lock irq_context: 0 (wq_completion)events &obj_hash[i].lock irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) rcu_read_lock &____s->seqcount#2 irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) rcu_read_lock &____s->seqcount irq_context: 0 (wq_completion)krdsd 
(work_completion)(&rtn->rds_tcp_accept_w) rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) rcu_read_lock &c->lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->mcast.work)->work) quarantine_lock irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) rcu_read_lock rds_tcp_conn_lock irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) rcu_read_lock rds_conn_lock irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) rcu_read_lock rds_conn_lock rds_cong_lock irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) &tc->t_conn_path_lock irq_context: 0 &type->i_mutex_dir_key#5 &rcu_state.gp_wq irq_context: 0 &type->i_mutex_dir_key#5 &rcu_state.gp_wq &p->pi_lock irq_context: 0 &type->i_mutex_dir_key#5 &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 &type->i_mutex_dir_key#5 &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 kn->active#5 &rcu_state.expedited_wq &p->pi_lock irq_context: 0 kn->active#5 &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 kn->active#5 &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 rcu_read_lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg2#4 (work_completion)(&peer->transmit_packet_work) &rq->__lock irq_context: 0 (wq_completion)wg-crypt-wg2#4 (work_completion)(&peer->transmit_packet_work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_PACKET slock-AF_PACKET &sk->sk_lock.wq irq_context: 0 slock-AF_PACKET &sk->sk_lock.wq irq_context: 0 slock-AF_PACKET &sk->sk_lock.wq &p->pi_lock irq_context: 0 slock-AF_PACKET &sk->sk_lock.wq &p->pi_lock &rq->__lock irq_context: 0 slock-AF_PACKET &sk->sk_lock.wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)rcu_gp (work_completion)(&rew->rew_work) rcu_state.exp_wake_mutex pool_lock#2 irq_context: 0 sb_writers#4 tomoyo_ss remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ret->b_state_lock bit_wait_table + i irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ei->i_data_sem &rq->__lock &cfs_rq->removed.lock irq_context: 0 rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &cfs_rq->removed.lock irq_context: softirq (&timer.timer) &p->pi_lock &rq->__lock &obj_hash[i].lock irq_context: softirq (&timer.timer) &p->pi_lock &rq->__lock &base->lock irq_context: softirq (&timer.timer) &p->pi_lock &rq->__lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock fill_pool_map-wait-type-override &n->list_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 (wq_completion)mld 
(work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock fill_pool_map-wait-type-override pool_lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 batched_entropy_u8.lock crngs.lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&tbl->gc_work)->work) &tbl->lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&tbl->gc_work)->work) &tbl->lock fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&tbl->gc_work)->work) &tbl->lock fill_pool_map-wait-type-override &n->list_lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&tbl->gc_work)->work) &tbl->lock fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&tbl->gc_work)->work) &tbl->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &rcu_state.expedited_wq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &rcu_state.expedited_wq &p->pi_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 rcu_read_lock &rcu_state.expedited_wq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)wg-crypt-wg0#4 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock batched_entropy_u8.lock irq_context: 0 (wq_completion)wg-crypt-wg0#4 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock kfence_freelist_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 remove_cache_srcu &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 remove_cache_srcu rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 remove_cache_srcu rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 
sb_writers#4 &type->i_mutex_dir_key#3/1 remove_cache_srcu rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ei->i_es_lock &c->lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->mcast.work)->work) rcu_read_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->mcast.work)->work) rcu_read_lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 remove_cache_srcu &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &ei->xattr_sem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rcu_read_lock &vma->vm_lock->lock &obj_hash[i].lock irq_context: 0 sb_writers#3 oom_adj_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &cfs_rq->removed.lock irq_context: 0 rtnl_mutex uevent_sock_mutex rcu_node_0 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &sb->s_type->i_lock_key#22 &xa->xa_lock#6 &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &sb->s_type->i_lock_key#22 &xa->xa_lock#6 pool_lock#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 &dentry->d_lock &dentry->d_lock/1 &lru->node[i].lock rcu_read_lock &p->pi_lock irq_context: 0 sb_writers#4 tomoyo_ss rcu_read_lock &rcu_state.expedited_wq irq_context: 0 sb_writers#4 tomoyo_ss rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 sb_writers#4 tomoyo_ss rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 tomoyo_ss rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_PPPOX rcu_state.exp_mutex rcu_read_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_PPPOX rcu_state.exp_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_PPPOX rcu_state.exp_mutex.wait_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_PPPOX &p->pi_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_PPPOX &p->pi_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_PPPOX &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 remove_cache_srcu irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 remove_cache_srcu quarantine_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem &bgl->locks[i].lock &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem &bgl->locks[i].lock pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#8 &nsim_trap_data->trap_lock batched_entropy_u8.lock crngs.lock irq_context: 0 rtnl_mutex netpoll_srcu &rq->__lock irq_context: 0 rtnl_mutex netpoll_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 jbd2_handle rcu_read_lock rcu_read_lock pool_lock#2 irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock &cfs_rq->removed.lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock 
rcu_read_lock rcu_read_lock_bh &data->lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh &data->lock &base->lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh &data->lock &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 &rq->__lock &cfs_rq->removed.lock irq_context: 0 &vma->vm_lock->lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 &smc->clcsock_release_lock rcu_read_lock rcu_node_0 irq_context: 0 &sb->s_type->i_mutex_key#10 &smc->clcsock_release_lock rcu_read_lock &rq->__lock irq_context: 0 rtnl_mutex mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 rtnl_mutex mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &br->hash_lock &pcp->lock &zone->lock irq_context: 0 cb_lock &devlink->lock_key#7 irq_context: 0 cb_lock &devlink->lock_key#7 pin_fs_lock irq_context: 0 cb_lock &devlink->lock_key#7 &dentry->d_lock irq_context: 0 cb_lock &devlink->lock_key#7 &sb->s_type->i_mutex_key#3 irq_context: 0 cb_lock &devlink->lock_key#7 &sb->s_type->i_mutex_key#3 &dentry->d_lock irq_context: 0 cb_lock &devlink->lock_key#7 &sb->s_type->i_mutex_key#3 tk_core.seq.seqcount irq_context: 0 cb_lock &devlink->lock_key#7 &sb->s_type->i_mutex_key#3 rename_lock.seqcount irq_context: 0 cb_lock &devlink->lock_key#7 &sb->s_type->i_mutex_key#3 pin_fs_lock irq_context: 0 cb_lock &devlink->lock_key#7 &sb->s_type->i_mutex_key#3 rcu_read_lock mount_lock irq_context: 0 cb_lock &devlink->lock_key#7 &sb->s_type->i_mutex_key#3 rcu_read_lock mount_lock mount_lock.seqcount irq_context: 0 cb_lock &devlink->lock_key#7 &sb->s_type->i_mutex_key#3 mount_lock irq_context: 0 cb_lock &devlink->lock_key#7 &sb->s_type->i_mutex_key#3 mount_lock mount_lock.seqcount irq_context: 0 cb_lock &devlink->lock_key#7 &sb->s_type->i_mutex_key#3 rcu_read_lock &dentry->d_lock irq_context: 0 cb_lock &devlink->lock_key#7 rcu_read_lock &dentry->d_lock irq_context: 0 cb_lock &devlink->lock_key#7 &fsnotify_mark_srcu irq_context: 0 cb_lock &devlink->lock_key#7 &sb->s_type->i_lock_key#7 irq_context: 0 cb_lock &devlink->lock_key#7 &s->s_inode_list_lock irq_context: 0 cb_lock &devlink->lock_key#7 &xa->xa_lock#6 irq_context: 0 cb_lock &devlink->lock_key#7 &obj_hash[i].lock irq_context: 0 cb_lock &devlink->lock_key#7 rcu_read_lock mount_lock irq_context: 0 cb_lock &devlink->lock_key#7 rcu_read_lock mount_lock mount_lock.seqcount irq_context: 0 cb_lock &devlink->lock_key#7 mount_lock irq_context: 0 cb_lock &devlink->lock_key#7 mount_lock mount_lock.seqcount irq_context: 0 cb_lock &devlink->lock_key#7 &rq->__lock irq_context: 0 cb_lock &devlink->lock_key#7 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock &devlink->lock_key#7 rtnl_mutex irq_context: 0 cb_lock &devlink->lock_key#7 rtnl_mutex rcu_state.exp_mutex rcu_node_0 irq_context: 0 cb_lock &devlink->lock_key#7 rtnl_mutex rcu_state.exp_mutex &obj_hash[i].lock irq_context: 0 cb_lock &devlink->lock_key#7 rtnl_mutex rcu_state.exp_mutex &rq->__lock irq_context: 0 cb_lock &devlink->lock_key#7 rtnl_mutex rcu_state.exp_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock &devlink->lock_key#7 rtnl_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock irq_context: 0 cb_lock &devlink->lock_key#7 rtnl_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 cb_lock 
&devlink->lock_key#7 rtnl_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 cb_lock &devlink->lock_key#7 rtnl_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 cb_lock &devlink->lock_key#7 rtnl_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock &devlink->lock_key#7 rtnl_mutex &obj_hash[i].lock irq_context: 0 cb_lock &devlink->lock_key#7 rtnl_mutex dev_base_lock irq_context: 0 cb_lock &devlink->lock_key#7 rtnl_mutex cpu_hotplug_lock irq_context: 0 cb_lock &devlink->lock_key#7 rtnl_mutex cpu_hotplug_lock &list->lock#5 irq_context: 0 cb_lock &devlink->lock_key#7 rtnl_mutex cpu_hotplug_lock &rq->__lock irq_context: 0 cb_lock &devlink->lock_key#7 rtnl_mutex cpu_hotplug_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock &devlink->lock_key#7 rtnl_mutex rcu_state.exp_mutex &rnp->exp_wq[3] irq_context: 0 cb_lock &devlink->lock_key#7 rtnl_mutex &dir->lock#2 irq_context: 0 cb_lock &devlink->lock_key#7 rtnl_mutex bpf_devs_lock irq_context: 0 cb_lock &devlink->lock_key#7 rtnl_mutex &hwstats->hwsdev_list_lock irq_context: 0 cb_lock &devlink->lock_key#7 rtnl_mutex (work_completion)(&(&devlink_port->type_warn_dw)->work) irq_context: 0 cb_lock &devlink->lock_key#7 rtnl_mutex &devlink_port->type_lock irq_context: 0 cb_lock &devlink->lock_key#7 rtnl_mutex fs_reclaim irq_context: 0 cb_lock &devlink->lock_key#7 rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 cb_lock &devlink->lock_key#7 rtnl_mutex &c->lock irq_context: 0 cb_lock &devlink->lock_key#7 rtnl_mutex nl_table_lock irq_context: 0 cb_lock &devlink->lock_key#7 rtnl_mutex nl_table_wait.lock irq_context: 0 cb_lock &devlink->lock_key#7 rtnl_mutex net_rwsem irq_context: 0 cb_lock &devlink->lock_key#7 rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 cb_lock &devlink->lock_key#7 rtnl_mutex rcu_read_lock &pool->lock/1 irq_context: 0 cb_lock &devlink->lock_key#7 rtnl_mutex rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 cb_lock &devlink->lock_key#7 rtnl_mutex rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 cb_lock &devlink->lock_key#7 rtnl_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 cb_lock &devlink->lock_key#7 rtnl_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock &devlink->lock_key#7 rtnl_mutex &rq->__lock irq_context: 0 cb_lock &devlink->lock_key#7 rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock &devlink->lock_key#7 rtnl_mutex &in_dev->mc_tomb_lock irq_context: 0 cb_lock &devlink->lock_key#7 rtnl_mutex sysctl_lock irq_context: 0 cb_lock &devlink->lock_key#7 rtnl_mutex sysctl_lock &obj_hash[i].lock irq_context: 0 cb_lock &devlink->lock_key#7 rtnl_mutex sysctl_lock krc.lock irq_context: 0 cb_lock &devlink->lock_key#7 rtnl_mutex &tbl->lock irq_context: 0 cb_lock &devlink->lock_key#7 rtnl_mutex class irq_context: 0 cb_lock &devlink->lock_key#7 rtnl_mutex (&tbl->proxy_timer) irq_context: 0 cb_lock &devlink->lock_key#7 rtnl_mutex &base->lock irq_context: 0 cb_lock &devlink->lock_key#7 rtnl_mutex &ul->lock irq_context: 0 cb_lock &devlink->lock_key#7 rtnl_mutex &net->xfrm.xfrm_state_lock irq_context: 0 cb_lock &devlink->lock_key#7 rtnl_mutex &net->xfrm.xfrm_policy_lock irq_context: 0 cb_lock &devlink->lock_key#7 rtnl_mutex &net->xdp.lock irq_context: 0 cb_lock &devlink->lock_key#7 rtnl_mutex krc.lock irq_context: 0 
cb_lock &devlink->lock_key#7 rtnl_mutex mirred_list_lock irq_context: 0 cb_lock &devlink->lock_key#7 rtnl_mutex &nft_net->commit_mutex irq_context: 0 cb_lock &devlink->lock_key#7 rtnl_mutex &tn->lock irq_context: 0 cb_lock &devlink->lock_key#7 rtnl_mutex rcu_read_lock &tb->tb6_lock irq_context: 0 cb_lock &devlink->lock_key#7 rtnl_mutex rcu_read_lock &tb->tb6_lock &net->ipv6.fib6_walker_lock irq_context: 0 cb_lock &devlink->lock_key#7 rtnl_mutex proc_subdir_lock irq_context: 0 cb_lock &devlink->lock_key#7 rtnl_mutex &ent->pde_unload_lock irq_context: 0 cb_lock &devlink->lock_key#7 rtnl_mutex proc_inum_ida.xa_lock irq_context: 0 cb_lock &devlink->lock_key#7 rtnl_mutex &net->ipv6.addrconf_hash_lock irq_context: 0 cb_lock &devlink->lock_key#7 rtnl_mutex &ndev->lock irq_context: 0 cb_lock &devlink->lock_key#7 rtnl_mutex &ndev->lock &obj_hash[i].lock irq_context: 0 cb_lock &devlink->lock_key#7 rtnl_mutex &idev->mc_lock irq_context: 0 cb_lock &devlink->lock_key#7 rtnl_mutex rcu_state.exp_mutex &rnp->exp_wq[0] irq_context: 0 cb_lock &devlink->lock_key#7 rtnl_mutex &idev->mc_query_lock irq_context: 0 cb_lock &devlink->lock_key#7 rtnl_mutex &idev->mc_query_lock &obj_hash[i].lock irq_context: 0 cb_lock &devlink->lock_key#7 rtnl_mutex (work_completion)(&(&idev->mc_report_work)->work) irq_context: 0 cb_lock &devlink->lock_key#7 rtnl_mutex (work_completion)(&(&idev->mc_report_work)->work) &rq->__lock irq_context: 0 cb_lock &devlink->lock_key#7 rtnl_mutex (work_completion)(&(&idev->mc_report_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock &devlink->lock_key#7 rtnl_mutex &idev->mc_report_lock irq_context: 0 cb_lock &devlink->lock_key#7 rtnl_mutex &idev->mc_lock &obj_hash[i].lock irq_context: 0 cb_lock &devlink->lock_key#7 rtnl_mutex &idev->mc_lock krc.lock irq_context: 0 cb_lock &devlink->lock_key#7 rtnl_mutex &pnn->pndevs.lock irq_context: 0 cb_lock &devlink->lock_key#7 rtnl_mutex &pnn->routes.lock irq_context: 0 cb_lock &devlink->lock_key#7 rtnl_mutex &pnettable->lock irq_context: 0 cb_lock &devlink->lock_key#7 rtnl_mutex smc_ib_devices.mutex irq_context: 0 cb_lock &devlink->lock_key#7 rtnl_mutex target_list_lock irq_context: 0 cb_lock &devlink->lock_key#7 rtnl_mutex _xmit_ETHER irq_context: 0 cb_lock &devlink->lock_key#7 rtnl_mutex &base->lock &obj_hash[i].lock irq_context: 0 cb_lock &devlink->lock_key#7 rtnl_mutex &root->kernfs_rwsem irq_context: 0 cb_lock &devlink->lock_key#7 rtnl_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 cb_lock &devlink->lock_key#7 rtnl_mutex &root->kernfs_rwsem kernfs_idr_lock irq_context: 0 cb_lock &devlink->lock_key#7 rtnl_mutex &root->kernfs_rwsem &obj_hash[i].lock irq_context: 0 cb_lock &devlink->lock_key#7 rtnl_mutex &n->list_lock irq_context: 0 cb_lock &devlink->lock_key#7 rtnl_mutex &n->list_lock &c->lock irq_context: 0 cb_lock &devlink->lock_key#7 rtnl_mutex uevent_sock_mutex irq_context: 0 cb_lock &devlink->lock_key#7 rtnl_mutex uevent_sock_mutex fs_reclaim irq_context: 0 cb_lock &devlink->lock_key#7 rtnl_mutex uevent_sock_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 cb_lock &devlink->lock_key#7 rtnl_mutex uevent_sock_mutex nl_table_lock irq_context: 0 cb_lock &devlink->lock_key#7 rtnl_mutex uevent_sock_mutex &obj_hash[i].lock irq_context: 0 cb_lock &devlink->lock_key#7 rtnl_mutex uevent_sock_mutex nl_table_wait.lock irq_context: 0 cb_lock &devlink->lock_key#7 rtnl_mutex sysfs_symlink_target_lock irq_context: 0 cb_lock &devlink->lock_key#7 rtnl_mutex kernfs_idr_lock irq_context: 0 cb_lock 
&devlink->lock_key#7 rtnl_mutex &k->list_lock irq_context: 0 cb_lock &devlink->lock_key#7 rtnl_mutex &root->kernfs_rwsem irq_context: 0 cb_lock &devlink->lock_key#7 rtnl_mutex uevent_sock_mutex &c->lock irq_context: 0 cb_lock &devlink->lock_key#7 rtnl_mutex uevent_sock_mutex &n->list_lock irq_context: 0 cb_lock &devlink->lock_key#7 rtnl_mutex uevent_sock_mutex &n->list_lock &c->lock irq_context: 0 cb_lock &devlink->lock_key#7 rtnl_mutex uevent_sock_mutex &rq->__lock irq_context: 0 cb_lock &devlink->lock_key#7 rtnl_mutex uevent_sock_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock &devlink->lock_key#7 rtnl_mutex dev_hotplug_mutex irq_context: 0 cb_lock &devlink->lock_key#7 rtnl_mutex dev_hotplug_mutex &dev->power.lock irq_context: 0 cb_lock &devlink->lock_key#7 rtnl_mutex dev_hotplug_mutex &k->k_lock irq_context: 0 cb_lock &devlink->lock_key#7 rtnl_mutex dev_pm_qos_sysfs_mtx irq_context: 0 cb_lock &devlink->lock_key#7 rtnl_mutex dev_pm_qos_sysfs_mtx &root->kernfs_rwsem irq_context: 0 cb_lock &devlink->lock_key#7 rtnl_mutex dev_pm_qos_sysfs_mtx &root->kernfs_rwsem irq_context: 0 cb_lock &devlink->lock_key#7 rtnl_mutex dev_pm_qos_sysfs_mtx dev_pm_qos_mtx irq_context: 0 cb_lock &devlink->lock_key#7 rtnl_mutex &k->k_lock irq_context: 0 cb_lock &devlink->lock_key#7 rtnl_mutex &k->k_lock klist_remove_lock irq_context: 0 cb_lock &devlink->lock_key#7 rtnl_mutex subsys mutex#17 irq_context: 0 cb_lock &devlink->lock_key#7 rtnl_mutex subsys mutex#17 &k->k_lock irq_context: 0 cb_lock &devlink->lock_key#7 rtnl_mutex subsys mutex#17 &k->k_lock klist_remove_lock irq_context: 0 cb_lock &devlink->lock_key#7 rtnl_mutex &root->kernfs_rwsem &rq->__lock irq_context: 0 cb_lock &devlink->lock_key#7 rtnl_mutex &root->kernfs_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock &devlink->lock_key#7 rtnl_mutex &x->wait#9 irq_context: 0 cb_lock &devlink->lock_key#7 rtnl_mutex dpm_list_mtx irq_context: 0 cb_lock &devlink->lock_key#7 rtnl_mutex &dev->power.lock irq_context: 0 cb_lock &devlink->lock_key#7 rtnl_mutex deferred_probe_mutex irq_context: 0 cb_lock &devlink->lock_key#7 rtnl_mutex device_links_lock irq_context: 0 cb_lock &devlink->lock_key#7 rtnl_mutex mmu_notifier_invalidate_range_start irq_context: 0 cb_lock &devlink->lock_key#7 rtnl_mutex mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 cb_lock &devlink->lock_key#7 rtnl_mutex mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock &devlink->lock_key#7 rtnl_mutex uevent_sock_mutex mmu_notifier_invalidate_range_start irq_context: 0 cb_lock &devlink->lock_key#7 rtnl_mutex gdp_mutex irq_context: 0 cb_lock &devlink->lock_key#7 rtnl_mutex cpu_hotplug_lock xps_map_mutex irq_context: 0 cb_lock &devlink->lock_key#7 rtnl_mutex rcu_state.exp_mutex &rnp->exp_wq[2] irq_context: 0 cb_lock &devlink->lock_key#7 rtnl_mutex rcu_state.exp_mutex.wait_lock irq_context: 0 cb_lock &devlink->lock_key#7 rtnl_mutex &p->pi_lock irq_context: 0 cb_lock &devlink->lock_key#7 rtnl_mutex &p->pi_lock &cfs_rq->removed.lock irq_context: 0 cb_lock &devlink->lock_key#7 rtnl_mutex &p->pi_lock &rq->__lock irq_context: 0 cb_lock &devlink->lock_key#7 rtnl_mutex &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock &devlink->lock_key#7 rtnl_mutex pin_fs_lock irq_context: 0 cb_lock &devlink->lock_key#7 rtnl_mutex &dentry->d_lock irq_context: 0 cb_lock &devlink->lock_key#7 rtnl_mutex &sb->s_type->i_mutex_key#3 irq_context: 0 cb_lock &devlink->lock_key#7 
rtnl_mutex &sb->s_type->i_mutex_key#3 &dentry->d_lock irq_context: 0 cb_lock &devlink->lock_key#7 rtnl_mutex &sb->s_type->i_mutex_key#3 tk_core.seq.seqcount irq_context: 0 cb_lock &devlink->lock_key#7 rtnl_mutex &sb->s_type->i_mutex_key#3 rename_lock.seqcount irq_context: 0 cb_lock &devlink->lock_key#7 rtnl_mutex &sb->s_type->i_mutex_key#3 pin_fs_lock irq_context: 0 cb_lock &devlink->lock_key#7 rtnl_mutex &sb->s_type->i_mutex_key#3 rcu_read_lock mount_lock irq_context: 0 cb_lock &devlink->lock_key#7 rtnl_mutex &sb->s_type->i_mutex_key#3 rcu_read_lock mount_lock mount_lock.seqcount irq_context: 0 cb_lock &devlink->lock_key#7 rtnl_mutex &sb->s_type->i_mutex_key#3 mount_lock irq_context: 0 cb_lock &devlink->lock_key#7 rtnl_mutex &sb->s_type->i_mutex_key#3 mount_lock mount_lock.seqcount irq_context: 0 cb_lock &devlink->lock_key#7 rtnl_mutex &sb->s_type->i_mutex_key#3 rcu_read_lock &dentry->d_lock irq_context: 0 cb_lock &devlink->lock_key#7 rtnl_mutex rcu_read_lock &dentry->d_lock irq_context: 0 cb_lock &devlink->lock_key#7 rtnl_mutex &fsnotify_mark_srcu irq_context: 0 cb_lock &devlink->lock_key#7 rtnl_mutex &sb->s_type->i_lock_key#7 irq_context: 0 cb_lock &devlink->lock_key#7 rtnl_mutex &s->s_inode_list_lock irq_context: 0 cb_lock &devlink->lock_key#7 rtnl_mutex &xa->xa_lock#6 irq_context: 0 cb_lock &devlink->lock_key#7 rtnl_mutex rcu_read_lock mount_lock irq_context: 0 cb_lock &devlink->lock_key#7 rtnl_mutex rcu_read_lock mount_lock mount_lock.seqcount irq_context: 0 cb_lock &devlink->lock_key#7 rtnl_mutex mount_lock irq_context: 0 cb_lock &devlink->lock_key#7 rtnl_mutex mount_lock mount_lock.seqcount irq_context: 0 cb_lock &devlink->lock_key#7 rtnl_mutex bpf_devs_lock rcu_read_lock rhashtable_bucket irq_context: 0 cb_lock &devlink->lock_key#7 rtnl_mutex bpf_devs_lock &obj_hash[i].lock irq_context: 0 cb_lock &devlink->lock_key#7 rcu_state.barrier_mutex irq_context: 0 cb_lock &devlink->lock_key#7 rcu_state.barrier_mutex rcu_state.barrier_lock irq_context: 0 cb_lock &devlink->lock_key#7 rcu_state.barrier_mutex rcu_state.barrier_lock &obj_hash[i].lock irq_context: 0 cb_lock &devlink->lock_key#7 rcu_state.barrier_mutex &x->wait#24 irq_context: 0 cb_lock &devlink->lock_key#7 rcu_state.barrier_mutex &rq->__lock irq_context: 0 cb_lock &devlink->lock_key#7 rcu_state.barrier_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock &devlink->lock_key#7 dev_base_lock irq_context: 0 cb_lock &devlink->lock_key#7 lweventlist_lock irq_context: 0 cb_lock &devlink->lock_key#7 netdev_unregistering_wq.lock irq_context: 0 cb_lock &devlink->lock_key#7 krc.lock irq_context: 0 cb_lock &devlink->lock_key#7 &dir->lock#2 irq_context: 0 cb_lock &devlink->lock_key#7 &dir->lock#2 &obj_hash[i].lock irq_context: 0 cb_lock &devlink->lock_key#7 &sb->s_type->i_mutex_key#3 &dentry->d_lock &dentry->d_lock/1 irq_context: 0 cb_lock &devlink->lock_key#7 &sb->s_type->i_mutex_key#3 &fsnotify_mark_srcu irq_context: 0 cb_lock &devlink->lock_key#7 &sb->s_type->i_mutex_key#3 &sb->s_type->i_lock_key#7 irq_context: 0 cb_lock &devlink->lock_key#7 &sb->s_type->i_mutex_key#3 &s->s_inode_list_lock irq_context: 0 cb_lock &devlink->lock_key#7 &sb->s_type->i_mutex_key#3 &xa->xa_lock#6 irq_context: 0 cb_lock &devlink->lock_key#7 &sb->s_type->i_mutex_key#3 &obj_hash[i].lock irq_context: 0 cb_lock &devlink->lock_key#7 &sb->s_type->i_mutex_key#3 &rq->__lock irq_context: 0 cb_lock &devlink->lock_key#7 &sb->s_type->i_mutex_key#3 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock &devlink->lock_key#7 
&sb->s_type->i_mutex_key#3 &obj_hash[i].lock pool_lock irq_context: 0 cb_lock &devlink->lock_key#7 &sb->s_type->i_mutex_key#3 rcu_read_lock rcu_node_0 irq_context: 0 cb_lock &devlink->lock_key#7 &sb->s_type->i_mutex_key#3 rcu_read_lock &rq->__lock irq_context: 0 cb_lock &devlink->lock_key#7 &sb->s_type->i_mutex_key#3 rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock &devlink->lock_key#7 &obj_hash[i].lock pool_lock irq_context: 0 cb_lock &devlink->lock_key#7 &base->lock irq_context: 0 cb_lock &devlink->lock_key#7 &base->lock &obj_hash[i].lock irq_context: 0 cb_lock &devlink->lock_key#7 (work_completion)(&(&devlink_port->type_warn_dw)->work) irq_context: 0 cb_lock &devlink->lock_key#7 fs_reclaim irq_context: 0 cb_lock &devlink->lock_key#7 fs_reclaim &rq->__lock irq_context: 0 cb_lock &devlink->lock_key#7 fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock &devlink->lock_key#7 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 cb_lock &devlink->lock_key#7 &c->lock irq_context: 0 cb_lock &devlink->lock_key#7 &n->list_lock irq_context: 0 cb_lock &devlink->lock_key#7 &n->list_lock &c->lock irq_context: 0 cb_lock &devlink->lock_key#7 &devlink_port->type_lock irq_context: 0 cb_lock &devlink->lock_key#7 nl_table_lock irq_context: 0 cb_lock &devlink->lock_key#7 nl_table_wait.lock irq_context: 0 cb_lock &devlink->lock_key#7 &xa->xa_lock#14 irq_context: 0 cb_lock &devlink->lock_key#7 rtnl_mutex rcu_state.exp_mutex &rnp->exp_wq[1] irq_context: 0 cb_lock &devlink->lock_key#7 rtnl_mutex &root->kernfs_rwsem rcu_read_lock rcu_node_0 irq_context: 0 cb_lock &devlink->lock_key#7 rtnl_mutex &root->kernfs_rwsem rcu_read_lock &rq->__lock irq_context: 0 cb_lock &devlink->lock_key#7 rtnl_mutex &root->kernfs_rwsem rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock &devlink->lock_key#7 rtnl_mutex &root->kernfs_rwsem kernfs_idr_lock &obj_hash[i].lock irq_context: 0 cb_lock &devlink->lock_key#7 rtnl_mutex dpm_list_mtx &rq->__lock irq_context: 0 cb_lock &devlink->lock_key#7 rtnl_mutex dpm_list_mtx &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock &devlink->lock_key#7 rtnl_mutex.wait_lock irq_context: 0 cb_lock &devlink->lock_key#7 &p->pi_lock irq_context: 0 cb_lock &devlink->lock_key#7 &p->pi_lock &rq->__lock irq_context: 0 cb_lock &devlink->lock_key#7 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock &devlink->lock_key#7 rtnl_mutex &obj_hash[i].lock pool_lock irq_context: 0 cb_lock &devlink->lock_key#7 rtnl_mutex sysctl_lock &obj_hash[i].lock pool_lock irq_context: 0 cb_lock &devlink->lock_key#7 rtnl_mutex uevent_sock_mutex &____s->seqcount#2 irq_context: 0 cb_lock &devlink->lock_key#7 rtnl_mutex uevent_sock_mutex &____s->seqcount irq_context: 0 cb_lock &devlink->lock_key#7 quarantine_lock irq_context: 0 cb_lock &devlink->lock_key#7 &xa->xa_lock#14 &obj_hash[i].lock irq_context: 0 cb_lock &devlink->lock_key#7 rtnl_mutex &sem->wait_lock irq_context: 0 cb_lock &devlink->lock_key#7 rtnl_mutex &root->kernfs_rwsem &sem->wait_lock irq_context: 0 cb_lock &devlink->lock_key#7 rtnl_mutex gdp_mutex sysfs_symlink_target_lock irq_context: 0 cb_lock &devlink->lock_key#7 rtnl_mutex gdp_mutex &root->kernfs_rwsem irq_context: 0 cb_lock &devlink->lock_key#7 rtnl_mutex gdp_mutex &root->kernfs_rwsem 
&root->kernfs_iattr_rwsem irq_context: 0 cb_lock &devlink->lock_key#7 rtnl_mutex gdp_mutex kernfs_idr_lock irq_context: 0 cb_lock &devlink->lock_key#7 rtnl_mutex gdp_mutex &obj_hash[i].lock irq_context: 0 cb_lock &devlink->lock_key#7 rtnl_mutex gdp_mutex &k->list_lock irq_context: 0 (wq_completion)wg-kex-wg2#7 (work_completion)(&peer->transmit_handshake_work) &____s->seqcount#2 irq_context: 0 (wq_completion)wg-kex-wg2#7 (work_completion)(&peer->transmit_handshake_work) &____s->seqcount irq_context: 0 (wq_completion)wg-kex-wg2#7 (work_completion)(&peer->transmit_handshake_work) pool_lock#2 irq_context: 0 cb_lock &devlink->lock_key#7 rcu_read_lock &pool->lock irq_context: 0 cb_lock &devlink->lock_key#7 rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 cb_lock &devlink->lock_key#7 (work_completion)(&(&hwstats->traffic_dw)->work) irq_context: 0 cb_lock &devlink->lock_key#7 &hwstats->hwsdev_list_lock irq_context: 0 cb_lock &devlink->lock_key#7 &(&fn_net->fib_chain)->lock irq_context: 0 cb_lock &devlink->lock_key#7 rcu_state.exp_mutex rcu_node_0 irq_context: 0 cb_lock &devlink->lock_key#7 rcu_state.exp_mutex &obj_hash[i].lock irq_context: 0 cb_lock &devlink->lock_key#7 rcu_state.exp_mutex rcu_read_lock &pool->lock irq_context: 0 cb_lock &devlink->lock_key#7 rcu_state.exp_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 cb_lock &devlink->lock_key#7 rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 cb_lock &devlink->lock_key#7 rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 cb_lock &devlink->lock_key#7 rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock &devlink->lock_key#7 rcu_state.exp_mutex &rq->__lock irq_context: 0 cb_lock &devlink->lock_key#7 rcu_state.exp_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock &devlink->lock_key#7 rtnl_mutex &(&net->nexthop.notifier_chain)->rwsem irq_context: 0 cb_lock &devlink->lock_key#7 (work_completion)(&data->fib_flush_work) irq_context: 0 cb_lock &devlink->lock_key#7 (work_completion)(&data->fib_event_work) irq_context: 0 cb_lock &devlink->lock_key#7 (work_completion)(&ht->run_work) irq_context: 0 cb_lock &devlink->lock_key#7 &ht->mutex irq_context: 0 cb_lock &devlink->lock_key#7 &ht->mutex &obj_hash[i].lock irq_context: 0 cb_lock &devlink->lock_key#7 &ht->mutex &rq->__lock irq_context: 0 cb_lock &devlink->lock_key#7 &ht->mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock &devlink->lock_key#7 &sb->s_type->i_mutex_key#3 rcu_node_0 irq_context: 0 cb_lock &devlink->lock_key#7 (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) irq_context: 0 cb_lock &devlink->lock_key#7 &nsim_trap_data->trap_lock irq_context: 0 cb_lock &devlink->lock_key#7 rcu_state.exp_mutex &rnp->exp_wq[2] irq_context: 0 cb_lock &devlink->lock_key#7 pcpu_lock irq_context: 0 cb_lock &devlink->lock_key#7 &____s->seqcount#2 irq_context: 0 cb_lock &devlink->lock_key#7 &____s->seqcount irq_context: 0 cb_lock &devlink->lock_key#7 rcu_read_lock rcu_node_0 irq_context: 0 cb_lock &devlink->lock_key#7 rcu_read_lock &rq->__lock irq_context: 0 cb_lock &devlink->lock_key#7 &region->snapshot_lock irq_context: 0 cb_lock &devlink->lock_key#7 pcpu_alloc_mutex irq_context: 0 cb_lock &devlink->lock_key#7 pcpu_alloc_mutex &rq->__lock irq_context: 0 cb_lock &devlink->lock_key#7 pcpu_alloc_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock &devlink->lock_key#7
pcpu_alloc_mutex pcpu_lock irq_context: 0 cb_lock &devlink->lock_key#7 remove_cache_srcu irq_context: 0 cb_lock &devlink->lock_key#7 remove_cache_srcu quarantine_lock irq_context: 0 cb_lock &devlink->lock_key#7 remove_cache_srcu &c->lock irq_context: 0 cb_lock &devlink->lock_key#7 remove_cache_srcu &n->list_lock irq_context: 0 cb_lock &devlink->lock_key#7 remove_cache_srcu &obj_hash[i].lock irq_context: 0 cb_lock &devlink->lock_key#7 remove_cache_srcu pool_lock#2 irq_context: 0 cb_lock &devlink->lock_key#7 remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 cb_lock &devlink->lock_key#7 remove_cache_srcu &rq->__lock irq_context: 0 cb_lock &devlink->lock_key#7 remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock &devlink->lock_key#7 &sb->s_type->i_mutex_key#3 fs_reclaim irq_context: 0 cb_lock &devlink->lock_key#7 &sb->s_type->i_mutex_key#3 fs_reclaim &rq->__lock irq_context: 0 cb_lock &devlink->lock_key#7 &sb->s_type->i_mutex_key#3 fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock &devlink->lock_key#7 &sb->s_type->i_mutex_key#3 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 cb_lock &devlink->lock_key#7 &sb->s_type->i_mutex_key#3 &xa->xa_lock#12 irq_context: 0 cb_lock &devlink->lock_key#7 &sb->s_type->i_mutex_key#3 stock_lock irq_context: 0 cb_lock &devlink->lock_key#7 &sb->s_type->i_mutex_key#3 rcu_read_lock rename_lock.seqcount irq_context: 0 cb_lock &devlink->lock_key#7 &sb->s_type->i_mutex_key#3 &dentry->d_lock &wq irq_context: 0 cb_lock &devlink->lock_key#7 &sb->s_type->i_mutex_key#3 &c->lock irq_context: 0 cb_lock &devlink->lock_key#7 &sb->s_type->i_mutex_key#3 mmu_notifier_invalidate_range_start irq_context: 0 cb_lock &devlink->lock_key#7 &sb->s_type->i_mutex_key#3 &sb->s_type->i_lock_key#7 &dentry->d_lock irq_context: 0 cb_lock &devlink->lock_key#7 &sb->s_type->i_mutex_key#3 &n->list_lock irq_context: 0 cb_lock &devlink->lock_key#7 &sb->s_type->i_mutex_key#3 &n->list_lock &c->lock irq_context: 0 cb_lock &devlink->lock_key#7 batched_entropy_u32.lock irq_context: 0 cb_lock &devlink->lock_key#7 rcu_read_lock &data->fib_event_queue_lock irq_context: 0 cb_lock &devlink->lock_key#7 rcu_read_lock rcu_read_lock &pool->lock irq_context: 0 cb_lock &devlink->lock_key#7 rcu_read_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 cb_lock &devlink->lock_key#7 rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 cb_lock &devlink->lock_key#7 rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 cb_lock &devlink->lock_key#7 rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock &devlink->lock_key#7 rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock &devlink->lock_key#7 rcu_read_lock &c->lock irq_context: 0 cb_lock &devlink->lock_key#7 rcu_read_lock &tb->tb6_lock irq_context: 0 cb_lock &devlink->lock_key#7 rcu_read_lock &tb->tb6_lock &net->ipv6.fib6_walker_lock irq_context: 0 cb_lock &devlink->lock_key#7 rcu_read_lock &tb->tb6_lock &c->lock irq_context: 0 cb_lock &devlink->lock_key#7 rcu_read_lock &tb->tb6_lock &data->fib_event_queue_lock irq_context: 0 cb_lock &devlink->lock_key#7 rcu_read_lock &tb->tb6_lock &n->list_lock irq_context: 0 cb_lock &devlink->lock_key#7 rcu_read_lock &tb->tb6_lock &n->list_lock &c->lock irq_context: 0 cb_lock &devlink->lock_key#7 rcu_read_lock &obj_hash[i].lock irq_context: 0 cb_lock &devlink->lock_key#7 rcu_read_lock 
&rcu_state.gp_wq irq_context: 0 cb_lock &devlink->lock_key#7 rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 cb_lock &devlink->lock_key#7 rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 cb_lock &devlink->lock_key#7 rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock &devlink->lock_key#7 stock_lock irq_context: 0 cb_lock &devlink->lock_key#7 stack_depot_init_mutex irq_context: 0 cb_lock &devlink->lock_key#7 stack_depot_init_mutex &rq->__lock irq_context: 0 cb_lock &devlink->lock_key#7 stack_depot_init_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock &devlink->lock_key#7 crngs.lock irq_context: 0 cb_lock &devlink->lock_key#7 &sb->s_type->i_mutex_key#3 remove_cache_srcu irq_context: 0 cb_lock &devlink->lock_key#7 &sb->s_type->i_mutex_key#3 remove_cache_srcu quarantine_lock irq_context: 0 cb_lock &devlink->lock_key#7 &sb->s_type->i_mutex_key#3 pool_lock#2 irq_context: 0 cb_lock &devlink->lock_key#7 rtnl_mutex bpf_devs_lock fs_reclaim irq_context: 0 cb_lock &devlink->lock_key#7 rtnl_mutex bpf_devs_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 cb_lock &devlink->lock_key#7 rtnl_mutex &sb->s_type->i_mutex_key#3 fs_reclaim irq_context: 0 cb_lock &devlink->lock_key#7 rtnl_mutex &sb->s_type->i_mutex_key#3 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 cb_lock &devlink->lock_key#7 rtnl_mutex &sb->s_type->i_mutex_key#3 stock_lock irq_context: 0 cb_lock &devlink->lock_key#7 rtnl_mutex &sb->s_type->i_mutex_key#3 &rq->__lock irq_context: 0 cb_lock &devlink->lock_key#7 rtnl_mutex &sb->s_type->i_mutex_key#3 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock &devlink->lock_key#7 rtnl_mutex &sb->s_type->i_mutex_key#3 &c->lock irq_context: 0 cb_lock &devlink->lock_key#7 rtnl_mutex &sb->s_type->i_mutex_key#3 rcu_read_lock rename_lock.seqcount irq_context: 0 cb_lock &devlink->lock_key#7 rtnl_mutex &sb->s_type->i_mutex_key#3 &dentry->d_lock &wq irq_context: 0 cb_lock &devlink->lock_key#7 rtnl_mutex &sb->s_type->i_mutex_key#3 mmu_notifier_invalidate_range_start irq_context: 0 cb_lock &devlink->lock_key#7 rtnl_mutex &sb->s_type->i_mutex_key#3 &sb->s_type->i_lock_key#7 irq_context: 0 cb_lock &devlink->lock_key#7 rtnl_mutex &sb->s_type->i_mutex_key#3 &s->s_inode_list_lock irq_context: 0 cb_lock &devlink->lock_key#7 rtnl_mutex &sb->s_type->i_mutex_key#3 &sb->s_type->i_lock_key#7 &dentry->d_lock irq_context: 0 cb_lock &devlink->lock_key#7 rtnl_mutex &sb->s_type->i_mutex_key#3 &n->list_lock irq_context: 0 cb_lock &devlink->lock_key#7 rtnl_mutex &sb->s_type->i_mutex_key#3 &n->list_lock &c->lock irq_context: 0 cb_lock &devlink->lock_key#7 rtnl_mutex &____s->seqcount irq_context: 0 cb_lock &devlink->lock_key#7 rtnl_mutex gdp_mutex fs_reclaim irq_context: 0 cb_lock &devlink->lock_key#7 rtnl_mutex gdp_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 cb_lock &devlink->lock_key#7 rtnl_mutex gdp_mutex lock irq_context: 0 cb_lock &devlink->lock_key#7 rtnl_mutex gdp_mutex lock kernfs_idr_lock irq_context: 0 cb_lock &devlink->lock_key#7 rtnl_mutex gdp_mutex &rq->__lock irq_context: 0 cb_lock &devlink->lock_key#7 rtnl_mutex gdp_mutex kobj_ns_type_lock irq_context: 0 cb_lock &devlink->lock_key#7 rtnl_mutex lock irq_context: 0 cb_lock &devlink->lock_key#7 rtnl_mutex lock kernfs_idr_lock irq_context: 0 cb_lock &devlink->lock_key#7 rtnl_mutex bus_type_sem irq_context: 0 cb_lock &devlink->lock_key#7 rtnl_mutex &____s->seqcount#2 irq_context: 0 
cb_lock &devlink->lock_key#7 rtnl_mutex lock kernfs_idr_lock &c->lock irq_context: 0 cb_lock &devlink->lock_key#7 rtnl_mutex input_pool.lock irq_context: 0 cb_lock &devlink->lock_key#7 rtnl_mutex batched_entropy_u32.lock irq_context: 0 cb_lock &devlink->lock_key#7 rtnl_mutex stock_lock irq_context: 0 cb_lock &devlink->lock_key#7 rtnl_mutex failover_lock irq_context: 0 cb_lock &devlink->lock_key#7 rtnl_mutex pcpu_alloc_mutex irq_context: 0 cb_lock &devlink->lock_key#7 rtnl_mutex pcpu_alloc_mutex pcpu_lock irq_context: 0 cb_lock &devlink->lock_key#7 rtnl_mutex proc_subdir_lock irq_context: 0 cb_lock &devlink->lock_key#7 rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 cb_lock &devlink->lock_key#7 rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 cb_lock &devlink->lock_key#7 rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 cb_lock &devlink->lock_key#7 rtnl_mutex &idev->mc_lock _xmit_ETHER irq_context: 0 cb_lock &devlink->lock_key#7 rtnl_mutex smc_ib_devices.mutex &rq->__lock irq_context: 0 cb_lock &devlink->lock_key#7 rtnl_mutex smc_ib_devices.mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock &devlink->lock_key#7 rtnl_mutex &vn->sock_lock irq_context: 0 cb_lock &devlink->lock_key#7 rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 cb_lock &devlink->lock_key#7 &xa->xa_lock#14 &c->lock irq_context: 0 cb_lock &devlink->lock_key#7 rtnl_mutex &idev->mc_lock &obj_hash[i].lock pool_lock irq_context: 0 cb_lock &devlink->lock_key#7 rtnl_mutex &idev->mc_lock &rq->__lock irq_context: 0 cb_lock &devlink->lock_key#7 rtnl_mutex &idev->mc_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock &devlink->lock_key#7 rtnl_mutex &idev->mc_lock _xmit_ETHER &c->lock irq_context: 0 cb_lock &devlink->lock_key#7 rtnl_mutex rcu_read_lock rcu_node_0 irq_context: 0 cb_lock &devlink->lock_key#7 rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 cb_lock &devlink->lock_key#7 rtnl_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock &devlink->lock_key#7 &sb->s_type->i_mutex_key#3 &____s->seqcount irq_context: 0 cb_lock &devlink->lock_key#7 rtnl_mutex &sb->s_type->i_mutex_key#3 rcu_node_0 irq_context: 0 cb_lock &devlink->lock_key#7 rtnl_mutex &sb->s_type->i_mutex_key#3 rcu_read_lock rcu_node_0 irq_context: 0 cb_lock &devlink->lock_key#7 rtnl_mutex lock kernfs_idr_lock &n->list_lock irq_context: 0 cb_lock &devlink->lock_key#7 rtnl_mutex lock kernfs_idr_lock &n->list_lock &c->lock irq_context: 0 cb_lock &devlink->lock_key#7 rtnl_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 cb_lock &devlink->lock_key#7 &sb->s_type->i_mutex_key#3 &____s->seqcount#2 irq_context: 0 rtnl_mutex &bridge_netdev_addr_lock_key &____s->seqcount#2 irq_context: 0 rcu_read_lock &sb->s_type->i_lock_key#23 &dentry->d_lock irq_context: 0 rcu_read_lock &sb->s_type->i_lock_key#23 &dentry->d_lock &dentry->d_lock/1 irq_context: 0 sb_writers#8 &of->mutex kn->active#5 uevent_sock_mutex pgd_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#5 uevent_sock_mutex stock_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#5 uevent_sock_mutex key irq_context: 0 sb_writers#8 &of->mutex kn->active#5 uevent_sock_mutex pcpu_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#5 uevent_sock_mutex percpu_counters_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#5 uevent_sock_mutex pcpu_lock stock_lock irq_context: 0 rtnl_mutex &root->kernfs_rwsem kernfs_idr_lock fill_pool_map-wait-type-override 
&n->list_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal rcu_read_lock &rcu_state.gp_wq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rlock-AF_INET irq_context: 0 rcu_read_lock rcu_read_lock_bh &sch->q.lock irq_context: 0 rcu_read_lock rcu_read_lock_bh &sch->q.lock tk_core.seq.seqcount irq_context: 0 rcu_read_lock rcu_read_lock_bh &sch->q.lock pool_lock#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &sbi->s_orphan_lock rcu_node_0 irq_context: 0 rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock &obj_hash[i].lock irq_context: 0 rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock &base->lock irq_context: 0 rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock &base->lock &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 fs_reclaim &cfs_rq->removed.lock irq_context: 0 file_rwsem rcu_node_0 irq_context: 0 rcu_read_lock rcu_read_lock_bh rcu_read_lock &meta->lock irq_context: 0 rcu_read_lock rcu_read_lock_bh rcu_read_lock kfence_freelist_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle rcu_read_lock &rcu_state.gp_wq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_PACKET &po->bind_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 sb_writers#8 tomoyo_ss &rcu_state.expedited_wq irq_context: 0 sb_writers#8 tomoyo_ss &rcu_state.expedited_wq &p->pi_lock irq_context: 0 sb_writers#8 tomoyo_ss &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#8 tomoyo_ss &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &xt[i].mutex remove_cache_srcu rcu_read_lock &rcu_state.expedited_wq irq_context: 0 &xt[i].mutex remove_cache_srcu rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 &xt[i].mutex remove_cache_srcu rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 &xt[i].mutex remove_cache_srcu rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 ebt_mutex &rq->__lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &ei->i_es_lock &meta->lock irq_context: 0 &type->s_umount_key#48 &rq->__lock irq_context: 0 
&type->s_umount_key#48 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (work_completion)(&data->suspend_work) &rq->__lock irq_context: 0 (work_completion)(&data->suspend_work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &idev->mc_lock remove_cache_srcu rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 &hdev->req_lock &hdev->lock &n->list_lock irq_context: 0 &hdev->req_lock &hdev->lock &n->list_lock &c->lock irq_context: 0 &hdev->req_lock &hdev->lock uevent_sock_mutex &n->list_lock irq_context: 0 &hdev->req_lock &hdev->lock uevent_sock_mutex &n->list_lock &c->lock irq_context: 0 rtnl_mutex &idev->mc_lock remove_cache_srcu rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq (&peer->timer_new_handshake) irq_context: softirq (&peer->timer_new_handshake) &peer->endpoint_lock irq_context: softirq (&peer->timer_new_handshake) &peer->endpoint_lock &obj_hash[i].lock irq_context: softirq (&peer->timer_new_handshake) &peer->endpoint_lock pool_lock#2 irq_context: softirq (&peer->timer_new_handshake) rcu_read_lock_bh tk_core.seq.seqcount irq_context: softirq (&peer->timer_new_handshake) rcu_read_lock_bh rcu_read_lock &pool->lock/1 irq_context: softirq (&peer->timer_new_handshake) rcu_read_lock_bh rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: softirq (&peer->timer_new_handshake) rcu_read_lock_bh rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: softirq (&peer->timer_new_handshake) rcu_read_lock_bh rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)wg-kex-wg0#17 (work_completion)(&peer->transmit_handshake_work) &c->lock irq_context: 0 (wq_completion)wg-kex-wg0#17 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg0#17 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &data->lock irq_context: 0 (wq_completion)wg-kex-wg0#17 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex team->team_lock_key#9 irq_context: 0 vlan_ioctl_mutex rtnl_mutex team->team_lock_key#9 fs_reclaim irq_context: 0 vlan_ioctl_mutex rtnl_mutex team->team_lock_key#9 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 vlan_ioctl_mutex rtnl_mutex team->team_lock_key#9 pool_lock#2 irq_context: 0 vlan_ioctl_mutex rtnl_mutex &dev_addr_list_lock_key#2/1 irq_context: 0 vlan_ioctl_mutex rtnl_mutex &dev_addr_list_lock_key#2/1 &c->lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &dev_addr_list_lock_key#2/1 &n->list_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &dev_addr_list_lock_key#2/1 &n->list_lock &c->lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &dev_addr_list_lock_key#2/1 rcu_read_lock _xmit_ETHER irq_context: 0 vlan_ioctl_mutex rtnl_mutex &dev_addr_list_lock_key#2/1 rcu_read_lock _xmit_ETHER &c->lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &dev_addr_list_lock_key#2/1 rcu_read_lock _xmit_ETHER pool_lock#2 irq_context: 0 vlan_ioctl_mutex rtnl_mutex quarantine_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock batched_entropy_u8.lock irq_context: 0 
(wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock kfence_freelist_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &f->f_pos_lock sb_writers#4 mount_lock irq_context: 0 &f->f_pos_lock sb_writers#4 tk_core.seq.seqcount irq_context: 0 &f->f_pos_lock sb_writers#4 mmu_notifier_invalidate_range_start irq_context: 0 &f->f_pos_lock sb_writers#4 jbd2_handle irq_context: 0 &f->f_pos_lock sb_writers#4 jbd2_handle &ei->i_raw_lock irq_context: 0 &f->f_pos_lock sb_writers#4 jbd2_handle &journal->j_wait_updates irq_context: 0 &type->i_mutex_dir_key#3 sb_writers#4 rcu_read_lock &rcu_state.gp_wq irq_context: 0 &type->i_mutex_dir_key#3 sb_writers#4 rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 &type->i_mutex_dir_key#3 sb_writers#4 rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 &type->i_mutex_dir_key#3 sb_writers#4 rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond2 (work_completion)(&(&bond->ad_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &br->hash_lock nl_table_wait.lock &p->pi_lock irq_context: 0 &iint->mutex remove_cache_srcu &rq->__lock irq_context: 0 &fq->mq_flush_lock fill_pool_map-wait-type-override &n->list_lock irq_context: 0 &fq->mq_flush_lock fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 &iint->mutex remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-crypt-wg2#4 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh &data->lock irq_context: 0 (wq_completion)wg-kex-wg2#7 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh &data->lock irq_context: 0 nlk_cb_mutex-GENERIC genl_mutex &rq->__lock irq_context: 0 nlk_cb_mutex-GENERIC genl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock &rq->__lock &obj_hash[i].lock irq_context: 0 &mm->mmap_lock &rq->__lock &base->lock irq_context: 0 &mm->mmap_lock &rq->__lock &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 &cfs_rq->removed.lock irq_context: 0 &iint->mutex &cfs_rq->removed.lock irq_context: 0 (wq_completion)wg-kex-wg0#17 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh &c->lock irq_context: 0 (wq_completion)wg-kex-wg0#17 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh &n->list_lock irq_context: 0 (wq_completion)wg-kex-wg0#17 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh &n->list_lock &c->lock irq_context: 0 (wq_completion)wg-kex-wg0#17 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)wg-kex-wg0#17 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg0#17 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rlock-AF_PACKET irq_context: 0 &mm->mmap_lock 
mmu_notifier_invalidate_range_start &cfs_rq->removed.lock irq_context: 0 &mm->mmap_lock mmu_notifier_invalidate_range_start &obj_hash[i].lock irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem &net->sctp.local_addr_lock &net->sctp.addr_wq_lock &meta->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 rcu_read_lock &rcu_state.expedited_wq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_NETLINK rcu_node_0 irq_context: 0 &type->i_mutex_dir_key#3 &rq->__lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &rq->__lock &cfs_rq->removed.lock irq_context: 0 cb_lock nlk_cb_mutex-GENERIC genl_mutex rtnl_mutex &rq->__lock &cfs_rq->removed.lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &sbi->s_orphan_lock &lock->wait_lock irq_context: softirq &(&bond->mii_work)->timer rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &obj_hash[i].lock irq_context: softirq &(&bond->mii_work)->timer rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &base->lock irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 rcu_read_lock &rcu_state.expedited_wq irq_context: softirq &(&bond->mii_work)->timer rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &base->lock &obj_hash[i].lock irq_context: 0 cb_lock genl_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq (&peer->timer_retransmit_handshake) rcu_read_lock_bh rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)wg-kex-wg2#7 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh &data->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg2#7 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh &data->lock &base->lock irq_context: 0 (wq_completion)wg-kex-wg2#7 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh &data->lock &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 rcu_read_lock pool_lock#2 irq_context: 0 cb_lock genl_mutex rcu_read_lock &rcu_state.gp_wq irq_context: 0 cb_lock genl_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 cb_lock genl_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 cb_lock genl_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &type->i_mutex_dir_key#3 rcu_read_lock &rcu_state.gp_wq irq_context: 0 &type->i_mutex_dir_key#3 rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 &type->i_mutex_dir_key#3 rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 &type->i_mutex_dir_key#3 rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 sb_writers#4 &journal->j_wait_transaction_locked irq_context: 0 &journal->j_wait_transaction_locked &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 
0 rtnl_mutex rcu_read_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#2 irq_context: 0 rtnl_mutex rcu_read_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount irq_context: 0 (wq_completion)kblockd (work_completion)(&(&q->requeue_work)->work) &rcu_state.expedited_wq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 jbd2_handle &c->lock irq_context: 0 (wq_completion)wg-kex-wg1#17 (work_completion)(&peer->transmit_handshake_work) &n->list_lock irq_context: 0 (wq_completion)wg-kex-wg1#17 (work_completion)(&peer->transmit_handshake_work) &n->list_lock &c->lock irq_context: 0 (wq_completion)wg-kex-wg1#17 (work_completion)(&peer->transmit_handshake_work) batched_entropy_u8.lock crngs.lock irq_context: 0 (wq_completion)wg-kex-wg2#17 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg2#17 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rlock-AF_PACKET irq_context: 0 rtnl_mutex rcu_read_lock rcu_read_lock &cfs_rq->removed.lock irq_context: 0 rtnl_mutex rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 rtnl_mutex rcu_read_lock rcu_read_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq irq_context: 0 rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.expedited_wq irq_context: 0 rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&p->wq) purge_vmap_area_lock &meta->lock irq_context: 0 (wq_completion)events (work_completion)(&p->wq) purge_vmap_area_lock kfence_freelist_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock batched_entropy_u8.lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock kfence_freelist_lock irq_context: 0 (wq_completion)events (work_completion)(&w->w) rcu_node_0 irq_context: 0 (wq_completion)events (work_completion)(&w->w) &rcu_state.expedited_wq irq_context: 0 (wq_completion)events (work_completion)(&w->w) &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)events (work_completion)(&w->w) &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&w->w) &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 bt_proto_lock cmtp_sk_list.lock irq_context: 0 &sb->s_type->i_mutex_key#10 cmtp_sk_list.lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &pcp->lock &zone->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 (wq_completion)wg-kex-wg2#17 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock &rq->__lock irq_context: 0 
(wq_completion)wg-kex-wg2#17 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 jbd2_handle pgd_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 jbd2_handle stock_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 jbd2_handle key irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 jbd2_handle pcpu_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 jbd2_handle percpu_counters_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 jbd2_handle pcpu_lock stock_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex &bat_priv->tt.commit_lock batched_entropy_u8.lock irq_context: 0 sb_writers#3 oom_adj_mutex &rcu_state.gp_wq irq_context: 0 sb_writers#3 oom_adj_mutex &rcu_state.gp_wq &p->pi_lock irq_context: 0 sb_writers#3 oom_adj_mutex &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#3 oom_adj_mutex &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex &bat_priv->tt.commit_lock &bat_priv->tvlv.container_list_lock &meta->lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex &bat_priv->tt.commit_lock &bat_priv->tvlv.container_list_lock kfence_freelist_lock irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem fib_info_lock &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)wg-crypt-wg1#9 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &____s->seqcount#7 irq_context: 0 (wq_completion)wg-crypt-wg1#9 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh pool_lock#2 irq_context: 0 (wq_completion)wg-crypt-wg1#9 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &nf_nat_locks[i] irq_context: 0 (wq_completion)wg-crypt-wg1#9 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &nf_conntrack_locks[i] irq_context: 0 (wq_completion)wg-crypt-wg1#9 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 irq_context: 0 (wq_completion)wg-crypt-wg1#9 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 batched_entropy_u8.lock irq_context: 0 (wq_completion)wg-crypt-wg1#9 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh &dir->lock#2 irq_context: 0 (wq_completion)wg-crypt-wg1#9 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-crypt-wg1#9 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh &list->lock#5 irq_context: 0 (wq_completion)wg-crypt-wg1#9 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)wg-crypt-wg1#9 
(work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)wg-crypt-wg1#9 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &n->lock irq_context: 0 (wq_completion)wg-crypt-wg1#9 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)wg-crypt-wg1#9 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)wg-crypt-wg1#9 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-crypt-wg1#9 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rlock-AF_PACKET irq_context: 0 (wq_completion)wg-crypt-wg1#9 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 (wq_completion)wg-crypt-wg1#9 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)wg-crypt-wg1#9 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-crypt-wg1#9 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rlock-AF_PACKET irq_context: 0 (wq_completion)wg-crypt-wg1#9 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &list->lock#5 irq_context: softirq &peer->endpoint_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &sbi->s_orphan_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 (wq_completion)wg-crypt-wg0#9 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh &dir->lock#2 irq_context: 0 (wq_completion)wg-crypt-wg0#9 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-crypt-wg0#9 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh &list->lock#5 irq_context: 0 (wq_completion)wg-crypt-wg0#9 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-crypt-wg0#9 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &list->lock#5 irq_context: 0 nfnl_subsys_ipset rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)wg-kex-wg2#17 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &data->lock &obj_hash[i].lock 
irq_context: 0 kn->active#18 &n->list_lock irq_context: 0 kn->active#18 &n->list_lock &c->lock irq_context: 0 kn->active#18 &kernfs_locks->open_file_mutex[count] &c->lock irq_context: 0 (wq_completion)wg-kex-wg2#17 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &data->lock &base->lock irq_context: 0 (wq_completion)wg-kex-wg2#17 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &data->lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg1#17 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &____s->seqcount#7 irq_context: 0 (wq_completion)wg-kex-wg1#17 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &nf_nat_locks[i] irq_context: 0 (wq_completion)wg-kex-wg1#17 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh noop_qdisc.q.lock irq_context: 0 (wq_completion)wg-kex-wg1#17 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &data->lock irq_context: 0 (wq_completion)wg-kex-wg1#17 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg1#17 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh pool_lock#2 irq_context: 0 &iint->mutex &rcu_state.expedited_wq irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &lg->lg_mutex rcu_node_0 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)wg-crypt-wg0#9 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 (wq_completion)wg-crypt-wg0#9 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)wg-crypt-wg0#9 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rlock-AF_PACKET irq_context: 0 (wq_completion)wg-kex-wg2#17 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &____s->seqcount#7 irq_context: 0 (wq_completion)wg-kex-wg2#17 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &nf_nat_locks[i] irq_context: 0 (wq_completion)wg-kex-wg2#17 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &nf_conntrack_locks[i] irq_context: 0 (wq_completion)wg-kex-wg2#17 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 irq_context: 0 (wq_completion)wg-kex-wg2#17 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &nf_conntrack_locks[i] 
&nf_conntrack_locks[i]/1 batched_entropy_u8.lock irq_context: 0 (wq_completion)wg-kex-wg2#17 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh noop_qdisc.q.lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &____s->seqcount#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &____s->seqcount irq_context: 0 rtnl_mutex &idev->mc_query_lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 rtnl_mutex &idev->mc_query_lock fill_pool_map-wait-type-override pool_lock irq_context: 0 &xt[i].mutex batched_entropy_u8.lock crngs.lock irq_context: 0 &iint->mutex rcu_read_lock rename_lock irq_context: 0 kn->active#17 &kernfs_locks->open_file_mutex[count] &n->list_lock irq_context: 0 kn->active#17 &kernfs_locks->open_file_mutex[count] &n->list_lock &c->lock irq_context: 0 &ei->i_data_sem &bgl->locks[i].lock irq_context: 0 kn->active#5 remove_cache_srcu &cfs_rq->removed.lock irq_context: softirq (&peer->timer_persistent_keepalive) rcu_read_lock_bh rcu_read_lock &pool->lock/1 irq_context: softirq (&peer->timer_persistent_keepalive) rcu_read_lock_bh rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: softirq (&peer->timer_persistent_keepalive) rcu_read_lock_bh rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: softirq (&peer->timer_persistent_keepalive) rcu_read_lock_bh rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)wg-kex-wg1#7 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh &data->lock irq_context: 0 (wq_completion)wg-kex-wg1#7 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &sbi->s_orphan_lock &ei->i_raw_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem (work_completion)(&(&cp->cp_send_w)->work) irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_read_lock &n->list_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_read_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rds_conn_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem (work_completion)(&(&cp->cp_recv_w)->work) irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_read_lock rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_read_lock rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_read_lock rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_read_lock rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_read_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_read_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)krdsd (work_completion)(&cp->cp_down_w) irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem (work_completion)(&cp->cp_down_w) irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_read_lock (wq_completion)krdsd irq_context: 0 (wq_completion)krdsd 
(work_completion)(&cp->cp_down_w) &cp->cp_cm_lock irq_context: 0 (wq_completion)krdsd (work_completion)(&cp->cp_down_w) &cp->cp_cm_lock rcu_node_0 irq_context: 0 (wq_completion)krdsd (work_completion)(&cp->cp_down_w) &cp->cp_cm_lock &rcu_state.expedited_wq irq_context: 0 (wq_completion)krdsd (work_completion)(&cp->cp_down_w) &cp->cp_cm_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)krdsd (work_completion)(&cp->cp_down_w) &cp->cp_cm_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)krdsd (work_completion)(&cp->cp_down_w) &cp->cp_cm_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)krdsd (work_completion)(&cp->cp_down_w) &cp->cp_cm_lock &rq->__lock irq_context: 0 (wq_completion)krdsd (work_completion)(&cp->cp_down_w) &cp->cp_cm_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)krdsd (work_completion)(&cp->cp_down_w) &cp->cp_lock irq_context: 0 (wq_completion)krdsd (work_completion)(&cp->cp_down_w) &obj_hash[i].lock irq_context: 0 (wq_completion)krdsd (work_completion)(&cp->cp_down_w) (work_completion)(&(&cp->cp_conn_w)->work) irq_context: 0 (wq_completion)krdsd (work_completion)(&cp->cp_down_w) rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)krdsd (work_completion)(&barr->work) irq_context: 0 (wq_completion)krdsd (work_completion)(&barr->work) &x->wait#10 irq_context: 0 (wq_completion)krdsd (work_completion)(&barr->work) &x->wait#10 &p->pi_lock irq_context: 0 (wq_completion)krdsd (work_completion)(&barr->work) &x->wait#10 &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)krdsd (work_completion)(&barr->work) &x->wait#10 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rds_cong_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &idev->mc_lock _xmit_IPGRE irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &idev->mc_lock _xmit_IPGRE &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &idev->mc_lock _xmit_IPGRE krc.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx (&dwork->timer) irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx rcu_state.exp_mutex &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &root->kernfs_rwsem rcu_read_lock &rcu_state.gp_wq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx &rnp->exp_wq[1] irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx &app->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx (&app->join_timer) irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx (&app->periodic_timer) irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx &list->lock#14 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx 
(&app->join_timer)#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx &app->lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx &list->lock#15 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx _xmit_ETHER &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx _xmit_ETHER pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx _xmit_ETHER krc.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx stock_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx &idev->mc_lock &vlan_netdev_addr_lock_key/1 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx &idev->mc_lock &vlan_netdev_addr_lock_key/1 &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx &idev->mc_lock &vlan_netdev_addr_lock_key/1 pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx &idev->mc_lock &vlan_netdev_addr_lock_key/1 krc.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx &idev->mc_lock &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &root->kernfs_rwsem rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &root->kernfs_rwsem rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &root->kernfs_rwsem rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &bat_priv->tt.changes_list_lock pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex key#16 irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock &pcp->lock &zone->lock irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &sch->q.lock &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &sch->q.lock pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &bridge_netdev_addr_lock_key &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &idev->mc_lock &____s->seqcount#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &idev->mc_lock &____s->seqcount irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &idev->mc_lock &bridge_netdev_addr_lock_key krc.lock &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &idev->mc_lock &bridge_netdev_addr_lock_key krc.lock &base->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &idev->mc_lock &bridge_netdev_addr_lock_key krc.lock &base->lock &obj_hash[i].lock irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock &in_dev->mc_tomb_lock &n->list_lock irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock 
&in_dev->mc_tomb_lock &n->list_lock &c->lock irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock &in_dev->mc_tomb_lock batched_entropy_u8.lock irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock &in_dev->mc_tomb_lock kfence_freelist_lock irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock &in_dev->mc_tomb_lock &meta->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock key#26 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &dev_addr_list_lock_key#2/1 _xmit_ETHER krc.lock &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &dev_addr_list_lock_key#2/1 _xmit_ETHER krc.lock &base->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &dev_addr_list_lock_key#2/1 _xmit_ETHER krc.lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &idev->mc_lock remove_cache_srcu irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &idev->mc_lock remove_cache_srcu quarantine_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &idev->mc_lock remove_cache_srcu &c->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &idev->mc_lock remove_cache_srcu &n->list_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &idev->mc_lock remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &idev->mc_lock remove_cache_srcu &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &idev->mc_lock remove_cache_srcu &pcp->lock &zone->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &idev->mc_lock remove_cache_srcu &pcp->lock &zone->lock &____s->seqcount irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &idev->mc_lock remove_cache_srcu pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &tbl->lock &n->list_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &tbl->lock &n->list_lock &c->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_read_lock &____s->seqcount#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_read_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &sig->cred_guard_mutex remove_cache_srcu rcu_node_0 irq_context: 0 &sig->cred_guard_mutex remove_cache_srcu &rcu_state.expedited_wq irq_context: 0 &sig->cred_guard_mutex remove_cache_srcu &rcu_state.expedited_wq &p->pi_lock irq_context: 0 &sig->cred_guard_mutex remove_cache_srcu &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 &sig->cred_guard_mutex remove_cache_srcu &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 tomoyo_ss &rq->__lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 remove_cache_srcu rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &block->lock &n->list_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &block->lock &n->list_lock &c->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex 
&block->lock &____s->seqcount#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &block->lock &____s->seqcount irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex (work_completion)(&ht->run_work) &rq->__lock irq_context: 0 &data->open_mutex uevent_sock_mutex &____s->seqcount#2 irq_context: 0 (wq_completion)hci2 irq_context: 0 (wq_completion)hci2 (work_completion)(&hdev->power_on) irq_context: 0 (wq_completion)hci2 (work_completion)(&hdev->power_on) &hdev->req_lock irq_context: 0 (wq_completion)hci2 (work_completion)(&hdev->power_on) &hdev->req_lock &obj_hash[i].lock irq_context: 0 (wq_completion)hci2 (work_completion)(&hdev->power_on) &hdev->req_lock &list->lock#9 irq_context: 0 (wq_completion)hci2 (work_completion)(&hdev->power_on) &hdev->req_lock &list->lock#10 irq_context: 0 (wq_completion)hci2 (work_completion)(&hdev->power_on) &hdev->req_lock rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)hci2 (work_completion)(&hdev->power_on) &hdev->req_lock rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)hci2 (work_completion)(&hdev->power_on) &hdev->req_lock rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)hci2 (work_completion)(&hdev->power_on) &hdev->req_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)hci2 (work_completion)(&hdev->power_on) &hdev->req_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)hci2 (work_completion)(&hdev->power_on) &hdev->req_lock &hdev->req_wait_q irq_context: 0 (wq_completion)hci2 (work_completion)(&hdev->power_on) &hdev->req_lock &base->lock irq_context: 0 (wq_completion)hci2 (work_completion)(&hdev->power_on) &hdev->req_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)hci2 (work_completion)(&hdev->power_on) &hdev->req_lock &rq->__lock irq_context: 0 (wq_completion)hci2 (work_completion)(&hdev->power_on) &hdev->req_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)hci2#2 irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->cmd_work) irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->cmd_work) &list->lock#9 irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->cmd_work) fs_reclaim irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->cmd_work) fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->cmd_work) &c->lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->cmd_work) tk_core.seq.seqcount irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->cmd_work) &list->lock#11 irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->cmd_work) &data->read_wait irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->cmd_work) &data->read_wait &p->pi_lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->cmd_work) &data->read_wait &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->cmd_work) &data->read_wait &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->cmd_work) rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &list->lock#9 irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) lock#6 irq_context: 0 (wq_completion)hci2#2 
(work_completion)(&hdev->rx_work) lock#6 kcov_remote_lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) fs_reclaim irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) (console_sem).lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) console_lock console_srcu console_owner_lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) console_lock console_srcu console_owner irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) console_lock console_srcu console_owner &port_lock_key irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) console_lock console_srcu console_owner console_owner_lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &obj_hash[i].lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->req_wait_q irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->req_wait_q &p->pi_lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->req_wait_q &p->pi_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->req_wait_q &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->req_wait_q &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &rq->__lock irq_context: 0 (wq_completion)hci2 (work_completion)(&hdev->power_on) &hdev->req_lock (&timer.timer) irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &data->lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) lock#6 &kcov->lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->cmd_work) &data->lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->cmd_work) &obj_hash[i].lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->cmd_work) &data->read_wait &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->cmd_work) rcu_read_lock &base->lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->cmd_work) rcu_read_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &c->lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &base->lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)hci2 (work_completion)(&hdev->power_on) &hdev->req_lock &c->lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) rcu_node_0 irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &rcu_state.expedited_wq irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)hci2 (work_completion)(&hdev->power_on) 
&hdev->req_lock &data->lock irq_context: 0 (wq_completion)hci2 (work_completion)(&hdev->power_on) &hdev->req_lock &n->list_lock irq_context: 0 (wq_completion)hci2 (work_completion)(&hdev->power_on) &hdev->req_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &____s->seqcount#2 irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &____s->seqcount irq_context: 0 (wq_completion)hci2 (work_completion)(&hdev->power_on) &hdev->req_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)hci2 (work_completion)(&hdev->power_on) &hdev->req_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)hci2 (work_completion)(&hdev->power_on) &hdev->req_lock &____s->seqcount#2 irq_context: 0 (wq_completion)hci2 (work_completion)(&hdev->power_on) &hdev->req_lock &____s->seqcount irq_context: 0 (wq_completion)hci2 (work_completion)(&hdev->power_on) &hdev->req_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)hci2 (work_completion)(&hdev->power_on) &hdev->req_lock hci_sk_list.lock irq_context: 0 (wq_completion)hci2 (work_completion)(&hdev->power_on) fs_reclaim irq_context: 0 (wq_completion)hci2 (work_completion)(&hdev->power_on) fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)hci2 (work_completion)(&hdev->power_on) tk_core.seq.seqcount irq_context: 0 (wq_completion)hci2 (work_completion)(&hdev->power_on) hci_sk_list.lock irq_context: 0 (wq_completion)hci2 (work_completion)(&hdev->power_on) &data->lock irq_context: 0 (wq_completion)hci2 (work_completion)(&hdev->power_on) &obj_hash[i].lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->cmd_work) &____s->seqcount#2 irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->cmd_work) &____s->seqcount irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) chan_list_lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->lock fs_reclaim irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->lock &obj_hash[i].lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->lock &x->wait#9 irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->lock &k->list_lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->lock &c->lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->lock lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->lock lock kernfs_idr_lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->lock &root->kernfs_rwsem irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->lock &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->lock bus_type_sem irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->lock sysfs_symlink_target_lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->lock 
&root->kernfs_rwsem irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->lock &dev->power.lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->lock dpm_list_mtx irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->lock &n->list_lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->lock &n->list_lock &c->lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex fs_reclaim irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex &c->lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex &____s->seqcount#2 irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex &____s->seqcount irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex pool_lock#2 irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex nl_table_lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex rlock-AF_NETLINK irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex nl_table_wait.lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->lock &k->k_lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->lock subsys mutex#80 irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->lock subsys mutex#80 &k->k_lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->lock &____s->seqcount#2 irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->lock &____s->seqcount irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->lock &list->lock#9 irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->lock rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->lock rcu_read_lock &pool->lock/1 &obj_hash[i].lock 
irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->lock rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->lock &hdev->unregister_lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->lock &hdev->unregister_lock fs_reclaim irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->lock &hdev->unregister_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->lock &hdev->unregister_lock &hdev->cmd_sync_work_lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->lock &hdev->unregister_lock rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->lock &hdev->unregister_lock rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->lock &hdev->unregister_lock rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->lock &hdev->unregister_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->lock &hdev->unregister_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)hci2 (work_completion)(&hdev->cmd_sync_work) irq_context: 0 (wq_completion)hci2 (work_completion)(&hdev->cmd_sync_work) &hdev->cmd_sync_work_lock irq_context: 0 (wq_completion)hci2 (work_completion)(&hdev->cmd_sync_work) &hdev->req_lock irq_context: 0 (wq_completion)hci2 (work_completion)(&hdev->cmd_sync_work) &obj_hash[i].lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock fs_reclaim irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock &c->lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock &obj_hash[i].lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock chan_list_lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock &conn->ident_lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock &base->lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock &list->lock#12 irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 
(wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock &conn->chan_lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->lock &base->lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->lock rcu_node_0 irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->lock &rq->__lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->lock tk_core.seq.seqcount irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_sk_list.lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->lock &data->lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock &n->list_lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock &____s->seqcount#2 irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock &____s->seqcount irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock pool_lock#2 irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->lock &hdev->unregister_lock &c->lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->tx_work) irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->tx_work) &list->lock#12 irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->tx_work) tk_core.seq.seqcount irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->tx_work) &list->lock#11 irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->tx_work) &data->read_wait irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->tx_work) &list->lock#9 irq_context: 0 sb_writers#3 remove_cache_srcu irq_context: 0 (wq_completion)hci2#2 (work_completion)(&conn->pending_rx_work) irq_context: 0 sb_writers#3 remove_cache_srcu quarantine_lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&conn->pending_rx_work) &list->lock#13 irq_context: 0 sb_writers#3 remove_cache_srcu &c->lock irq_context: 0 sb_writers#3 remove_cache_srcu &n->list_lock irq_context: 0 sb_writers#3 remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#3 remove_cache_srcu &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &idev->mc_lock &batadv_netdev_addr_lock_key irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &idev->mc_lock &batadv_netdev_addr_lock_key &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &idev->mc_lock &batadv_netdev_addr_lock_key krc.lock 
irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &batadv_netdev_addr_lock_key irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->mcast.work)->work) &bat_priv->mcast.mla_lock &bat_priv->tt.changes_list_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->mcast.work)->work) &bat_priv->mcast.mla_lock &bat_priv->tt.changes_list_lock pool_lock#2 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->mcast.work)->work) &bat_priv->mcast.mla_lock &bat_priv->softif_vlan_list_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->mcast.work)->work) &bat_priv->mcast.mla_lock krc.lock irq_context: 0 (wq_completion)events (work_completion)(&data->dm_alert_work) &pcp->lock &zone->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &pcp->lock &zone->lock &____s->seqcount irq_context: 0 (wq_completion)wg-crypt-wg0#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex (inetaddr_chain).rwsem fib_info_lock &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex (inetaddr_chain).rwsem &tbl->lock &n->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex (inetaddr_chain).rwsem &tbl->lock pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex (inetaddr_chain).rwsem &tbl->lock &n->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex (inetaddr_chain).rwsem &tbl->lock &n->lock &____s->seqcount#9 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex (inetaddr_chain).rwsem &tbl->lock nl_table_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex (inetaddr_chain).rwsem &tbl->lock &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex (inetaddr_chain).rwsem &tbl->lock nl_table_wait.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex (inetaddr_chain).rwsem &tbl->lock rcu_read_lock lock#8 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex (inetaddr_chain).rwsem &tbl->lock rcu_read_lock id_table_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex (inetaddr_chain).rwsem &tbl->lock &dir->lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex (inetaddr_chain).rwsem &tbl->lock krc.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex gdp_mutex kernfs_idr_lock &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex gdp_mutex kernfs_idr_lock pool_lock#2 irq_context: 0 sb_writers#4 remove_cache_srcu rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock fs_reclaim &rq->__lock irq_context: softirq &fq->mq_flush_lock fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: softirq &fq->mq_flush_lock 
fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)events_long (work_completion)(&br->mcast_gc_work) fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)events_long (work_completion)(&br->mcast_gc_work) fill_pool_map-wait-type-override pool_lock irq_context: 0 sk_lock-AF_KEY irq_context: 0 sk_lock-AF_KEY &rq->__lock irq_context: 0 &dentry->d_lock rcu_read_lock &sb->s_type->i_lock_key#24 &dentry->d_lock &p->pi_lock irq_context: 0 &dentry->d_lock rcu_read_lock &sb->s_type->i_lock_key#24 &dentry->d_lock &p->pi_lock &rq->__lock irq_context: 0 &dentry->d_lock rcu_read_lock &sb->s_type->i_lock_key#24 &dentry->d_lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_KEY &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_KEY slock-AF_KEY irq_context: 0 slock-AF_KEY irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)wg-crypt-wg1#9 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh &c->lock irq_context: 0 (wq_completion)wg-crypt-wg2#9 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &c->lock irq_context: 0 (wq_completion)wg-crypt-wg2#9 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &obj_hash[i].lock irq_context: 0 sb_writers#5 lock#4 rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#5 lock#4 &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg2#9 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) pool_lock#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-kex-wg2#7 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 (wq_completion)wg-kex-wg2#7 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#2 irq_context: 0 (wq_completion)wg-kex-wg2#7 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &n->list_lock irq_context: 0 (wq_completion)wg-kex-wg2#7 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)wg-kex-wg2#7 
(work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)wg-kex-wg2#7 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &n->lock irq_context: 0 (wq_completion)wg-kex-wg2#7 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)wg-kex-wg1#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock remove_cache_srcu irq_context: 0 (wq_completion)wg-kex-wg1#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock remove_cache_srcu quarantine_lock irq_context: 0 (wq_completion)wg-kex-wg1#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock remove_cache_srcu &c->lock irq_context: 0 (wq_completion)wg-kex-wg1#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock remove_cache_srcu &n->list_lock irq_context: 0 (wq_completion)wg-kex-wg1#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock remove_cache_srcu &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg1#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)wg-kex-wg1#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &ul->lock irq_context: 0 (wq_completion)wg-kex-wg1#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); 
(typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh batched_entropy_u32.lock irq_context: 0 (wq_completion)wg-crypt-wg2#4 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 (wq_completion)wg-crypt-wg0#9 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh &obj_hash[i].lock pool_lock irq_context: 0 nlk_cb_mutex-GENERIC genl_mutex.wait_lock irq_context: 0 &sb->s_type->i_mutex_key#10 fill_pool_map-wait-type-override rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal remove_cache_srcu rcu_node_0 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->bla.work)->work) rcu_read_lock quarantine_lock irq_context: 0 (wq_completion)wg-crypt-wg1#9 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &c->lock irq_context: 0 (wq_completion)wg-kex-wg1#17 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg1#17 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex j1939_netdev_lock &rq->__lock irq_context: 0 (wq_completion)wg-kex-wg1#17 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &n->lock irq_context: 0 (wq_completion)wg-kex-wg1#17 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)wg-crypt-wg0#9 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg0#9 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg2#17 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)wg-kex-wg2#17 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg2#17 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock rcu_read_lock_bh batched_entropy_u32.lock crngs.lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->tt.work)->work) &rq->__lock &cfs_rq->removed.lock irq_context: 0 nlk_cb_mutex-GENERIC rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#8 &of->mutex kn->active#51 nsim_bus_dev_list_lock &dev->mutex fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 sb_writers#8 &of->mutex kn->active#51 nsim_bus_dev_list_lock &dev->mutex fill_pool_map-wait-type-override &c->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#51 nsim_bus_dev_list_lock &dev->mutex fill_pool_map-wait-type-override &____s->seqcount#2 
irq_context: 0 sb_writers#8 &of->mutex kn->active#51 nsim_bus_dev_list_lock &dev->mutex fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 sb_writers#8 &of->mutex kn->active#51 nsim_bus_dev_list_lock &dev->mutex fill_pool_map-wait-type-override pool_lock irq_context: 0 kn->active#52 &kernfs_locks->open_file_mutex[count] &c->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#9 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#9 crngs.lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#9 fs_reclaim irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#9 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#9 devlinks.xa_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#9 &c->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#9 &____s->seqcount#2 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#9 &n->list_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#9 &n->list_lock &c->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#9 &obj_hash[i].lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#9 nl_table_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#9 nl_table_wait.lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#9 &xa->xa_lock#14 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#9 &data->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#9 pcpu_alloc_mutex irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#9 pcpu_alloc_mutex pcpu_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#9 &rq->__lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#9 &____s->seqcount irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#9 rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#9 rcu_read_lock &rq->__lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#9 &base->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#9 &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#9 pin_fs_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#9 &sb->s_type->i_mutex_key#3 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#9 &sb->s_type->i_mutex_key#3 rename_lock.seqcount 
irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#9 &sb->s_type->i_mutex_key#3 fs_reclaim irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#9 &sb->s_type->i_mutex_key#3 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#9 &sb->s_type->i_mutex_key#3 &dentry->d_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#9 &sb->s_type->i_mutex_key#3 rcu_read_lock rename_lock.seqcount irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#9 &sb->s_type->i_mutex_key#3 &dentry->d_lock &wq irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#9 &sb->s_type->i_mutex_key#3 &c->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#9 &sb->s_type->i_mutex_key#3 &____s->seqcount#2 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#9 &sb->s_type->i_mutex_key#3 &____s->seqcount irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#9 &sb->s_type->i_mutex_key#3 pool_lock#2 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#9 &sb->s_type->i_mutex_key#3 mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#9 &sb->s_type->i_mutex_key#3 &sb->s_type->i_lock_key#7 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#9 &sb->s_type->i_mutex_key#3 &s->s_inode_list_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#9 &sb->s_type->i_mutex_key#3 tk_core.seq.seqcount irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#9 &sb->s_type->i_mutex_key#3 &sb->s_type->i_lock_key#7 &dentry->d_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#9 &sb->s_type->i_mutex_key#3 &n->list_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#9 &sb->s_type->i_mutex_key#3 &n->list_lock &c->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#9 batched_entropy_u32.lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#9 rtnl_mutex irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#9 rtnl_mutex &(&net->nexthop.notifier_chain)->rwsem irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#9 rcu_read_lock &data->fib_event_queue_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#9 rcu_read_lock rcu_read_lock &pool->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#9 rcu_read_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#9 rcu_read_lock 
rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#9 rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#9 rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#9 rcu_read_lock &tb->tb6_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#9 rcu_read_lock &tb->tb6_lock &net->ipv6.fib6_walker_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#9 rcu_read_lock &tb->tb6_lock &data->fib_event_queue_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#9 rcu_read_lock &obj_hash[i].lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#9 &(&fn_net->fib_chain)->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#9 &devlink_port->type_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#9 stack_depot_init_mutex irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#9 rtnl_mutex bpf_devs_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#9 rtnl_mutex bpf_devs_lock fs_reclaim irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#9 rtnl_mutex bpf_devs_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#9 rtnl_mutex bpf_devs_lock rcu_read_lock rhashtable_bucket irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#9 rtnl_mutex pin_fs_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#9 rtnl_mutex &sb->s_type->i_mutex_key#3 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#9 rtnl_mutex &sb->s_type->i_mutex_key#3 rename_lock.seqcount irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#9 rtnl_mutex &sb->s_type->i_mutex_key#3 fs_reclaim irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#9 rtnl_mutex &sb->s_type->i_mutex_key#3 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#9 rtnl_mutex &sb->s_type->i_mutex_key#3 &c->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#9 rtnl_mutex &sb->s_type->i_mutex_key#3 &dentry->d_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#9 rtnl_mutex &sb->s_type->i_mutex_key#3 rcu_read_lock rename_lock.seqcount irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#9 rtnl_mutex &sb->s_type->i_mutex_key#3 &dentry->d_lock &wq irq_context: 0 sb_writers#8 
&of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#9 rtnl_mutex &sb->s_type->i_mutex_key#3 mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#9 rtnl_mutex &sb->s_type->i_mutex_key#3 &sb->s_type->i_lock_key#7 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#9 rtnl_mutex &sb->s_type->i_mutex_key#3 &s->s_inode_list_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#9 rtnl_mutex &sb->s_type->i_mutex_key#3 tk_core.seq.seqcount irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#9 rtnl_mutex &sb->s_type->i_mutex_key#3 &sb->s_type->i_lock_key#7 &dentry->d_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#9 rtnl_mutex &sb->s_type->i_mutex_key#3 &____s->seqcount#2 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#9 rtnl_mutex &sb->s_type->i_mutex_key#3 &____s->seqcount irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#9 rtnl_mutex &sb->s_type->i_mutex_key#3 pool_lock#2 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#9 rtnl_mutex &sb->s_type->i_mutex_key#3 &n->list_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#9 rtnl_mutex &sb->s_type->i_mutex_key#3 &n->list_lock &c->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#9 rtnl_mutex &____s->seqcount irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#9 rtnl_mutex &obj_hash[i].lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#9 rtnl_mutex fs_reclaim irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#9 rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#9 rtnl_mutex &c->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#9 rtnl_mutex &base->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#9 rtnl_mutex &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#9 rtnl_mutex (work_completion)(&(&devlink_port->type_warn_dw)->work) irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#9 rtnl_mutex &devlink_port->type_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#9 rtnl_mutex nl_table_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#9 rtnl_mutex nl_table_wait.lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#9 rtnl_mutex net_rwsem irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#9 rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 sb_writers#8 
&of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#9 rtnl_mutex &tn->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#9 rtnl_mutex &x->wait#9 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#9 rtnl_mutex &n->list_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#9 rtnl_mutex &n->list_lock &c->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#9 rtnl_mutex &k->list_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#9 rtnl_mutex gdp_mutex irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#9 rtnl_mutex gdp_mutex &k->list_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#9 rtnl_mutex gdp_mutex fs_reclaim irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#9 rtnl_mutex gdp_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#9 rtnl_mutex gdp_mutex lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#9 rtnl_mutex gdp_mutex lock kernfs_idr_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#9 rtnl_mutex gdp_mutex &root->kernfs_rwsem irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#9 rtnl_mutex gdp_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#9 rtnl_mutex gdp_mutex kobj_ns_type_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#9 rtnl_mutex gdp_mutex &rq->__lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#9 rtnl_mutex gdp_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#9 rtnl_mutex lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#9 rtnl_mutex lock kernfs_idr_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#9 rtnl_mutex &root->kernfs_rwsem irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#9 rtnl_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#9 rtnl_mutex bus_type_sem irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#9 rtnl_mutex sysfs_symlink_target_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#9 rtnl_mutex lock kernfs_idr_lock pool_lock#2 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#9 rtnl_mutex &root->kernfs_rwsem irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex 
&devlink->lock_key#9 rtnl_mutex &____s->seqcount#2 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#9 rtnl_mutex &dev->power.lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#9 rtnl_mutex dpm_list_mtx irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#9 rtnl_mutex uevent_sock_mutex irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#9 rtnl_mutex uevent_sock_mutex fs_reclaim irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#9 rtnl_mutex uevent_sock_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#9 rtnl_mutex uevent_sock_mutex &c->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#9 rtnl_mutex uevent_sock_mutex nl_table_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#9 rtnl_mutex uevent_sock_mutex &obj_hash[i].lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#9 rtnl_mutex uevent_sock_mutex nl_table_wait.lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#9 rtnl_mutex &k->k_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#9 rtnl_mutex subsys mutex#17 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#9 rtnl_mutex subsys mutex#17 &k->k_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#9 rtnl_mutex pool_lock#2 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#9 rtnl_mutex &dir->lock#2 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#9 rtnl_mutex uevent_sock_mutex &____s->seqcount#2 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#9 rtnl_mutex uevent_sock_mutex &____s->seqcount irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#9 rtnl_mutex dev_hotplug_mutex irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#9 rtnl_mutex dev_hotplug_mutex &dev->power.lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#9 rtnl_mutex dev_base_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#9 rtnl_mutex input_pool.lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#9 rtnl_mutex rcu_read_lock &pool->lock/1 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#9 rtnl_mutex rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#9 rtnl_mutex rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#9 rtnl_mutex 
rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#9 rtnl_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#9 rtnl_mutex batched_entropy_u32.lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#9 rtnl_mutex &tbl->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#9 rtnl_mutex sysctl_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#9 rtnl_mutex failover_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#9 rtnl_mutex pcpu_alloc_mutex irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#9 rtnl_mutex pcpu_alloc_mutex pcpu_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#9 rtnl_mutex proc_subdir_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#9 rtnl_mutex proc_inum_ida.xa_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#9 rtnl_mutex proc_subdir_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#9 rtnl_mutex &idev->mc_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#9 rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#9 rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#9 rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#9 rtnl_mutex &idev->mc_lock &obj_hash[i].lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#9 rtnl_mutex &idev->mc_lock _xmit_ETHER irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#9 rtnl_mutex &pnettable->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#9 rtnl_mutex smc_ib_devices.mutex irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#9 rtnl_mutex &vn->sock_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#9 rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#9 &sb->s_type->i_mutex_key#3 &rq->__lock irq_context: 0 nlk_cb_mutex-GENERIC rcu_read_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: 0 nlk_cb_mutex-GENERIC rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 nlk_cb_mutex-GENERIC rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 nlk_cb_mutex-GENERIC rcu_read_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 nlk_cb_mutex-GENERIC rcu_read_lock 
rcu_read_lock_bh pool_lock#2 irq_context: 0 nlk_cb_mutex-GENERIC rcu_read_lock rcu_node_0 irq_context: 0 nlk_cb_mutex-GENERIC rcu_read_lock &rq->__lock irq_context: 0 nlk_cb_mutex-GENERIC rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &xa->xa_lock#6 &____s->seqcount#2 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &lock->wait_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &p->pi_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &p->pi_lock &cfs_rq->removed.lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &p->pi_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-crypt-wg0#9 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh &meta->lock irq_context: 0 (wq_completion)wg-crypt-wg0#9 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh kfence_freelist_lock irq_context: 0 sk_lock-AF_TIPC batched_entropy_u8.lock crngs.lock irq_context: 0 sk_lock-AF_TIPC k-slock-AF_TIPC &meta->lock irq_context: 0 sk_lock-AF_TIPC k-slock-AF_TIPC kfence_freelist_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock batched_entropy_u8.lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock kfence_freelist_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &meta->lock irq_context: 0 sk_lock-AF_TIPC k-slock-AF_TIPC quarantine_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 remove_cache_srcu rcu_read_lock &rcu_state.expedited_wq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 remove_cache_srcu rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 remove_cache_srcu rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 remove_cache_srcu rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_TIPC rcu_read_lock &rcu_state.expedited_wq irq_context: 0 sk_lock-AF_TIPC rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 sk_lock-AF_TIPC rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 sk_lock-AF_TIPC rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_TIPC fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 sk_lock-AF_TIPC fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem rcu_read_lock &rcu_state.gp_wq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) &tc->t_conn_path_lock clock-AF_INET6 irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) &tc->t_conn_path_lock clock-AF_INET6 rds_tcp_tc_list_lock 
irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) &tc->t_conn_path_lock rcu_read_lock rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) &tc->t_conn_path_lock rcu_read_lock rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) &tc->t_conn_path_lock rcu_read_lock rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) &tc->t_conn_path_lock rcu_read_lock rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) &tc->t_conn_path_lock rcu_read_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) &tc->t_conn_path_lock rcu_read_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) &tc->t_conn_path_lock &cp->cp_lock irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) &tc->t_conn_path_lock &rq->__lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock tcp_metrics_lock &c->lock irq_context: 0 sk_lock-AF_INET6 &f->f_owner.lock irq_context: 0 &pipe->mutex/1 sk_lock-AF_INET6 irq_context: 0 &pipe->mutex/1 sk_lock-AF_INET6 &rq->__lock irq_context: 0 &pipe->mutex/1 sk_lock-AF_INET6 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &pipe->mutex/1 sk_lock-AF_INET6 slock-AF_INET6 irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) rcu_read_lock &pool->lock fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 &pipe->mutex/1 sk_lock-AF_INET6 tk_core.seq.seqcount irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 tomoyo_ss rcu_node_0 irq_context: 0 rtnl_mutex rcu_read_lock rcu_read_lock_bh rcu_read_lock &n->list_lock irq_context: 0 rtnl_mutex rcu_read_lock rcu_read_lock_bh rcu_read_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)events (work_completion)(&aux->work) rtnl_mutex rtnl_mutex.wait_lock irq_context: 0 (wq_completion)events (work_completion)(&aux->work) rtnl_mutex &pool->lock irq_context: 0 (wq_completion)events (work_completion)(&aux->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&aux->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&aux->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)events (work_completion)(&aux->work) &p->pi_lock irq_context: 0 (wq_completion)events (work_completion)(&aux->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&aux->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq 
irq_context: 0 cb_lock &devlink->lock_key#4 &sb->s_type->i_mutex_key#3 rcu_node_0 irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex &ul->lock#2 irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex uevent_sock_mutex &n->list_lock irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex uevent_sock_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)events (work_completion)(&aux->work) rtnl_mutex &pool->lock &p->pi_lock irq_context: 0 (wq_completion)events (work_completion)(&aux->work) rtnl_mutex &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&aux->work) rtnl_mutex &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &pipe->mutex/1 slock-AF_INET6 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 k-sk_lock-AF_INET6 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &ndev->lock &ifa->lock fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &ndev->lock &ifa->lock fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &ndev->lock &ifa->lock fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &ndev->lock &ifa->lock fill_pool_map-wait-type-override &n->list_lock irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex remove_cache_srcu irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex remove_cache_srcu &rq->__lock irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex remove_cache_srcu quarantine_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 k-sk_lock-AF_INET6 k-slock-AF_INET6 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 k-sk_lock-AF_INET6 fs_reclaim irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 k-sk_lock-AF_INET6 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 k-sk_lock-AF_INET6 pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 k-sk_lock-AF_INET6 tk_core.seq.seqcount irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex rcu_state.exp_mutex &rq->__lock &cfs_rq->removed.lock irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex gdp_mutex kernfs_idr_lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 k-sk_lock-AF_INET6 &rq->__lock irq_context: 0 cb_lock &devlink->lock_key#4 rcu_state.barrier_mutex.wait_lock irq_context: 0 cb_lock &devlink->lock_key#4 rcu_state.exp_mutex &rnp->exp_wq[2] irq_context: 0 cb_lock &devlink->lock_key#4 &x->wait#10 irq_context: 0 cb_lock &devlink->lock_key#4 &ht->mutex rcu_read_lock rcu_node_0 irq_context: 0 cb_lock &devlink->lock_key#4 &ht->mutex rcu_read_lock &rq->__lock irq_context: 0 cb_lock &devlink->lock_key#4 &ht->mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock &devlink->lock_key#4 &rnp->exp_lock irq_context: 0 cb_lock &devlink->lock_key#4 rcu_state.exp_mutex irq_context: 0 cb_lock &devlink->lock_key#4 rcu_state.exp_mutex rcu_state.exp_mutex.wait_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 k-sk_lock-AF_INET6 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock &devlink->lock_key#4 
remove_cache_srcu irq_context: 0 cb_lock &devlink->lock_key#4 remove_cache_srcu quarantine_lock irq_context: 0 cb_lock &devlink->lock_key#4 remove_cache_srcu &c->lock irq_context: 0 cb_lock &devlink->lock_key#4 remove_cache_srcu &n->list_lock irq_context: 0 cb_lock &devlink->lock_key#4 remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 cb_lock &devlink->lock_key#4 remove_cache_srcu &obj_hash[i].lock irq_context: 0 cb_lock &devlink->lock_key#4 remove_cache_srcu &rq->__lock irq_context: 0 cb_lock &devlink->lock_key#4 remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock &devlink->lock_key#4 rcu_read_lock &tb->tb6_lock &____s->seqcount irq_context: 0 cb_lock &devlink->lock_key#4 rcu_read_lock &rcu_state.gp_wq irq_context: 0 cb_lock &devlink->lock_key#4 rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 cb_lock &devlink->lock_key#4 rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 cb_lock &devlink->lock_key#4 rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock &devlink->lock_key#4 &sb->s_type->i_mutex_key#3 fs_reclaim &rq->__lock irq_context: 0 cb_lock &devlink->lock_key#4 &sb->s_type->i_mutex_key#3 fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex &idev->mc_lock _xmit_ETHER &c->lock irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex &sb->s_type->i_mutex_key#3 rcu_read_lock rcu_node_0 irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex &sb->s_type->i_mutex_key#3 rcu_read_lock &rq->__lock irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex &sb->s_type->i_mutex_key#3 rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex &idev->mc_lock _xmit_ETHER &n->list_lock irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex &idev->mc_lock _xmit_ETHER &n->list_lock &c->lock irq_context: softirq rcu_read_lock hwsim_radio_lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex &idev->mc_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex (work_completion)(&(&devlink_port->type_warn_dw)->work) &rq->__lock irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex (work_completion)(&(&devlink_port->type_warn_dw)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock &devlink->lock_key#4 &sb->s_type->i_mutex_key#3 &____s->seqcount#2 irq_context: 0 cb_lock &devlink->lock_key#4 &sb->s_type->i_mutex_key#3 &____s->seqcount irq_context: 0 cb_lock &devlink->lock_key#4 &sb->s_type->i_mutex_key#3 &rq->__lock &cfs_rq->removed.lock irq_context: 0 cb_lock &devlink->lock_key#4 &sb->s_type->i_mutex_key#3 &cfs_rq->removed.lock irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex &sb->s_type->i_mutex_key#3 &rq->__lock &cfs_rq->removed.lock irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex &sb->s_type->i_mutex_key#3 &cfs_rq->removed.lock irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex &sb->s_type->i_mutex_key#3 &obj_hash[i].lock irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex rcu_read_lock &rq->__lock 
&cfs_rq->removed.lock irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex &pcp->lock &zone->lock irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex &pcp->lock &zone->lock &____s->seqcount irq_context: 0 cb_lock &devlink->lock_key#4 &sb->s_type->i_mutex_key#3 &sb->s_type->i_lock_key#7 bit_wait_table + i irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex batched_entropy_u8.lock irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex kfence_freelist_lock irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex &meta->lock irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex rcu_state.exp_mutex &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)wg-kex-wg2#17 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 (wq_completion)wg-kex-wg2#17 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &n->list_lock irq_context: 0 (wq_completion)wg-kex-wg2#17 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &n->list_lock &c->lock irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex sysctl_lock &obj_hash[i].lock pool_lock irq_context: 0 cb_lock &devlink->lock_key#4 &sb->s_type->i_mutex_key#3 fill_pool_map-wait-type-override &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 k-sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &____s->seqcount#7 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 k-sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &ct->lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 k-sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 k-sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 k-sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg2#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &____s->seqcount#2 irq_context: 0 (wq_completion)wg-kex-wg2#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &____s->seqcount irq_context: 0 (wq_completion)wg-kex-wg1#17 (work_completion)(&peer->transmit_handshake_work) &____s->seqcount#2 irq_context: 0 (wq_completion)wg-kex-wg1#17 (work_completion)(&peer->transmit_handshake_work) &____s->seqcount irq_context: 0 (wq_completion)wg-kex-wg1#17 (work_completion)(&peer->transmit_handshake_work) pool_lock#2 irq_context: 0 rcu_read_lock rcu_read_lock &sighand->siglock &sighand->signalfd_wqh &ep->lock &ep->wq &p->pi_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)wg-kex-wg0#17 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh pool_lock#2 irq_context: 0 (wq_completion)wg-kex-wg0#17 
(work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh &dir->lock#2 irq_context: 0 (wq_completion)wg-kex-wg0#17 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg0#17 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh &list->lock#5 irq_context: 0 (wq_completion)wg-kex-wg0#17 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)wg-kex-wg0#17 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg0#17 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &list->lock#5 irq_context: 0 (wq_completion)wg-kex-wg1#18 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock &c->lock irq_context: 0 (wq_completion)wg-kex-wg1#18 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock &n->list_lock irq_context: 0 (wq_completion)wg-kex-wg1#18 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock &n->list_lock &c->lock irq_context: 0 (wq_completion)wg-kex-wg1#18 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock rcu_read_lock_bh &peer->keypairs.keypair_update_lock &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg1#18 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock rcu_read_lock_bh &peer->keypairs.keypair_update_lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg1#18 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh noop_qdisc.q.lock irq_context: 0 
(wq_completion)wg-kex-wg1#18 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &data->lock irq_context: 0 (wq_completion)wg-kex-wg1#18 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg1#18 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh pool_lock#2 irq_context: 0 (wq_completion)wg-kex-wg1#18 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) pool_lock#2 irq_context: 0 cb_lock &devlink->lock_key#4 pgd_lock irq_context: 0 cb_lock &devlink->lock_key#4 rcu_read_lock pool_lock#2 irq_context: 0 cb_lock &devlink->lock_key#4 key irq_context: 0 cb_lock &devlink->lock_key#4 percpu_counters_lock irq_context: 0 cb_lock &devlink->lock_key#4 pool_lock#2 irq_context: 0 cb_lock &devlink->lock_key#4 rcu_state.exp_mutex &rnp->exp_wq[0] irq_context: 0 cb_lock &devlink->lock_key#4 &pcp->lock &zone->lock irq_context: 0 cb_lock &devlink->lock_key#4 &pcp->lock &zone->lock &____s->seqcount irq_context: 0 cb_lock &devlink->lock_key#4 &sb->s_type->i_mutex_key#3 per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 cb_lock &devlink->lock_key#4 &sb->s_type->i_mutex_key#3 stock_lock rcu_read_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 cb_lock &devlink->lock_key#4 rcu_read_lock &____s->seqcount#2 irq_context: 0 cb_lock &devlink->lock_key#4 rcu_read_lock &____s->seqcount irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex &sb->s_type->i_mutex_key#3 &____s->seqcount#2 irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex &sb->s_type->i_mutex_key#3 &____s->seqcount irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex &sb->s_type->i_mutex_key#3 &pcp->lock &zone->lock irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex &sb->s_type->i_mutex_key#3 &pcp->lock &zone->lock &____s->seqcount irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex &root->kernfs_rwsem rcu_read_lock &rq->__lock irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex &root->kernfs_rwsem rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex &root->kernfs_rwsem rcu_read_lock rcu_node_0 irq_context: 0 &xt[i].mutex fs_reclaim rcu_node_0 irq_context: 0 &xt[i].mutex fs_reclaim 
&rcu_state.expedited_wq irq_context: 0 &xt[i].mutex fs_reclaim &rcu_state.expedited_wq &p->pi_lock irq_context: 0 &xt[i].mutex fs_reclaim &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 &xt[i].mutex fs_reclaim &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 tomoyo_ss remove_cache_srcu rcu_node_0 irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex &hwstats->hwsdev_list_lock &rq->__lock irq_context: 0 (wq_completion)wg-kex-wg0#17 (work_completion)(&peer->transmit_handshake_work) batched_entropy_u8.lock crngs.lock irq_context: softirq (&peer->timer_persistent_keepalive) rcu_read_lock_bh rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: softirq (&peer->timer_persistent_keepalive) rcu_read_lock_bh rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-kex-wg1#17 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh &dir->lock#2 irq_context: 0 (wq_completion)wg-kex-wg1#17 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg1#17 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh &list->lock#5 irq_context: 0 (wq_completion)wg-kex-wg1#17 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg1#17 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &list->lock#5 irq_context: 0 (wq_completion)wg-kex-wg2#18 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg2#18 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg2#18 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock crngs.lock irq_context: 0 (wq_completion)wg-kex-wg2#18 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg2#18 (work_completion)(&({ do { const void *__vpp_verify = 
(typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock rcu_read_lock_bh batched_entropy_u32.lock irq_context: 0 (wq_completion)wg-kex-wg2#18 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock rcu_read_lock_bh &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg2#18 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &cookie->lock irq_context: 0 (wq_completion)wg-kex-wg2#18 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &cookie->lock irq_context: 0 (wq_completion)wg-kex-wg2#18 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock irq_context: 0 (wq_completion)wg-kex-wg2#18 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg2#18 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)wg-kex-wg2#18 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &ul->lock irq_context: 0 (wq_completion)wg-kex-wg2#18 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh 
batched_entropy_u32.lock irq_context: 0 (wq_completion)wg-kex-wg2#18 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#7 irq_context: 0 (wq_completion)wg-kex-wg2#18 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)wg-kex-wg2#18 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh noop_qdisc.q.lock irq_context: 0 (wq_completion)wg-kex-wg2#18 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &data->lock irq_context: 0 (wq_completion)wg-kex-wg2#18 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg0#17 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &ul->lock irq_context: 0 (wq_completion)wg-kex-wg1#18 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh batched_entropy_u32.lock crngs.lock irq_context: 0 (wq_completion)wg-kex-wg0#17 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock rcu_read_lock_bh batched_entropy_u32.lock crngs.lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &pcp->lock &zone->lock &____s->seqcount irq_context: 0 cb_lock &devlink->lock_key#4 &xa->xa_lock#14 &c->lock irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock 
&devlink->lock_key#4 rtnl_mutex &idev->mc_lock &____s->seqcount#2 irq_context: 0 cb_lock &devlink->lock_key#4 rtnl_mutex &idev->mc_lock &____s->seqcount irq_context: 0 &type->i_mutex_dir_key#3 sb_writers#4 &rcu_state.expedited_wq &p->pi_lock irq_context: softirq (&peer->timer_retransmit_handshake) &peer->endpoint_lock &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)wg-kex-wg1#17 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh &c->lock irq_context: 0 (wq_completion)wg-kex-wg1#17 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh &n->list_lock irq_context: 0 (wq_completion)wg-kex-wg1#17 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh &n->list_lock &c->lock irq_context: 0 (wq_completion)wg-kex-wg2#18 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock &rq->__lock irq_context: 0 (wq_completion)wg-kex-wg2#18 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock rcu_read_lock_bh &peer->keypairs.keypair_update_lock &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg2#18 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock rcu_read_lock_bh &peer->keypairs.keypair_update_lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg2#18 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 rtnl_mutex netlbl_unlhsh_lock irq_context: 0 rtnl_mutex rcu_read_lock &po->bind_lock rcu_read_lock &ei->socket.wq.wait irq_context: 0 rtnl_mutex rcu_read_lock &po->bind_lock rcu_read_lock &ei->socket.wq.wait &p->pi_lock irq_context: 0 rtnl_mutex rcu_read_lock &po->bind_lock rcu_read_lock &ei->socket.wq.wait &p->pi_lock &rq->__lock irq_context: 0 rtnl_mutex rcu_read_lock &po->bind_lock rcu_read_lock &ei->socket.wq.wait &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex nr_list_lock irq_context: 0 rtnl_mutex nr_neigh_list_lock irq_context: 0 sb_writers#8 tomoyo_ss rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg2#17 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 &rcu_state.expedited_wq &p->pi_lock irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET &obj_hash[i].lock pool_lock 
irq_context: 0 tasklist_lock &sighand->siglock &n->list_lock irq_context: 0 tasklist_lock &sighand->siglock &n->list_lock &c->lock irq_context: 0 (wq_completion)wg-kex-wg0#17 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 (wq_completion)wg-kex-wg0#17 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)wg-kex-wg0#17 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rlock-AF_PACKET irq_context: 0 (wq_completion)wg-kex-wg1#18 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &c->lock irq_context: 0 &hdev->req_lock (work_completion)(&(&hdev->rpa_expired)->work) irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 k-sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 &hdev->req_lock &hdev->lock fs_reclaim irq_context: 0 &hdev->req_lock &hdev->lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &hdev->req_lock &hdev->lock tk_core.seq.seqcount irq_context: 0 &hdev->req_lock &hdev->lock hci_sk_list.lock irq_context: 0 &hdev->req_lock &hdev->lock &data->lock irq_context: 0 sk_lock-AF_NETLINK rcu_read_lock &rcu_state.expedited_wq irq_context: 0 sk_lock-AF_NETLINK rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 sk_lock-AF_NETLINK rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 sk_lock-AF_NETLINK rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &hdev->lock (work_completion)(&(&hdev->discov_off)->work) irq_context: 0 &hdev->lock (work_completion)(&(&hdev->service_cache)->work) irq_context: 0 &hdev->lock (work_completion)(&(&hdev->rpa_expired)->work) irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 k-sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &table->lock#3 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 k-sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 k-sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &n->list_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 k-sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &n->list_lock &c->lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 k-sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 k-sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh 
rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 k-sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 k-sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &list->lock#5 irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6/1 irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6/1 tk_core.seq.seqcount irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6/1 clock-AF_INET6 irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6/1 clock-AF_INET6 rcu_read_lock rcu_read_lock &pool->lock/1 irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6/1 clock-AF_INET6 rcu_read_lock rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6/1 clock-AF_INET6 rcu_read_lock rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6/1 clock-AF_INET6 rcu_read_lock rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6/1 clock-AF_INET6 rcu_read_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6/1 clock-AF_INET6 rcu_read_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6/1 clock-AF_INET6 tk_core.seq.seqcount irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6/1 &obj_hash[i].lock irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6/1 &base->lock irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6/1 &base->lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 k-sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 k-sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 k-sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 k-sk_lock-AF_INET6 rcu_read_lock rcu_node_0 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 k-sk_lock-AF_INET6 &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 k-sk_lock-AF_INET6 &base->lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 k-sk_lock-AF_INET6 &base->lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 k-sk_lock-AF_INET6 rcu_read_lock &pool->lock irq_context: 0 (wq_completion)events (work_completion)(&data->fib_event_work) &data->fib_lock remove_cache_srcu &pcp->lock &zone->lock irq_context: 0 (wq_completion)events (work_completion)(&data->fib_event_work) &data->fib_lock remove_cache_srcu &pcp->lock &zone->lock &____s->seqcount irq_context: 0 (wq_completion)events (work_completion)(&data->fib_event_work) &data->fib_lock remove_cache_srcu rcu_node_0 irq_context: 0 (wq_completion)events (work_completion)(&data->fib_event_work) &data->fib_lock remove_cache_srcu &rcu_state.expedited_wq irq_context: 0 (wq_completion)events (work_completion)(&data->fib_event_work) &data->fib_lock remove_cache_srcu &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)events 
(work_completion)(&data->fib_event_work) &data->fib_lock remove_cache_srcu &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&data->fib_event_work) &data->fib_lock remove_cache_srcu &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 k-sk_lock-AF_INET6 rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 k-sk_lock-AF_INET6 rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 k-sk_lock-AF_INET6 rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 k-sk_lock-AF_INET6 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 k-sk_lock-AF_INET6 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&msk->work) irq_context: 0 (wq_completion)events (work_completion)(&msk->work) sk_lock-AF_INET6 irq_context: 0 (wq_completion)events (work_completion)(&msk->work) sk_lock-AF_INET6 slock-AF_INET6 irq_context: 0 (wq_completion)events (work_completion)(&msk->work) sk_lock-AF_INET6 slock-AF_INET6 &sk->sk_lock.wq irq_context: 0 (wq_completion)events (work_completion)(&msk->work) sk_lock-AF_INET6 &pool->lock irq_context: 0 (wq_completion)events (work_completion)(&msk->work) sk_lock-AF_INET6 &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&msk->work) sk_lock-AF_INET6 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq (&icsk->icsk_delack_timer) k-slock-AF_INET6 irq_context: softirq (&icsk->icsk_delack_timer) k-slock-AF_INET6 tk_core.seq.seqcount irq_context: softirq (&icsk->icsk_delack_timer) k-slock-AF_INET6 rcu_read_lock rcu_read_lock &____s->seqcount#7 irq_context: softirq (&icsk->icsk_delack_timer) k-slock-AF_INET6 rcu_read_lock rcu_read_lock &ct->lock irq_context: softirq (&icsk->icsk_delack_timer) k-slock-AF_INET6 rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: softirq (&icsk->icsk_delack_timer) k-slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: softirq (&icsk->icsk_delack_timer) k-slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: softirq (&icsk->icsk_delack_timer) k-slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: softirq (&icsk->icsk_delack_timer) k-slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: softirq (&icsk->icsk_delack_timer) k-slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &list->lock#5 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 k-sk_lock-AF_INET6 slock-AF_INET6 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 k-sk_lock-AF_INET6 slock-AF_INET6 rcu_read_lock &pool->lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 k-sk_lock-AF_INET6 slock-AF_INET6 rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 k-sk_lock-AF_INET6 slock-AF_INET6 rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 (wq_completion)events rcu_node_0 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 k-sk_lock-AF_INET6 slock-AF_INET6 rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 
k-sk_lock-AF_INET6 slock-AF_INET6 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 k-sk_lock-AF_INET6 slock-AF_INET6 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 slock-AF_INET6 &sk->sk_lock.wq irq_context: 0 &sb->s_type->i_mutex_key#10 slock-AF_INET6 &sk->sk_lock.wq &p->pi_lock irq_context: 0 &sb->s_type->i_mutex_key#10 slock-AF_INET6 &sk->sk_lock.wq &p->pi_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 slock-AF_INET6 &sk->sk_lock.wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&msk->work) slock-AF_INET6 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 &c->lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_CAIF &rnp->exp_wq[3] irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) &tc->t_conn_path_lock &c->lock irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) &tc->t_conn_path_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) &tc->t_conn_path_lock &____s->seqcount irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) &tc->t_conn_path_lock pool_lock#2 irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) &tc->t_conn_path_lock rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) &tc->t_conn_path_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) &tc->t_conn_path_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) &tc->t_conn_path_lock &obj_hash[i].lock irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) &xa->xa_lock#6 irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) &rq->__lock irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) &fsnotify_mark_srcu irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) &fsnotify_mark_srcu &rq->__lock irq_context: 0 (wq_completion)krdsd (work_completion)(&cp->cp_down_w) k-sk_lock-AF_INET6 irq_context: 0 (wq_completion)krdsd (work_completion)(&cp->cp_down_w) k-sk_lock-AF_INET6 &rq->__lock irq_context: 0 (wq_completion)krdsd (work_completion)(&cp->cp_down_w) k-sk_lock-AF_INET6 k-slock-AF_INET6 irq_context: 0 (wq_completion)krdsd (work_completion)(&cp->cp_down_w) k-sk_lock-AF_INET6 clock-AF_INET6 irq_context: 0 (wq_completion)krdsd (work_completion)(&cp->cp_down_w) k-sk_lock-AF_INET6 clock-AF_INET6 rds_tcp_tc_list_lock irq_context: 0 (wq_completion)krdsd (work_completion)(&cp->cp_down_w) k-slock-AF_INET6 irq_context: 0 (wq_completion)krdsd (work_completion)(&cp->cp_down_w) &rq->__lock irq_context: 0 (wq_completion)krdsd (work_completion)(&cp->cp_down_w) k-sk_lock-AF_INET6 fs_reclaim irq_context: 0 (wq_completion)krdsd (work_completion)(&cp->cp_down_w) k-sk_lock-AF_INET6 
fs_reclaim &rq->__lock irq_context: 0 (wq_completion)krdsd (work_completion)(&cp->cp_down_w) k-sk_lock-AF_INET6 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)krdsd (work_completion)(&cp->cp_down_w) k-sk_lock-AF_INET6 fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 (wq_completion)krdsd (work_completion)(&cp->cp_down_w) k-sk_lock-AF_INET6 pool_lock#2 irq_context: 0 (wq_completion)krdsd (work_completion)(&cp->cp_down_w) k-sk_lock-AF_INET6 tk_core.seq.seqcount irq_context: 0 (wq_completion)krdsd (work_completion)(&cp->cp_down_w) k-sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &____s->seqcount#7 irq_context: 0 (wq_completion)krdsd (work_completion)(&cp->cp_down_w) k-sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &ct->lock irq_context: 0 (wq_completion)krdsd (work_completion)(&cp->cp_down_w) k-sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)krdsd (work_completion)(&cp->cp_down_w) k-sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)krdsd (work_completion)(&cp->cp_down_w) k-sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)krdsd (work_completion)(&cp->cp_down_w) k-sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)krdsd (work_completion)(&cp->cp_down_w) k-sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &list->lock#5 irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock k-slock-AF_INET6/1 tk_core.seq.seqcount irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock k-slock-AF_INET6/1 slock-AF_INET6 irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock k-slock-AF_INET6/1 pool_lock#2 irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock k-slock-AF_INET6/1 rcu_read_lock rcu_read_lock &____s->seqcount#7 irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock k-slock-AF_INET6/1 rcu_read_lock rcu_read_lock &ct->lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock k-slock-AF_INET6/1 rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock k-slock-AF_INET6/1 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock k-slock-AF_INET6/1 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock k-slock-AF_INET6/1 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock k-slock-AF_INET6/1 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &table->lock#3 irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock k-slock-AF_INET6/1 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock k-slock-AF_INET6/1 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock k-slock-AF_INET6/1 rcu_read_lock rcu_read_lock 
rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock k-slock-AF_INET6/1 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock k-slock-AF_INET6/1 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &list->lock#5 irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock k-slock-AF_INET6/1 &obj_hash[i].lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock k-slock-AF_INET6/1 &base->lock irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 sb_writers#4 &journal->j_state_lock irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 sb_writers#4 &journal->j_state_lock tk_core.seq.seqcount irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 sb_writers#4 &journal->j_state_lock &obj_hash[i].lock irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 sb_writers#4 &journal->j_state_lock &base->lock irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 sb_writers#4 &journal->j_state_lock &base->lock &obj_hash[i].lock irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 sb_writers#4 &rq->__lock irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 sb_writers#4 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock k-slock-AF_INET6/1 &base->lock &obj_hash[i].lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock k-slock-AF_INET6/1 &tcp_hashinfo.bhash[i].lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock k-slock-AF_INET6/1 &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock k-slock-AF_INET6/1 &hashinfo->ehash_locks[i] irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock k-slock-AF_INET6/1 slock-AF_INET6 &obj_hash[i].lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock k-slock-AF_INET6/1 slock-AF_INET6 pool_lock#2 irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock k-slock-AF_INET6/1 slock-AF_INET6 rcu_read_lock &pool->lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock k-slock-AF_INET6/1 slock-AF_INET6 rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock k-slock-AF_INET6/1 slock-AF_INET6 rcu_read_lock &pool->lock pool_lock#2 irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock k-slock-AF_INET6/1 slock-AF_INET6 rcu_read_lock &pool->lock &p->pi_lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock k-slock-AF_INET6/1 slock-AF_INET6 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock k-slock-AF_INET6/1 slock-AF_INET6 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)krdsd (work_completion)(&cp->cp_down_w) k-sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)krdsd (work_completion)(&cp->cp_down_w) k-sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)krdsd (work_completion)(&cp->cp_down_w) k-sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &rq->__lock 
&per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&msk->work) sk_lock-AF_INET6 k-sk_lock-AF_INET6/1 irq_context: 0 (wq_completion)events (work_completion)(&msk->work) sk_lock-AF_INET6 k-sk_lock-AF_INET6/1 k-slock-AF_INET6 irq_context: 0 (wq_completion)events (work_completion)(&msk->work) sk_lock-AF_INET6 k-sk_lock-AF_INET6/1 &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&msk->work) sk_lock-AF_INET6 k-slock-AF_INET6 irq_context: 0 (wq_completion)events (work_completion)(&msk->work) sk_lock-AF_INET6 &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&msk->work) sk_lock-AF_INET6 &base->lock irq_context: 0 (wq_completion)events (work_completion)(&msk->work) sk_lock-AF_INET6 &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&msk->work) sk_lock-AF_INET6 &sb->s_type->i_lock_key#8 irq_context: 0 (wq_completion)events (work_completion)(&msk->work) sk_lock-AF_INET6 &xa->xa_lock#6 irq_context: 0 (wq_completion)events (work_completion)(&msk->work) sk_lock-AF_INET6 pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&msk->work) sk_lock-AF_INET6 &fsnotify_mark_srcu irq_context: 0 (wq_completion)events (work_completion)(&msk->work) sk_lock-AF_INET6 k-sk_lock-AF_INET6/1 k-clock-AF_INET6 irq_context: 0 (wq_completion)events (work_completion)(&msk->work) sk_lock-AF_INET6 k-sk_lock-AF_INET6/1 k-slock-AF_INET6 &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&msk->work) sk_lock-AF_INET6 k-sk_lock-AF_INET6/1 k-slock-AF_INET6 pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&msk->work) sk_lock-AF_INET6 k-sk_lock-AF_INET6/1 k-slock-AF_INET6 krc.lock irq_context: 0 (wq_completion)events (work_completion)(&msk->work) sk_lock-AF_INET6 k-sk_lock-AF_INET6/1 k-slock-AF_INET6 elock-AF_INET6 irq_context: 0 (wq_completion)events (work_completion)(&msk->work) sk_lock-AF_INET6 &dir->lock irq_context: 0 (wq_completion)events (work_completion)(&msk->work) sk_lock-AF_INET6 stock_lock irq_context: 0 (wq_completion)events (work_completion)(&msk->work) sk_lock-AF_INET6 &token_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&msk->work) sk_lock-AF_INET6 &msk->pm.lock irq_context: 0 (wq_completion)events (work_completion)(&msk->work) sk_lock-AF_INET6 elock-AF_INET6 irq_context: 0 (wq_completion)events (work_completion)(&msk->work) pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&msk->work) &dir->lock irq_context: 0 (wq_completion)events (work_completion)(&msk->work) &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&msk->work) stock_lock irq_context: 0 (wq_completion)krdsd (work_completion)(&cp->cp_down_w) k-sk_lock-AF_INET6 rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)krdsd (work_completion)(&cp->cp_down_w) k-sk_lock-AF_INET6 &obj_hash[i].lock irq_context: 0 (wq_completion)krdsd (work_completion)(&cp->cp_down_w) k-sk_lock-AF_INET6 &base->lock irq_context: 0 (wq_completion)krdsd (work_completion)(&cp->cp_down_w) k-sk_lock-AF_INET6 &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)krdsd (work_completion)(&cp->cp_down_w) k-sk_lock-AF_INET6 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)krdsd (work_completion)(&cp->cp_down_w) k-sk_lock-AF_INET6 &hashinfo->ehash_locks[i] irq_context: 0 (wq_completion)krdsd (work_completion)(&cp->cp_down_w) k-sk_lock-AF_INET6 &tcp_hashinfo.bhash[i].lock irq_context: 0 (wq_completion)krdsd 
(work_completion)(&cp->cp_down_w) k-sk_lock-AF_INET6 &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock irq_context: 0 (wq_completion)krdsd (work_completion)(&cp->cp_down_w) k-sk_lock-AF_INET6 &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock &obj_hash[i].lock irq_context: 0 (wq_completion)krdsd (work_completion)(&cp->cp_down_w) k-sk_lock-AF_INET6 &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock pool_lock#2 irq_context: 0 (wq_completion)krdsd (work_completion)(&cp->cp_down_w) k-sk_lock-AF_INET6 elock-AF_INET6 irq_context: 0 (wq_completion)krdsd (work_completion)(&cp->cp_down_w) pool_lock#2 irq_context: 0 (wq_completion)krdsd (work_completion)(&cp->cp_down_w) &dir->lock irq_context: 0 (wq_completion)krdsd (work_completion)(&cp->cp_down_w) &sb->s_type->i_lock_key#8 irq_context: 0 (wq_completion)krdsd (work_completion)(&cp->cp_down_w) &xa->xa_lock#6 irq_context: 0 (wq_completion)krdsd (work_completion)(&cp->cp_down_w) &fsnotify_mark_srcu irq_context: 0 (wq_completion)krdsd (work_completion)(&cp->cp_down_w) &fsnotify_mark_srcu &rq->__lock irq_context: 0 (wq_completion)krdsd (work_completion)(&cp->cp_down_w) &fsnotify_mark_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem bit_wait_table + i irq_context: 0 rtnl_mutex &wq->mutex &pool->lock/1 irq_context: 0 rtnl_mutex &wq->mutex &x->wait#10 irq_context: 0 rtnl_mutex &pool->lock/1 irq_context: 0 rtnl_mutex &pool->lock/1 rcu_read_lock &pool->lock irq_context: 0 rtnl_mutex &pool->lock/1 rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex &pool->lock/1 rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 rtnl_mutex &pool->lock/1 rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 rtnl_mutex &pool->lock/1 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 rtnl_mutex &pool->lock/1 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-kex-wg0#17 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &n->list_lock irq_context: 0 (wq_completion)wg-kex-wg0#17 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)wg-kex-wg0#17 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg2#10 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock batched_entropy_u8.lock irq_context: 0 (wq_completion)wg-crypt-wg2#10 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock kfence_freelist_lock irq_context: 0 (wq_completion)wg-crypt-wg2#10 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &meta->lock irq_context: 0 epnested_mutex &ep->mtx lock kernfs_idr_lock &n->list_lock &c->lock irq_context: 0 &net->xfrm.xfrm_cfg_mutex &pfk->dump_lock irq_context: 0 &net->xfrm.xfrm_cfg_mutex &pfk->dump_lock &net->xfrm.xfrm_policy_lock irq_context: 0 &net->xfrm.xfrm_cfg_mutex &pfk->dump_lock &net->xfrm.xfrm_policy_lock &c->lock irq_context: 0 &net->xfrm.xfrm_cfg_mutex 
&pfk->dump_lock &net->xfrm.xfrm_policy_lock pool_lock#2 irq_context: 0 &net->xfrm.xfrm_cfg_mutex &pfk->dump_lock &rq->__lock irq_context: 0 &net->xfrm.xfrm_cfg_mutex &pfk->dump_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &net->xfrm.xfrm_cfg_mutex &pfk->dump_lock &c->lock irq_context: 0 &net->xfrm.xfrm_cfg_mutex &pfk->dump_lock pool_lock#2 irq_context: 0 &net->xfrm.xfrm_cfg_mutex &pfk->dump_lock rlock-AF_KEY irq_context: 0 &net->xfrm.xfrm_cfg_mutex &pfk->dump_lock &data->lock irq_context: 0 &net->xfrm.xfrm_cfg_mutex &pfk->dump_lock &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &rxe->usdev_lock rtnl_mutex.wait_lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &rxe->usdev_lock &p->pi_lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &lock->wait_lock irq_context: 0 (wq_completion)events (work_completion)(&(&hwstats->traffic_dw)->work) &hwstats->hwsdev_list_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 epnested_mutex epnested_mutex.wait_lock irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 fs_reclaim irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 &____s->seqcount irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 pool_lock#2 irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 stock_lock irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 &xa->xa_lock#6 irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 lock#4 irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 &info->lock irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 &xa->xa_lock#6 stock_lock irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 &xa->xa_lock#6 pool_lock#2 irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 lock#4 &lruvec->lru_lock irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 rcu_read_lock rcu_node_0 irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 rcu_read_lock &rq->__lock irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 key#9 irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 &sem->wait_lock irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 &pcp->lock &zone->lock irq_context: 0 kn->active#15 remove_cache_srcu irq_context: 0 kn->active#15 remove_cache_srcu quarantine_lock irq_context: 0 kn->active#15 remove_cache_srcu &c->lock irq_context: 0 kn->active#15 remove_cache_srcu &n->list_lock irq_context: 0 kn->active#15 remove_cache_srcu &obj_hash[i].lock irq_context: 0 kn->active#15 remove_cache_srcu &rq->__lock irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 &pcp->lock &zone->lock &____s->seqcount irq_context: 0 epnested_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 epnested_mutex.wait_lock irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 &n->list_lock irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 &n->list_lock &c->lock irq_context: 0 epnested_mutex &ep->mtx uevent_sock_mutex &____s->seqcount#2 irq_context: softirq rcu_callback rcu_read_lock &pool->lock fill_pool_map-wait-type-override &c->lock irq_context: 0 epnested_mutex &ep->mtx uevent_sock_mutex &____s->seqcount irq_context: 0 namespace_sem fs_reclaim &rq->__lock irq_context: 0 namespace_sem fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_INET6 
remove_cache_srcu rcu_read_lock &cfs_rq->removed.lock irq_context: 0 sk_lock-AF_INET6 remove_cache_srcu rcu_read_lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET6 remove_cache_srcu &rq->__lock &cfs_rq->removed.lock irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex nf_hook_mutex batched_entropy_u8.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex nf_hook_mutex kfence_freelist_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex defrag6_mutex nf_hook_mutex &c->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex defrag6_mutex nf_hook_mutex &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex defrag6_mutex nf_hook_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex _xmit_TUNNEL6 &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_read_lock_bh rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_read_lock_bh rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_read_lock_bh rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_read_lock_bh rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_read_lock_bh rcu_read_lock &pool->lock/1 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_read_lock_bh rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_read_lock_bh rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_read_lock_bh rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)l2tp (work_completion)(&tunnel->del_work) k-sk_lock-AF_INET irq_context: 0 (wq_completion)l2tp (work_completion)(&tunnel->del_work) k-sk_lock-AF_INET k-slock-AF_INET irq_context: 0 (wq_completion)l2tp (work_completion)(&tunnel->del_work) k-slock-AF_INET irq_context: 0 (wq_completion)l2tp (work_completion)(&tunnel->del_work) l2tp_ip_lock irq_context: 0 (wq_completion)l2tp (work_completion)(&tunnel->del_work) k-clock-AF_INET irq_context: 0 (wq_completion)l2tp (work_completion)(&tunnel->del_work) &sb->s_type->i_lock_key#8 irq_context: 0 (wq_completion)l2tp (work_completion)(&tunnel->del_work) &xa->xa_lock#6 irq_context: 0 (wq_completion)l2tp (work_completion)(&tunnel->del_work) &obj_hash[i].lock irq_context: 0 (wq_completion)l2tp (work_completion)(&tunnel->del_work) pool_lock#2 irq_context: 0 (wq_completion)l2tp (work_completion)(&tunnel->del_work) &fsnotify_mark_srcu irq_context: 0 (wq_completion)l2tp (work_completion)(&tunnel->del_work) krc.lock irq_context: 0 (wq_completion)l2tp (work_completion)(&tunnel->del_work) &dir->lock irq_context: 0 (wq_completion)l2tp (work_completion)(&tunnel->del_work) stock_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex _xmit_TUNNEL6 pool_lock#2 irq_context: 0 
(wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex _xmit_TUNNEL6 krc.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &idev->mc_lock &vlan_netdev_addr_lock_key/1 pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex mrt_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx (inetaddr_chain).rwsem irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx (inetaddr_chain).rwsem &c->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx (inetaddr_chain).rwsem pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx (inetaddr_chain).rwsem &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx (inetaddr_chain).rwsem rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx (inetaddr_chain).rwsem rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx (inetaddr_chain).rwsem rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx (inetaddr_chain).rwsem rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx (inetaddr_chain).rwsem rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx (inetaddr_chain).rwsem rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx (inetaddr_chain).rwsem fs_reclaim irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx (inetaddr_chain).rwsem fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx (inetaddr_chain).rwsem nl_table_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx (inetaddr_chain).rwsem nl_table_wait.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx (inetaddr_chain).rwsem fib_info_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx (inetaddr_chain).rwsem fib_info_lock &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx (inetaddr_chain).rwsem fib_info_lock pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx (inetaddr_chain).rwsem &tbl->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx (inetaddr_chain).rwsem class irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx (inetaddr_chain).rwsem (&tbl->proxy_timer) irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx (inetaddr_chain).rwsem &base->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx (inetaddr_chain).rwsem &net->sctp.local_addr_lock 
irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx (inetaddr_chain).rwsem &net->sctp.local_addr_lock &net->sctp.addr_wq_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx (inetaddr_chain).rwsem &net->sctp.local_addr_lock &net->sctp.addr_wq_lock pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx (inetaddr_chain).rwsem krc.lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &dev->mutex uevent_sock_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &dev->mutex uevent_sock_mutex &____s->seqcount#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &dev->mutex uevent_sock_mutex &____s->seqcount irq_context: 0 &mm->mmap_lock &base->lock irq_context: 0 &mm->mmap_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &br->hash_lock &meta->lock irq_context: 0 (wq_completion)wg-kex-wg2#18 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &c->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &br->hash_lock quarantine_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &dev->tx_global_lock _xmit_PIMREG#2 irq_context: 0 &mm->mmap_lock &anon_vma->rwsem &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 &mm->mmap_lock &anon_vma->rwsem &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock &n->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock &n->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock &n->lock &____s->seqcount#9 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock nl_table_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock nl_table_wait.lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock rcu_read_lock lock#8 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock rcu_read_lock id_table_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock &dir->lock#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock krc.lock irq_context: 0 (wq_completion)netns net_cleanup_work 
pernet_ops_rwsem rtnl_mutex &ul->lock#2 &n->list_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &ul->lock#2 &n->list_lock &c->lock irq_context: 0 remove_cache_srcu &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock krc.lock &obj_hash[i].lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock krc.lock &base->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock krc.lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_read_lock &tb->tb6_lock batched_entropy_u8.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_read_lock &tb->tb6_lock kfence_freelist_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_read_lock &tb->tb6_lock &meta->lock irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock &in_dev->mc_tomb_lock quarantine_lock irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock &in_dev->mc_tomb_lock &data->lock &obj_hash[i].lock irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock &in_dev->mc_tomb_lock &data->lock &base->lock irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock &in_dev->mc_tomb_lock &data->lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_read_lock &net->sctp.local_addr_lock &net->sctp.addr_wq_lock batched_entropy_u8.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_read_lock &net->sctp.local_addr_lock &net->sctp.addr_wq_lock kfence_freelist_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_read_lock rcu_read_lock &pool->lock/1 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 remove_cache_srcu &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 remove_cache_srcu &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)rcu_gp (work_completion)(&rew->rew_work) &rq->__lock &obj_hash[i].lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&rew->rew_work) &rq->__lock &base->lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&rew->rew_work) &rq->__lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &idev->mc_lock &pcp->lock &zone->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &tbl->lock krc.lock &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &tbl->lock krc.lock &base->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &tbl->lock krc.lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&tbl->gc_work)->work) &tbl->lock krc.lock &obj_hash[i].lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&tbl->gc_work)->work) &tbl->lock krc.lock &base->lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&tbl->gc_work)->work) &tbl->lock krc.lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg2#17 (work_completion)(&peer->transmit_handshake_work) &____s->seqcount#2 irq_context: 0 (wq_completion)wg-kex-wg2#17 (work_completion)(&peer->transmit_handshake_work) &____s->seqcount 
irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&tbl->gc_work)->work) rcu_node_0 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex _xmit_PIMREG irq_context: 0 &kcov->lock kcov_remote_lock &c->lock irq_context: 0 &kcov->lock kcov_remote_lock &n->list_lock irq_context: 0 &kcov->lock kcov_remote_lock &n->list_lock &c->lock irq_context: 0 &data->open_mutex uevent_sock_mutex &n->list_lock irq_context: 0 &data->open_mutex uevent_sock_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)hci0 irq_context: 0 (wq_completion)hci0 (work_completion)(&hdev->power_on) irq_context: 0 (wq_completion)hci0 (work_completion)(&hdev->power_on) &hdev->req_lock irq_context: 0 (wq_completion)hci0 (work_completion)(&hdev->power_on) &hdev->req_lock &obj_hash[i].lock irq_context: 0 (wq_completion)hci0 (work_completion)(&hdev->power_on) &hdev->req_lock &list->lock#9 irq_context: 0 (wq_completion)hci0 (work_completion)(&hdev->power_on) &hdev->req_lock &list->lock#10 irq_context: 0 (wq_completion)hci0 (work_completion)(&hdev->power_on) &hdev->req_lock rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)hci0 (work_completion)(&hdev->power_on) &hdev->req_lock rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)hci0 (work_completion)(&hdev->power_on) &hdev->req_lock rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)hci0 (work_completion)(&hdev->power_on) &hdev->req_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)hci0 (work_completion)(&hdev->power_on) &hdev->req_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)hci0 (work_completion)(&hdev->power_on) &hdev->req_lock &hdev->req_wait_q irq_context: 0 (wq_completion)hci0 (work_completion)(&hdev->power_on) &hdev->req_lock &base->lock irq_context: 0 (wq_completion)hci0 (work_completion)(&hdev->power_on) &hdev->req_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)hci0 (work_completion)(&hdev->power_on) &hdev->req_lock &rq->__lock irq_context: 0 (wq_completion)hci0 (work_completion)(&hdev->power_on) &hdev->req_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)hci0#2 irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->cmd_work) irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->cmd_work) &list->lock#9 irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->cmd_work) fs_reclaim irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->cmd_work) fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->cmd_work) tk_core.seq.seqcount irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->cmd_work) &list->lock#11 irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->cmd_work) &data->read_wait irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->cmd_work) &data->read_wait &p->pi_lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->cmd_work) &data->read_wait &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->cmd_work) &data->read_wait &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->cmd_work) rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &list->lock#9 irq_context: 0 
(wq_completion)hci0#2 (work_completion)(&hdev->rx_work) lock#6 irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) lock#6 kcov_remote_lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) fs_reclaim irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &c->lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &hdev->lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) (console_sem).lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) console_lock console_srcu console_owner_lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) console_lock console_srcu console_owner irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) console_lock console_srcu console_owner &port_lock_key irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) console_lock console_srcu console_owner console_owner_lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &obj_hash[i].lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &hdev->req_wait_q irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &hdev->req_wait_q &p->pi_lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &hdev->req_wait_q &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &hdev->req_wait_q &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &rq->__lock irq_context: 0 (wq_completion)hci0 (work_completion)(&hdev->power_on) &hdev->req_lock (&timer.timer) irq_context: 0 (wq_completion)hci0 (work_completion)(&hdev->power_on) &hdev->req_lock &c->lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &data->lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) lock#6 &kcov->lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->cmd_work) &data->lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->cmd_work) &obj_hash[i].lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->cmd_work) &c->lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->cmd_work) &n->list_lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->cmd_work) &n->list_lock &c->lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->cmd_work) rcu_read_lock &base->lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->cmd_work) rcu_read_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &base->lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)hci0 (work_completion)(&hdev->power_on) &hdev->req_lock &n->list_lock irq_context: 0 (wq_completion)hci0 (work_completion)(&hdev->power_on) &hdev->req_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)hci0 (work_completion)(&hdev->power_on) &hdev->req_lock &data->lock irq_context: 0 (wq_completion)hci0 (work_completion)(&hdev->power_on) &hdev->req_lock &____s->seqcount#2 irq_context: 0 (wq_completion)hci0 (work_completion)(&hdev->power_on) &hdev->req_lock &pcp->lock &zone->lock 
irq_context: 0 (wq_completion)hci0 (work_completion)(&hdev->power_on) &hdev->req_lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 (wq_completion)hci0 (work_completion)(&hdev->power_on) &hdev->req_lock &____s->seqcount irq_context: 0 (wq_completion)hci0 (work_completion)(&hdev->power_on) &hdev->req_lock pool_lock#2 irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) rcu_node_0 irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &rcu_state.expedited_wq irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)hci0 (work_completion)(&hdev->power_on) &hdev->req_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)hci0 (work_completion)(&hdev->power_on) &hdev->req_lock hci_sk_list.lock irq_context: 0 (wq_completion)hci0 (work_completion)(&hdev->power_on) fs_reclaim irq_context: 0 (wq_completion)hci0 (work_completion)(&hdev->power_on) fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)hci0 (work_completion)(&hdev->power_on) tk_core.seq.seqcount irq_context: 0 (wq_completion)hci0 (work_completion)(&hdev->power_on) hci_sk_list.lock irq_context: 0 (wq_completion)hci0 (work_completion)(&hdev->power_on) &data->lock irq_context: 0 (wq_completion)hci0 (work_completion)(&hdev->power_on) &obj_hash[i].lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) chan_list_lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &hdev->lock fs_reclaim irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &hdev->lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &hdev->lock &obj_hash[i].lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &hdev->lock &x->wait#9 irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &hdev->lock &k->list_lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &hdev->lock lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &hdev->lock lock kernfs_idr_lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &hdev->lock &root->kernfs_rwsem irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &hdev->lock &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &hdev->lock bus_type_sem irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &hdev->lock sysfs_symlink_target_lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) 
&hdev->lock &root->kernfs_rwsem irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &hdev->lock &dev->power.lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &hdev->lock dpm_list_mtx irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &hdev->lock &c->lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex fs_reclaim irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex nl_table_lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex rlock-AF_NETLINK irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex nl_table_wait.lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &hdev->lock &k->k_lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &hdev->lock subsys mutex#80 irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &hdev->lock subsys mutex#80 &k->k_lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &hdev->lock &list->lock#9 irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &hdev->lock &hdev->unregister_lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &hdev->lock &hdev->unregister_lock fs_reclaim irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &hdev->lock &hdev->unregister_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &hdev->lock &hdev->unregister_lock &c->lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &hdev->lock &hdev->unregister_lock &hdev->cmd_sync_work_lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &hdev->lock &hdev->unregister_lock rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)hci0#2 
(work_completion)(&hdev->rx_work) &hdev->lock &hdev->unregister_lock rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &hdev->lock &hdev->unregister_lock rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)hci0 (work_completion)(&hdev->cmd_sync_work) irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock irq_context: 0 (wq_completion)hci0 (work_completion)(&hdev->cmd_sync_work) &hdev->cmd_sync_work_lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock fs_reclaim irq_context: 0 (wq_completion)hci0 (work_completion)(&hdev->cmd_sync_work) &hdev->req_lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)hci0 (work_completion)(&hdev->cmd_sync_work) &obj_hash[i].lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock &c->lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock &obj_hash[i].lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock chan_list_lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock &conn->ident_lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock &base->lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock &list->lock#12 irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock &conn->chan_lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &hdev->lock &base->lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &hdev->lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &____s->seqcount#2 irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &____s->seqcount irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &hdev->lock &n->list_lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &hdev->lock &n->list_lock &c->lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &hdev->lock tk_core.seq.seqcount irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_sk_list.lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &hdev->lock &data->lock 
irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &hdev->lock lock kernfs_idr_lock pool_lock#2 irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex &c->lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex &____s->seqcount#2 irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex &____s->seqcount irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex pool_lock#2 irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &hdev->lock &hdev->unregister_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &hdev->lock &hdev->unregister_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->tx_work) irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->tx_work) &list->lock#12 irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->tx_work) tk_core.seq.seqcount irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->tx_work) &list->lock#11 irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->tx_work) &data->read_wait irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->tx_work) &list->lock#9 irq_context: 0 (wq_completion)hci0#2 (work_completion)(&conn->pending_rx_work) irq_context: 0 (wq_completion)hci0#2 (work_completion)(&conn->pending_rx_work) &list->lock#13 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &block->lock remove_cache_srcu irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &block->lock remove_cache_srcu quarantine_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &block->lock remove_cache_srcu &n->list_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &block->lock remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &block->lock remove_cache_srcu &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &block->lock remove_cache_srcu &c->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &block->lock remove_cache_srcu pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &block->lock &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &block->lock batched_entropy_u8.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &block->lock kfence_freelist_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &block->lock &meta->lock irq_context: 0 (wq_completion)wg-kex-wg0#17 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh 
batched_entropy_u32.lock crngs.lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&(&hdev->cmd_timer)->work) irq_context: 0 (wq_completion)hci0#2 (work_completion)(&(&hdev->cmd_timer)->work) (console_sem).lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&(&hdev->cmd_timer)->work) console_lock console_srcu console_owner_lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&(&hdev->cmd_timer)->work) console_lock console_srcu console_owner irq_context: 0 (wq_completion)hci0#2 (work_completion)(&(&hdev->cmd_timer)->work) console_lock console_srcu console_owner &port_lock_key irq_context: 0 (wq_completion)hci0#2 (work_completion)(&(&hdev->cmd_timer)->work) console_lock console_srcu console_owner console_owner_lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&(&hdev->cmd_timer)->work) rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)hci0#2 (work_completion)(&(&hdev->cmd_timer)->work) rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&(&hdev->cmd_timer)->work) rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&(&hdev->cmd_timer)->work) rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&(&hdev->cmd_timer)->work) rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)hci0#2 (work_completion)(&(&hdev->cmd_timer)->work) &rq->__lock irq_context: 0 (wq_completion)wg-crypt-wg1#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &rcu_state.expedited_wq irq_context: 0 (wq_completion)wg-crypt-wg1#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &idev->mc_lock krc.lock &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &idev->mc_lock krc.lock &base->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &idev->mc_lock krc.lock &base->lock &obj_hash[i].lock irq_context: softirq &(&bat_priv->dat.work)->timer rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &idev->mc_lock &vlan_netdev_addr_lock_key/2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &idev->mc_lock &vlan_netdev_addr_lock_key/2 &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &idev->mc_lock &vlan_netdev_addr_lock_key/2 krc.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &vlan_netdev_addr_lock_key/2 irq_context: 0 (wq_completion)gid-cache-wq (work_completion)(&ndev_work->work) devices_rwsem rcu_node_0 irq_context: 0 (wq_completion)events (work_completion)(&umem->work) rcu_read_lock &rcu_state.expedited_wq irq_context: 0 (wq_completion)events 
(work_completion)(&umem->work) rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)events (work_completion)(&umem->work) rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&umem->work) rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &tbl->lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 rtnl_mutex &tbl->lock fill_pool_map-wait-type-override &c->lock irq_context: 0 rtnl_mutex &tbl->lock fill_pool_map-wait-type-override &n->list_lock irq_context: 0 (wq_completion)events (work_completion)(&barr->work) &x->wait#10 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 &cfs_rq->removed.lock irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 &obj_hash[i].lock irq_context: 0 rtnl_mutex &tbl->lock fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 rtnl_mutex &tbl->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 &xa->xa_lock#6 &c->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#51 nsim_bus_dev_list_lock &dev->mutex uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &obj_hash[i].lock pool_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#10 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#10 crngs.lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#10 fs_reclaim irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#10 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#10 devlinks.xa_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#10 &c->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#10 &n->list_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#10 &n->list_lock &c->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#10 &obj_hash[i].lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#10 nl_table_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#10 nl_table_wait.lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#10 &rq->__lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#10 &xa->xa_lock#14 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#10 &xa->xa_lock#14 &c->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#10 &data->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#10 pcpu_alloc_mutex irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex 
&devlink->lock_key#10 pcpu_alloc_mutex pcpu_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#10 &obj_hash[i].lock pool_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#10 fill_pool_map-wait-type-override &c->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#10 fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#10 fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#10 fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#10 fill_pool_map-wait-type-override pool_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#10 &base->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#10 &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#10 pin_fs_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#10 &sb->s_type->i_mutex_key#3 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#10 &sb->s_type->i_mutex_key#3 rename_lock.seqcount irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#10 &sb->s_type->i_mutex_key#3 fs_reclaim irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex (inetaddr_chain).rwsem remove_cache_srcu irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex (inetaddr_chain).rwsem remove_cache_srcu quarantine_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex (inetaddr_chain).rwsem remove_cache_srcu &c->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex (inetaddr_chain).rwsem remove_cache_srcu &n->list_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex (inetaddr_chain).rwsem remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex (inetaddr_chain).rwsem remove_cache_srcu &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex (inetaddr_chain).rwsem remove_cache_srcu pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex cpu_hotplug_lock jump_label_mutex irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex cpu_hotplug_lock jump_label_mutex text_mutex irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex cpu_hotplug_lock jump_label_mutex text_mutex ptlock_ptr(page)#2 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#10 &sb->s_type->i_mutex_key#3 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#10 &sb->s_type->i_mutex_key#3 &c->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex 
&devlink->lock_key#10 &sb->s_type->i_mutex_key#3 &n->list_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#10 &sb->s_type->i_mutex_key#3 &n->list_lock &c->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#10 &sb->s_type->i_mutex_key#3 &dentry->d_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#10 &sb->s_type->i_mutex_key#3 rcu_read_lock rename_lock.seqcount irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#10 &sb->s_type->i_mutex_key#3 &dentry->d_lock &wq irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#10 &sb->s_type->i_mutex_key#3 mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#10 &sb->s_type->i_mutex_key#3 &sb->s_type->i_lock_key#7 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#10 &sb->s_type->i_mutex_key#3 &s->s_inode_list_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#10 &sb->s_type->i_mutex_key#3 tk_core.seq.seqcount irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#10 &sb->s_type->i_mutex_key#3 &sb->s_type->i_lock_key#7 &dentry->d_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#10 &____s->seqcount#2 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#10 &sb->s_type->i_mutex_key#3 &rq->__lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#10 batched_entropy_u32.lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#10 rtnl_mutex irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#10 rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&(&devlink->rwork)->work) &rq->__lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#10 rtnl_mutex &(&net->nexthop.notifier_chain)->rwsem irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#10 rcu_read_lock &data->fib_event_queue_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#10 rcu_read_lock rcu_read_lock &pool->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#10 rcu_read_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#10 rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#10 rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#10 rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock 
&dev->mutex &devlink->lock_key#10 rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#10 rcu_read_lock &rq->__lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#10 rcu_read_lock &tb->tb6_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#10 rcu_read_lock &tb->tb6_lock &net->ipv6.fib6_walker_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#10 rcu_read_lock &tb->tb6_lock &data->fib_event_queue_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#10 rcu_read_lock &obj_hash[i].lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#10 &(&fn_net->fib_chain)->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#10 &devlink_port->type_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#10 stack_depot_init_mutex irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#10 rtnl_mutex bpf_devs_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#10 rtnl_mutex bpf_devs_lock fs_reclaim irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#10 rtnl_mutex bpf_devs_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#10 rtnl_mutex bpf_devs_lock rcu_read_lock rhashtable_bucket irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#10 rtnl_mutex pin_fs_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#10 rtnl_mutex &sb->s_type->i_mutex_key#3 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#10 rtnl_mutex &sb->s_type->i_mutex_key#3 rename_lock.seqcount irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#10 rtnl_mutex &sb->s_type->i_mutex_key#3 fs_reclaim irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#10 rtnl_mutex &sb->s_type->i_mutex_key#3 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#10 rtnl_mutex &sb->s_type->i_mutex_key#3 &dentry->d_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#10 rtnl_mutex &sb->s_type->i_mutex_key#3 rcu_read_lock rename_lock.seqcount irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#10 rtnl_mutex &sb->s_type->i_mutex_key#3 &dentry->d_lock &wq irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#10 rtnl_mutex &sb->s_type->i_mutex_key#3 mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#10 rtnl_mutex &sb->s_type->i_mutex_key#3 &sb->s_type->i_lock_key#7 irq_context: 0 sb_writers#8 
&of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#10 rtnl_mutex &sb->s_type->i_mutex_key#3 &s->s_inode_list_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#10 rtnl_mutex &sb->s_type->i_mutex_key#3 tk_core.seq.seqcount irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#10 rtnl_mutex &sb->s_type->i_mutex_key#3 &sb->s_type->i_lock_key#7 &dentry->d_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#10 rtnl_mutex &sb->s_type->i_mutex_key#3 &c->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#10 rtnl_mutex &____s->seqcount irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#10 rtnl_mutex &obj_hash[i].lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#10 rtnl_mutex fs_reclaim irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#10 rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#10 rtnl_mutex &base->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#10 rtnl_mutex &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&(&devlink->rwork)->work) rcu_state.exp_mutex &rnp->exp_wq[0] irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#10 rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#10 rtnl_mutex (work_completion)(&(&devlink_port->type_warn_dw)->work) irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#10 rtnl_mutex &devlink_port->type_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#10 rtnl_mutex &c->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#10 rtnl_mutex nl_table_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#10 rtnl_mutex nl_table_wait.lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#10 rtnl_mutex net_rwsem irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#10 rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#10 rtnl_mutex &tn->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#10 rtnl_mutex &x->wait#9 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#10 rtnl_mutex &k->list_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#10 rtnl_mutex gdp_mutex irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#10 rtnl_mutex gdp_mutex &k->list_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 
nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#10 rtnl_mutex gdp_mutex fs_reclaim irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#10 rtnl_mutex gdp_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#10 rtnl_mutex gdp_mutex lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#10 rtnl_mutex gdp_mutex lock kernfs_idr_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#10 rtnl_mutex gdp_mutex &root->kernfs_rwsem irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#10 rtnl_mutex gdp_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#10 rtnl_mutex gdp_mutex kobj_ns_type_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#10 rtnl_mutex lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#10 rtnl_mutex lock kernfs_idr_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#10 rtnl_mutex &root->kernfs_rwsem irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#10 rtnl_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#10 rtnl_mutex bus_type_sem irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#10 rtnl_mutex sysfs_symlink_target_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#10 rtnl_mutex &____s->seqcount#2 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#10 rtnl_mutex lock kernfs_idr_lock &c->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#10 rtnl_mutex &root->kernfs_rwsem irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#10 rtnl_mutex &dev->power.lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#10 rtnl_mutex dpm_list_mtx irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#10 rtnl_mutex uevent_sock_mutex irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#10 rtnl_mutex uevent_sock_mutex fs_reclaim irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#10 rtnl_mutex uevent_sock_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#10 rtnl_mutex uevent_sock_mutex nl_table_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#10 rtnl_mutex uevent_sock_mutex &obj_hash[i].lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#10 rtnl_mutex uevent_sock_mutex nl_table_wait.lock irq_context: 0 
sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#10 rtnl_mutex &k->k_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#10 rtnl_mutex subsys mutex#17 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#10 rtnl_mutex subsys mutex#17 &k->k_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#10 rtnl_mutex &dir->lock#2 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#10 rtnl_mutex &n->list_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#10 rtnl_mutex &n->list_lock &c->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#10 rtnl_mutex dev_hotplug_mutex irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#10 rtnl_mutex dev_hotplug_mutex &dev->power.lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#10 rtnl_mutex dev_base_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#10 rtnl_mutex input_pool.lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#10 rtnl_mutex rcu_read_lock &pool->lock/1 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#10 rtnl_mutex rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#10 rtnl_mutex rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#10 rtnl_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#10 rtnl_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#10 rtnl_mutex batched_entropy_u32.lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#10 rtnl_mutex &tbl->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#10 rtnl_mutex sysctl_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#10 rtnl_mutex failover_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#10 rtnl_mutex pcpu_alloc_mutex irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#10 rtnl_mutex pcpu_alloc_mutex pcpu_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#10 rtnl_mutex proc_subdir_lock irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 &xa->xa_lock#6 &n->list_lock irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 &xa->xa_lock#6 &n->list_lock &c->lock irq_context: 0 rtnl_mutex &bridge_netdev_addr_lock_key fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 rtnl_mutex &bridge_netdev_addr_lock_key 
fill_pool_map-wait-type-override &c->lock irq_context: 0 rtnl_mutex &bridge_netdev_addr_lock_key fill_pool_map-wait-type-override &n->list_lock irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 rcu_node_0 irq_context: 0 (wq_completion)wg-kex-wg2#17 (work_completion)(&peer->transmit_handshake_work) &rq->__lock irq_context: 0 rtnl_mutex &bridge_netdev_addr_lock_key fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 rtnl_mutex &bridge_netdev_addr_lock_key fill_pool_map-wait-type-override pool_lock irq_context: 0 epnested_mutex &ep->mtx remove_cache_srcu irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 &xa->xa_lock#6 &____s->seqcount#2 irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 &xa->xa_lock#6 &____s->seqcount irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 rcu_read_lock &rcu_state.gp_wq irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &xs->mutex &____s->seqcount#2 irq_context: 0 epnested_mutex &ep->mtx remove_cache_srcu quarantine_lock irq_context: 0 epnested_mutex &ep->mtx remove_cache_srcu &c->lock irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 fs_reclaim &rq->__lock irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 epnested_mutex &ep->mtx remove_cache_srcu &n->list_lock irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 &wb->list_lock irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 &wb->list_lock &sb->s_type->i_lock_key#5 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET slock-AF_INET &net->xfrm.xfrm_policy_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET slock-AF_INET &policy->lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET slock-AF_INET &list->lock#33 irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 rcu_read_lock &rcu_state.expedited_wq irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 epnested_mutex &ep->mtx remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 epnested_mutex &ep->mtx remove_cache_srcu &obj_hash[i].lock irq_context: 0 epnested_mutex &ep->mtx remove_cache_srcu &rq->__lock irq_context: 0 epnested_mutex &ep->mtx remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 epnested_mutex &ep->mtx rcu_read_lock rcu_node_0 irq_context: 0 epnested_mutex &ep->mtx rcu_read_lock &rq->__lock irq_context: 0 epnested_mutex &ep->mtx rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 epnested_mutex &ep->mtx &sem->wait_lock irq_context: 0 rtnl_mutex &net->ipv6.fib6_gc_lock fill_pool_map-wait-type-override &c->lock irq_context: 0 rtnl_mutex &net->ipv6.fib6_gc_lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 &net->xfrm.xfrm_cfg_mutex &pfk->dump_lock &net->xfrm.xfrm_policy_lock &n->list_lock irq_context: 0 &net->xfrm.xfrm_cfg_mutex &pfk->dump_lock &net->xfrm.xfrm_policy_lock &n->list_lock &c->lock irq_context: 0 
rtnl_mutex &net->ipv6.fib6_gc_lock fill_pool_map-wait-type-override pool_lock irq_context: 0 rlock-AF_IEEE802154 irq_context: 0 rtnl_mutex (work_completion)(&(&bond->alb_work)->work) &rq->__lock irq_context: 0 rtnl_mutex rcu_read_lock rcu_read_lock &pool->lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 rtnl_mutex rcu_read_lock rcu_read_lock &pool->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &lock->wait_lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &p->pi_lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &p->pi_lock &rq->__lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &rq->__lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &root->kernfs_rwsem &sem->wait_lock irq_context: 0 (wq_completion)events (work_completion)(&data->fib_event_work) &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&data->fib_event_work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-crypt-wg2#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem ipvs->est_mutex &____s->seqcount#2 irq_context: 0 pernet_ops_rwsem ipvs->est_mutex &____s->seqcount irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&rew->rew_work) rcu_state.exp_wake_mutex &obj_hash[i].lock pool_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem &sem->wait_lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &rxe->usdev_lock rtnl_mutex rtnl_mutex.wait_lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &rxe->usdev_lock rtnl_mutex &rq->__lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &rxe->usdev_lock rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem rtnl_mutex dev_pm_qos_sysfs_mtx &root->kernfs_rwsem &sem->wait_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &nft_net->commit_mutex &rq->__lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_TIPC &tn->nametbl_lock &service->lock &obj_hash[i].lock pool_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &nft_net->commit_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &p->pi_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)ipv6_addrconf &rq->__lock irq_context: 0 
pernet_ops_rwsem rtnl_mutex dev_pm_qos_sysfs_mtx &sem->wait_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex dev_pm_qos_sysfs_mtx &p->pi_lock irq_context: 0 (wq_completion)ipv6_addrconf &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem rdma_nets_rwsem rdma_nets_rwsem.wait_lock irq_context: 0 pernet_ops_rwsem rdma_nets_rwsem &rq->__lock irq_context: 0 pernet_ops_rwsem rdma_nets_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem.wait_lock irq_context: 0 pernet_ops_rwsem devices_rwsem &p->pi_lock irq_context: 0 pernet_ops_rwsem devices_rwsem &p->pi_lock &rq->__lock irq_context: 0 pernet_ops_rwsem devices_rwsem &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem devices_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem rdma_nets_rwsem.wait_lock irq_context: 0 pernet_ops_rwsem &root->kernfs_rwsem &sem->wait_lock irq_context: 0 pernet_ops_rwsem &root->kernfs_rwsem &rq->__lock irq_context: 0 pernet_ops_rwsem &root->kernfs_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem &root->kernfs_rwsem &rcu_state.expedited_wq irq_context: 0 pernet_ops_rwsem &root->kernfs_rwsem &rcu_state.expedited_wq &p->pi_lock irq_context: 0 pernet_ops_rwsem &root->kernfs_rwsem &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 pernet_ops_rwsem &root->kernfs_rwsem &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem rtnl_mutex dev_pm_qos_sysfs_mtx &root->kernfs_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem rtnl_mutex &data->lock &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &data->lock &base->lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &data->lock &base->lock &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem &root->kernfs_rwsem &p->pi_lock irq_context: 0 pernet_ops_rwsem stock_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#40 rcu_read_lock &rcu_state.expedited_wq irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#40 rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#40 rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#40 rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem &root->kernfs_rwsem rcu_read_lock &rcu_state.gp_wq irq_context: 0 pernet_ops_rwsem &root->kernfs_rwsem rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 pernet_ops_rwsem &root->kernfs_rwsem rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 pernet_ops_rwsem &root->kernfs_rwsem rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem &root->kernfs_rwsem rcu_read_lock &cfs_rq->removed.lock irq_context: 0 pernet_ops_rwsem &root->kernfs_rwsem rcu_read_lock &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem &root->kernfs_rwsem rcu_read_lock pool_lock#2 irq_context: 0 pernet_ops_rwsem &root->kernfs_rwsem rcu_read_lock &rcu_state.expedited_wq irq_context: 0 pernet_ops_rwsem &root->kernfs_rwsem rcu_read_lock 
&rcu_state.expedited_wq &p->pi_lock irq_context: 0 pernet_ops_rwsem &root->kernfs_rwsem rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 pernet_ops_rwsem &root->kernfs_rwsem rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem dev_pm_qos_sysfs_mtx &root->kernfs_rwsem &sem->wait_lock irq_context: 0 pernet_ops_rwsem dev_pm_qos_sysfs_mtx &sem->wait_lock irq_context: 0 pernet_ops_rwsem dev_pm_qos_sysfs_mtx &p->pi_lock irq_context: 0 pernet_ops_rwsem &ht->mutex rcu_read_lock rcu_node_0 irq_context: 0 pernet_ops_rwsem &ht->mutex rcu_read_lock &rq->__lock irq_context: 0 pernet_ops_rwsem &ht->mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem rtnl_mutex dev_pm_qos_sysfs_mtx &p->pi_lock &rq->__lock irq_context: 0 pernet_ops_rwsem rtnl_mutex dev_pm_qos_sysfs_mtx &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem &root->kernfs_rwsem &p->pi_lock &rq->__lock irq_context: 0 pernet_ops_rwsem &root->kernfs_rwsem &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem dev_pm_qos_sysfs_mtx &root->kernfs_rwsem &rq->__lock irq_context: 0 pernet_ops_rwsem dev_pm_qos_sysfs_mtx &root->kernfs_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem dev_pm_qos_sysfs_mtx &p->pi_lock &rq->__lock irq_context: 0 pernet_ops_rwsem dev_pm_qos_sysfs_mtx &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem dev_pm_qos_sysfs_mtx &rq->__lock irq_context: 0 pernet_ops_rwsem dev_pm_qos_sysfs_mtx &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem dev_pm_qos_sysfs_mtx &root->kernfs_rwsem &sem->wait_lock irq_context: 0 pernet_ops_rwsem dev_pm_qos_sysfs_mtx &root->kernfs_rwsem &rq->__lock irq_context: 0 pernet_ops_rwsem dev_pm_qos_sysfs_mtx &root->kernfs_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem &root->kernfs_rwsem &rq->__lock &cfs_rq->removed.lock irq_context: softirq (&n->timer) rcu_read_lock lock#8 irq_context: softirq (&n->timer) batched_entropy_u8.lock irq_context: softirq (&n->timer) &n->lock &____s->seqcount#9 irq_context: 0 (wq_completion)wg-kex-wg1#18 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &data->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg1#18 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &data->lock &base->lock irq_context: 0 (wq_completion)wg-kex-wg1#18 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) 
&peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &data->lock &base->lock &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem __ip_vs_app_mutex &obj_hash[i].lock pool_lock irq_context: 0 pernet_ops_rwsem pcpu_lock stock_lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock &pcp->lock &zone->lock irq_context: 0 &type->s_umount_key#22/1 &____s->seqcount#2 irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#10 rcu_node_0 irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#10 &rcu_state.expedited_wq irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#10 &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#10 &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#10 &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_long (work_completion)(&(&br->gc_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)gid-cache-wq (work_completion)(&work->work) devices_rwsem rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)gid-cache-wq (work_completion)(&work->work) devices_rwsem rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)gid-cache-wq (work_completion)(&work->work) devices_rwsem rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &root->kernfs_rwsem &cfs_rq->removed.lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &root->kernfs_rwsem &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex fs_reclaim &cfs_rq->removed.lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex fs_reclaim &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex fs_reclaim &obj_hash[i].lock pool_lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &rq->__lock &cfs_rq->removed.lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex remove_cache_srcu rcu_read_lock &rcu_state.expedited_wq irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex remove_cache_srcu rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex remove_cache_srcu rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex remove_cache_srcu rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bat_events 
(work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-kex-wg2#18 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &rq->__lock irq_context: 0 (wq_completion)wg-kex-wg2#18 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &rxe->usdev_lock &p->pi_lock &rq->__lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &rxe->usdev_lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &rxe->usdev_lock rcu_node_0 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem rcu_node_0 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem &rcu_state.expedited_wq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem &rcu_state.expedited_wq &p->pi_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex defrag4_mutex &obj_hash[i].lock pool_lock irq_context: 0 pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex defrag4_mutex &rq->__lock irq_context: 0 pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex defrag4_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem rcu_read_lock &pool->lock/1 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &root->kernfs_rwsem &p->pi_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem &meta->lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem kfence_freelist_lock irq_context: 0 (wq_completion)events (work_completion)(&pwq->unbound_release_work) rcu_state.exp_mutex &rq->__lock &cfs_rq->removed.lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex batched_entropy_u8.lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex kfence_freelist_lock irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex defrag4_mutex nf_hook_mutex &____s->seqcount#2 irq_context: 0 pernet_ops_rwsem &device->compat_devs_mutex &lock->wait_lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#10 &cfs_rq->removed.lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#10 &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem &device->compat_devs_mutex &rq->__lock 
irq_context: 0 pernet_ops_rwsem &device->compat_devs_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rcu_read_lock rcu_read_lock &sighand->siglock &n->list_lock irq_context: 0 rcu_read_lock rcu_read_lock &sighand->siglock &n->list_lock &c->lock irq_context: 0 (wq_completion)wg-kex-wg2#17 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh &____s->seqcount#2 irq_context: 0 (wq_completion)wg-kex-wg2#17 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh &____s->seqcount irq_context: softirq (&n->timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 &data->lock irq_context: 0 fill_pool_map-wait-type-override &rcu_state.expedited_wq &p->pi_lock irq_context: 0 rtnl_mutex &dev->tx_global_lock &qdisc_xmit_lock_key#5 irq_context: 0 pernet_ops_rwsem rcu_read_lock stock_lock irq_context: 0 pernet_ops_rwsem rcu_read_lock pcpu_lock stock_lock irq_context: 0 (wq_completion)events (work_completion)(&data->fib_event_work) &data->fib_lock remove_cache_srcu &meta->lock irq_context: 0 (wq_completion)events (work_completion)(&data->fib_event_work) &data->fib_lock remove_cache_srcu kfence_freelist_lock irq_context: 0 (wq_completion)wg-kex-wg2#18 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &n->list_lock irq_context: 0 (wq_completion)wg-kex-wg2#18 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &n->list_lock &c->lock irq_context: 0 &sb->s_type->i_mutex_key#9 &cfs_rq->removed.lock irq_context: 0 &tsk->futex_exit_mutex &mm->mmap_lock &c->lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh &hsr->seqnr_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh &hsr->seqnr_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh &hsr->seqnr_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 pernet_ops_rwsem rtnl_mutex uevent_sock_mutex &cfs_rq->removed.lock irq_context: 0 pernet_ops_rwsem rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &cfs_rq->removed.lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem rdma_nets_rwsem.wait_lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem rcu_read_lock &rq->__lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &root->kernfs_rwsem &rq->__lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &root->kernfs_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem &root->kernfs_rwsem &rq->__lock &cfs_rq->removed.lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem 
&device->compat_devs_mutex fs_reclaim rcu_node_0 irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex fs_reclaim &rcu_state.expedited_wq irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex fs_reclaim &rcu_state.expedited_wq &p->pi_lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex fs_reclaim &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex fs_reclaim &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem &root->kernfs_rwsem &root->kernfs_iattr_rwsem &rq->__lock &cfs_rq->removed.lock irq_context: 0 pernet_ops_rwsem __ip_vs_app_mutex fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 pernet_ops_rwsem __ip_vs_app_mutex fill_pool_map-wait-type-override pool_lock irq_context: 0 sk_lock-AF_INET rcu_read_lock batched_entropy_u8.lock irq_context: 0 sk_lock-AF_INET rcu_read_lock kfence_freelist_lock irq_context: 0 sb_writers tomoyo_ss &n->list_lock irq_context: 0 sb_writers tomoyo_ss &n->list_lock &c->lock irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 &sb->s_type->i_lock_key#5 &xa->xa_lock#6 irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 lock#5 irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 &lruvec->lru_lock irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 &sb->s_type->i_lock_key#5 &xa->xa_lock#6 &obj_hash[i].lock irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 &sb->s_type->i_lock_key#5 &xa->xa_lock#6 pool_lock#2 irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 &sb->s_type->i_lock_key#5 &xa->xa_lock#6 rcu_read_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 &info->lock key#9 irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock &tbl->lock &n->list_lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock &tbl->lock &n->list_lock &c->lock irq_context: 0 cb_lock &devlink->lock_key#9 irq_context: 0 cb_lock &devlink->lock_key#9 rcu_read_lock &p->alloc_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-sk_lock-AF_TIPC &tn->nametbl_lock &service->lock krc.lock &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-sk_lock-AF_TIPC &tn->nametbl_lock &service->lock krc.lock &base->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-sk_lock-AF_TIPC &tn->nametbl_lock &service->lock krc.lock &base->lock &obj_hash[i].lock irq_context: 0 cb_lock &devlink->lock_key#9 pin_fs_lock irq_context: 0 cb_lock &devlink->lock_key#9 &dentry->d_lock irq_context: 0 cb_lock &devlink->lock_key#9 &sb->s_type->i_mutex_key#3 irq_context: 0 cb_lock &devlink->lock_key#9 &sb->s_type->i_mutex_key#3 &dentry->d_lock irq_context: 0 cb_lock &devlink->lock_key#9 &sb->s_type->i_mutex_key#3 tk_core.seq.seqcount irq_context: 0 cb_lock &devlink->lock_key#9 &sb->s_type->i_mutex_key#3 rename_lock.seqcount irq_context: 0 cb_lock &devlink->lock_key#9 &sb->s_type->i_mutex_key#3 pin_fs_lock irq_context: 0 cb_lock &devlink->lock_key#9 &sb->s_type->i_mutex_key#3 rcu_read_lock mount_lock irq_context: 0 cb_lock &devlink->lock_key#9 &sb->s_type->i_mutex_key#3 rcu_read_lock mount_lock mount_lock.seqcount irq_context: 0 cb_lock &devlink->lock_key#9 &sb->s_type->i_mutex_key#3 mount_lock irq_context: 0 cb_lock &devlink->lock_key#9 &sb->s_type->i_mutex_key#3 mount_lock mount_lock.seqcount 
irq_context: 0 cb_lock &devlink->lock_key#9 &sb->s_type->i_mutex_key#3 rcu_read_lock &dentry->d_lock irq_context: 0 cb_lock &devlink->lock_key#9 rcu_read_lock &dentry->d_lock irq_context: 0 cb_lock &devlink->lock_key#9 &fsnotify_mark_srcu irq_context: 0 cb_lock &devlink->lock_key#9 &sb->s_type->i_lock_key#7 irq_context: 0 cb_lock &devlink->lock_key#9 &s->s_inode_list_lock irq_context: 0 cb_lock &devlink->lock_key#9 &xa->xa_lock#6 irq_context: 0 cb_lock &devlink->lock_key#9 &obj_hash[i].lock irq_context: 0 cb_lock &devlink->lock_key#9 rcu_read_lock mount_lock irq_context: 0 cb_lock &devlink->lock_key#9 rcu_read_lock mount_lock mount_lock.seqcount irq_context: 0 cb_lock &devlink->lock_key#9 mount_lock irq_context: 0 cb_lock &devlink->lock_key#9 mount_lock mount_lock.seqcount irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex netpoll_srcu irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex net_rwsem irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex &pn->hash_lock irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex &tn->lock irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex &dev->tx_global_lock irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex &dev->tx_global_lock _xmit_ETHER#2 irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex &dev->tx_global_lock &obj_hash[i].lock irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex rcu_state.exp_mutex rcu_node_0 irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex rcu_state.exp_mutex &obj_hash[i].lock irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex rcu_state.exp_mutex &rnp->exp_wq[3] irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex rcu_state.exp_mutex &rq->__lock irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex rcu_state.exp_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_INET pack_mutex irq_context: 0 sk_lock-AF_INET text_mutex irq_context: 0 sk_lock-AF_INET text_mutex ptlock_ptr(page)#2 irq_context: 0 sk_lock-AF_INET &fp->aux->used_maps_mutex irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex &obj_hash[i].lock irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex &sch->q.lock irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex fs_reclaim irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex &c->lock irq_context: 0 
cb_lock &devlink->lock_key#9 rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex nl_table_lock irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex nl_table_wait.lock irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex __ip_vs_mutex irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex __ip_vs_mutex &ipvs->dest_trash_lock irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex _xmit_ETHER irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex _xmit_ETHER &obj_hash[i].lock irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex _xmit_ETHER krc.lock irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex &im->lock irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex krc.lock irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex &tbl->lock irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex class irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex (&tbl->proxy_timer) irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex &base->lock irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex &net->xfrm.xfrm_state_lock irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex &net->xfrm.xfrm_policy_lock irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex flowtable_lock irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex &dir->lock irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex rcu_read_lock &pool->lock irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex &rq->__lock irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_SMC k-clock-AF_INET irq_context: 0 sk_lock-AF_SMC k-sk_lock-AF_INET &____s->seqcount#8 irq_context: 0 sk_lock-AF_SMC k-sk_lock-AF_INET &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock &c->lock irq_context: 0 sk_lock-AF_SMC k-sk_lock-AF_INET &h->lhash2[i].lock irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex rcu_read_lock &tb->tb6_lock irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex rcu_read_lock &tb->tb6_lock &net->ipv6.fib6_walker_lock irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex rcu_read_lock &tb->tb6_lock &net->ipv6.fib6_walker_lock irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex rcu_read_lock &tb->tb6_lock rt6_exception_lock irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex rcu_read_lock &tb->tb6_lock &c->lock irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex rcu_read_lock &tb->tb6_lock nl_table_lock irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex rcu_read_lock &tb->tb6_lock &obj_hash[i].lock irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex rcu_read_lock &tb->tb6_lock nl_table_wait.lock irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex rcu_read_lock &tb->tb6_lock rcu_read_lock &data->fib_event_queue_lock irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex rcu_read_lock &tb->tb6_lock rcu_read_lock rcu_read_lock &pool->lock irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex rcu_read_lock &tb->tb6_lock rcu_read_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex rcu_read_lock &tb->tb6_lock 
rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex rcu_read_lock &tb->tb6_lock rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex rcu_read_lock &tb->tb6_lock rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex &tbl->lock &n->lock irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex &tbl->lock &n->lock irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex &tbl->lock &n->lock &____s->seqcount#9 irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex &tbl->lock nl_table_lock irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex &tbl->lock &obj_hash[i].lock irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex &tbl->lock nl_table_wait.lock irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex &tbl->lock rcu_read_lock lock#8 irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex &tbl->lock rcu_read_lock id_table_lock irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex &tbl->lock &dir->lock#2 irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex &tbl->lock krc.lock irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex &net->ipv6.addrconf_hash_lock irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex &net->ipv6.addrconf_hash_lock &obj_hash[i].lock irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex &ndev->lock irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex &ndev->lock &obj_hash[i].lock irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex &ndev->lock &base->lock irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex &ndev->lock &base->lock &obj_hash[i].lock irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex &ifa->lock irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex &tb->tb6_lock irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex rcu_read_lock &obj_hash[i].lock irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex rcu_read_lock rcu_read_lock &pool->lock/1 irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex rcu_read_lock rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex rcu_read_lock rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex rcu_read_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex rcu_read_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex rcu_read_lock &dir->lock irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex rcu_read_lock rcu_read_lock &pool->lock irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex rcu_read_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex rcu_read_lock &net->sctp.local_addr_lock irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex rcu_read_lock &net->sctp.local_addr_lock &net->sctp.addr_wq_lock irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex rcu_read_lock &net->sctp.local_addr_lock &net->sctp.addr_wq_lock &obj_hash[i].lock irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex rcu_read_lock &net->sctp.local_addr_lock &net->sctp.addr_wq_lock &base->lock irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex rcu_read_lock &net->sctp.local_addr_lock &net->sctp.addr_wq_lock 
&base->lock &obj_hash[i].lock irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex rcu_read_lock rcu_node_0 irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex rcu_read_lock krc.lock irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex rcu_read_lock &rcu_state.gp_wq irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex &idev->mc_lock irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex &idev->mc_lock _xmit_ETHER irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex &idev->mc_lock _xmit_ETHER &obj_hash[i].lock irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex &idev->mc_lock _xmit_ETHER krc.lock irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex rcu_state.exp_mutex &rnp->exp_wq[0] irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex &idev->mc_query_lock irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex &idev->mc_query_lock &obj_hash[i].lock irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex (work_completion)(&(&idev->mc_report_work)->work) irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex &net->ipv6.fib6_gc_lock rcu_read_lock &tb->tb6_lock irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex &net->ipv6.fib6_gc_lock rcu_read_lock &tb->tb6_lock &net->ipv6.fib6_walker_lock irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex &net->ipv6.fib6_gc_lock &obj_hash[i].lock irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex dev_base_lock irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex cpu_hotplug_lock irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex cpu_hotplug_lock &list->lock#5 irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex rcu_state.exp_mutex &rnp->exp_wq[1] irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex &dir->lock#2 irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex bpf_devs_lock irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex &hwstats->hwsdev_list_lock irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex (work_completion)(&(&devlink_port->type_warn_dw)->work) irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex &devlink_port->type_lock irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex rcu_read_lock &pool->lock/1 irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock &devlink->lock_key#9 
rtnl_mutex &in_dev->mc_tomb_lock irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex sysctl_lock irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex sysctl_lock &obj_hash[i].lock irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex sysctl_lock &obj_hash[i].lock pool_lock irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex sysctl_lock krc.lock irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex remove_cache_srcu irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex remove_cache_srcu quarantine_lock irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex remove_cache_srcu &c->lock irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex remove_cache_srcu &n->list_lock irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex remove_cache_srcu &rq->__lock irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex remove_cache_srcu &obj_hash[i].lock irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex remove_cache_srcu pool_lock#2 irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex remove_cache_srcu rcu_read_lock rcu_node_0 irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex remove_cache_srcu rcu_read_lock &rq->__lock irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex remove_cache_srcu rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex &ul->lock irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex &net->xdp.lock irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex mirred_list_lock irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex &nft_net->commit_mutex irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex proc_subdir_lock irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex &ent->pde_unload_lock irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex proc_inum_ida.xa_lock irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex rcu_state.exp_mutex &rnp->exp_wq[2] irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex &idev->mc_report_lock irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex &idev->mc_lock &obj_hash[i].lock irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex &idev->mc_lock krc.lock irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex &pnn->pndevs.lock irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex &pnn->routes.lock irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex &pnettable->lock irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex smc_ib_devices.mutex irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex &n->list_lock irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex &n->list_lock &c->lock irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex target_list_lock irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex (console_sem).lock irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex console_lock console_srcu console_owner_lock irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex console_lock console_srcu console_owner irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex console_lock console_srcu console_owner &port_lock_key irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex console_lock console_srcu console_owner console_owner_lock irq_context: softirq (&icsk->icsk_delack_timer) slock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: softirq (&icsk->icsk_delack_timer) slock-AF_INET 
rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: softirq (&icsk->icsk_delack_timer) slock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: softirq (&icsk->icsk_delack_timer) slock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &n->list_lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &n->list_lock &c->lock irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &n->list_lock irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &n->list_lock &c->lock irq_context: 0 sk_lock-AF_NETROM &mm->mmap_lock irq_context: 0 sk_lock-AF_NETROM &mm->mmap_lock &rq->__lock irq_context: 0 sk_lock-AF_NETROM &mm->mmap_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex &base->lock &obj_hash[i].lock irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex &root->kernfs_rwsem irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex &root->kernfs_rwsem kernfs_idr_lock irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex &root->kernfs_rwsem &obj_hash[i].lock irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex uevent_sock_mutex irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex uevent_sock_mutex fs_reclaim irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex uevent_sock_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex uevent_sock_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex uevent_sock_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex uevent_sock_mutex nl_table_lock irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex uevent_sock_mutex &obj_hash[i].lock irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex uevent_sock_mutex nl_table_wait.lock irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex sysfs_symlink_target_lock irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex kernfs_idr_lock irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex &k->list_lock irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex &root->kernfs_rwsem irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex uevent_sock_mutex &c->lock irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex uevent_sock_mutex &n->list_lock irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex uevent_sock_mutex &n->list_lock &c->lock irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex uevent_sock_mutex &rq->__lock 
irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex uevent_sock_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_INET hrtimer_bases.lock irq_context: 0 sk_lock-AF_INET hrtimer_bases.lock tk_core.seq.seqcount irq_context: 0 sk_lock-AF_INET hrtimer_bases.lock &obj_hash[i].lock irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex dev_hotplug_mutex irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex dev_hotplug_mutex &dev->power.lock irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex dev_hotplug_mutex &k->k_lock irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex dev_pm_qos_sysfs_mtx irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex dev_pm_qos_sysfs_mtx &root->kernfs_rwsem irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex dev_pm_qos_sysfs_mtx &root->kernfs_rwsem irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex dev_pm_qos_sysfs_mtx dev_pm_qos_mtx irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex &k->k_lock irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex &k->k_lock klist_remove_lock irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex &root->kernfs_rwsem &rq->__lock irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex &root->kernfs_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex subsys mutex#17 irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex subsys mutex#17 &k->k_lock irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex subsys mutex#17 &k->k_lock klist_remove_lock irq_context: 0 slock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 slock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: 0 slock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 slock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 hrtimer_bases.lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 hrtimer_bases.lock tk_core.seq.seqcount irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 hrtimer_bases.lock &obj_hash[i].lock irq_context: softirq slock-AF_INET &c->lock irq_context: softirq slock-AF_INET hrtimer_bases.lock irq_context: softirq slock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &n->list_lock irq_context: softirq slock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &n->list_lock &c->lock irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex &x->wait#9 irq_context: softirq (&icsk->icsk_delack_timer) slock-AF_INET hrtimer_bases.lock irq_context: softirq (&icsk->icsk_delack_timer) slock-AF_INET hrtimer_bases.lock &obj_hash[i].lock irq_context: softirq (&icsk->icsk_delack_timer) slock-AF_INET hrtimer_bases.lock tk_core.seq.seqcount irq_context: softirq slock-AF_INET &____s->seqcount#2 irq_context: softirq slock-AF_INET &____s->seqcount irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex dpm_list_mtx irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex &dev->power.lock irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex deferred_probe_mutex irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex device_links_lock irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex mmu_notifier_invalidate_range_start irq_context: softirq slock-AF_INET &n->list_lock irq_context: softirq slock-AF_INET &n->list_lock &c->lock irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex 
mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex mmu_notifier_invalidate_range_start &cfs_rq->removed.lock irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex mmu_notifier_invalidate_range_start &obj_hash[i].lock irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex uevent_sock_mutex mmu_notifier_invalidate_range_start irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex gdp_mutex irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex cpu_hotplug_lock xps_map_mutex irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex cpu_hotplug_lock xps_map_mutex &rq->__lock irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex cpu_hotplug_lock xps_map_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#2 irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex pin_fs_lock irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex &dentry->d_lock irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex &sb->s_type->i_mutex_key#3 irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex &sb->s_type->i_mutex_key#3 &dentry->d_lock irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex &sb->s_type->i_mutex_key#3 tk_core.seq.seqcount irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex &sb->s_type->i_mutex_key#3 rename_lock.seqcount irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex &sb->s_type->i_mutex_key#3 pin_fs_lock irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex &sb->s_type->i_mutex_key#3 rcu_read_lock mount_lock irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex &sb->s_type->i_mutex_key#3 rcu_read_lock mount_lock mount_lock.seqcount irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex &sb->s_type->i_mutex_key#3 mount_lock irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex &sb->s_type->i_mutex_key#3 mount_lock mount_lock.seqcount irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex &sb->s_type->i_mutex_key#3 rcu_read_lock &dentry->d_lock irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex rcu_read_lock &dentry->d_lock irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex &fsnotify_mark_srcu irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex &sb->s_type->i_lock_key#7 irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex &s->s_inode_list_lock irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex &xa->xa_lock#6 irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex rcu_read_lock mount_lock irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex rcu_read_lock mount_lock mount_lock.seqcount irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex mount_lock irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex mount_lock mount_lock.seqcount irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex bpf_devs_lock rcu_read_lock rhashtable_bucket irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex bpf_devs_lock &obj_hash[i].lock irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex.wait_lock irq_context: 0 cb_lock &devlink->lock_key#9 &p->pi_lock irq_context: 0 cb_lock &devlink->lock_key#9 &p->pi_lock &rq->__lock irq_context: 0 cb_lock &devlink->lock_key#9 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock &devlink->lock_key#9 
rcu_state.barrier_mutex irq_context: 0 cb_lock &devlink->lock_key#9 rcu_state.barrier_mutex rcu_state.barrier_lock irq_context: 0 cb_lock &devlink->lock_key#9 rcu_state.barrier_mutex rcu_state.barrier_lock &obj_hash[i].lock irq_context: 0 cb_lock &devlink->lock_key#9 rcu_state.barrier_mutex &x->wait#24 irq_context: 0 cb_lock &devlink->lock_key#9 rcu_state.barrier_mutex &rq->__lock irq_context: 0 cb_lock &devlink->lock_key#9 rcu_state.barrier_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &root->kernfs_rwsem &p->pi_lock irq_context: 0 cb_lock &devlink->lock_key#9 dev_base_lock irq_context: 0 cb_lock &devlink->lock_key#9 lweventlist_lock irq_context: 0 cb_lock &devlink->lock_key#9 netdev_unregistering_wq.lock irq_context: 0 cb_lock &devlink->lock_key#9 krc.lock irq_context: 0 cb_lock &devlink->lock_key#9 &dir->lock#2 irq_context: 0 cb_lock &devlink->lock_key#9 &dir->lock#2 &obj_hash[i].lock irq_context: 0 cb_lock &devlink->lock_key#9 &sb->s_type->i_mutex_key#3 &dentry->d_lock &dentry->d_lock/1 irq_context: 0 cb_lock &devlink->lock_key#9 &sb->s_type->i_mutex_key#3 &fsnotify_mark_srcu irq_context: 0 cb_lock &devlink->lock_key#9 &sb->s_type->i_mutex_key#3 &sb->s_type->i_lock_key#7 irq_context: 0 cb_lock &devlink->lock_key#9 &sb->s_type->i_mutex_key#3 &s->s_inode_list_lock irq_context: 0 cb_lock &devlink->lock_key#9 &sb->s_type->i_mutex_key#3 &xa->xa_lock#6 irq_context: 0 cb_lock &devlink->lock_key#9 &sb->s_type->i_mutex_key#3 &obj_hash[i].lock irq_context: 0 cb_lock &devlink->lock_key#9 &sb->s_type->i_mutex_key#3 &obj_hash[i].lock pool_lock irq_context: 0 cb_lock &devlink->lock_key#9 &base->lock irq_context: 0 cb_lock &devlink->lock_key#9 &base->lock &obj_hash[i].lock irq_context: 0 cb_lock &devlink->lock_key#9 (work_completion)(&(&devlink_port->type_warn_dw)->work) irq_context: 0 cb_lock &devlink->lock_key#9 fs_reclaim irq_context: 0 cb_lock &devlink->lock_key#9 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 cb_lock &devlink->lock_key#9 &c->lock irq_context: 0 cb_lock &devlink->lock_key#9 &n->list_lock irq_context: 0 cb_lock &devlink->lock_key#9 &n->list_lock &c->lock irq_context: 0 cb_lock &devlink->lock_key#9 &devlink_port->type_lock irq_context: 0 cb_lock &devlink->lock_key#9 nl_table_lock irq_context: 0 cb_lock &devlink->lock_key#9 nl_table_wait.lock irq_context: 0 cb_lock &devlink->lock_key#9 &xa->xa_lock#14 irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex rtnl_mutex.wait_lock irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex rcu_state.exp_mutex.wait_lock irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex &p->pi_lock irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex &p->pi_lock &rq->__lock irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex &tbl->lock &c->lock irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex &tbl->lock &____s->seqcount#2 irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex &tbl->lock &____s->seqcount irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex rcu_read_lock &c->lock irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex &idev->mc_lock &rq->__lock irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex &idev->mc_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex &rnp->exp_lock irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex rcu_state.exp_mutex irq_context: 0 cb_lock 
&devlink->lock_key#9 rtnl_mutex rcu_state.exp_mutex rcu_state.exp_mutex.wait_lock irq_context: 0 &sb->s_type->i_mutex_key#10 &xs->mutex &rnp->exp_lock irq_context: 0 &sb->s_type->i_mutex_key#10 &xs->mutex &rnp->exp_wq[2] irq_context: 0 &sb->s_type->i_mutex_key#10 &xs->mutex &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 &xs->mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex &____s->seqcount#2 irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex &____s->seqcount irq_context: softirq (&icsk->icsk_retransmit_timer) slock-AF_INET batched_entropy_u32.lock irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex &root->kernfs_rwsem &sem->wait_lock irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex &root->kernfs_rwsem &sem->wait_lock irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex &root->kernfs_rwsem &p->pi_lock irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex &root->kernfs_rwsem &p->pi_lock &rq->__lock irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex &root->kernfs_rwsem &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex &root->kernfs_rwsem &rq->__lock irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex &root->kernfs_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex &root->kernfs_rwsem kernfs_idr_lock &obj_hash[i].lock irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex &root->kernfs_rwsem rcu_read_lock rcu_node_0 irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex &root->kernfs_rwsem rcu_read_lock &rq->__lock irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex &root->kernfs_rwsem rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock &devlink->lock_key#9 &sb->s_type->i_mutex_key#3 &sb->s_type->i_lock_key#7 bit_wait_table + i irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex &rq->__lock &cfs_rq->removed.lock irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 (wq_completion)wg-crypt-wg0#8 &rq->__lock irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex fib_info_lock irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex fib_info_lock &obj_hash[i].lock irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex rcu_read_lock &tb->tb6_lock &____s->seqcount#2 irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex rcu_read_lock &tb->tb6_lock &____s->seqcount irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_NETROM &____s->seqcount#2 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_NETROM &____s->seqcount irq_context: softirq rcu_read_lock rcu_read_lock &zone->lock &____s->seqcount irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_SMC/1 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_SMC/1 slock-AF_SMC irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_SMC/1 k-clock-AF_INET irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_SMC/1 k-sk_lock-AF_INET irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_SMC/1 k-sk_lock-AF_INET k-slock-AF_INET irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_SMC/1 k-sk_lock-AF_INET &h->lhash2[i].lock irq_context: 0 
&sb->s_type->i_mutex_key#10 sk_lock-AF_SMC/1 k-sk_lock-AF_INET &tcp_hashinfo.bhash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_SMC/1 k-sk_lock-AF_INET &tcp_hashinfo.bhash[i].lock stock_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_SMC/1 k-sk_lock-AF_INET &tcp_hashinfo.bhash[i].lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_SMC/1 k-sk_lock-AF_INET &tcp_hashinfo.bhash[i].lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_SMC/1 k-sk_lock-AF_INET &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_SMC/1 k-sk_lock-AF_INET &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock stock_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_SMC/1 k-sk_lock-AF_INET &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_SMC/1 k-sk_lock-AF_INET &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_SMC/1 k-sk_lock-AF_INET &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_SMC/1 k-sk_lock-AF_INET &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_SMC/1 k-sk_lock-AF_INET &queue->rskq_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_SMC/1 k-sk_lock-AF_INET &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_SMC/1 k-slock-AF_INET irq_context: 0 &sb->s_type->i_mutex_key#10 (work_completion)(&smc->tcp_listen_work) irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET slock-AF_INET &meta->lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET slock-AF_INET kfence_freelist_lock irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex rcu_state.exp_mutex &cfs_rq->removed.lock irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex rcu_state.exp_mutex &rq->__lock &cfs_rq->removed.lock irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex (inetaddr_chain).rwsem irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex (inetaddr_chain).rwsem &obj_hash[i].lock irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex (inetaddr_chain).rwsem rcu_read_lock &pool->lock/1 irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex (inetaddr_chain).rwsem rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex (inetaddr_chain).rwsem rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex (inetaddr_chain).rwsem rcu_read_lock &pool->lock/1 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex (inetaddr_chain).rwsem rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex (inetaddr_chain).rwsem rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex (inetaddr_chain).rwsem &rq->__lock irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex (inetaddr_chain).rwsem &rq->__lock &per_cpu_ptr(group->pcpu, 
cpu)->seq irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex (inetaddr_chain).rwsem rcu_read_lock &data->fib_event_queue_lock irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex (inetaddr_chain).rwsem rcu_read_lock rcu_read_lock &pool->lock irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex (inetaddr_chain).rwsem rcu_read_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex (inetaddr_chain).rwsem fs_reclaim irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex (inetaddr_chain).rwsem fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex (inetaddr_chain).rwsem nl_table_lock irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex (inetaddr_chain).rwsem nl_table_wait.lock irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex (inetaddr_chain).rwsem fib_info_lock irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex (inetaddr_chain).rwsem &tbl->lock irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex (inetaddr_chain).rwsem class irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex (inetaddr_chain).rwsem (&tbl->proxy_timer) irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex (inetaddr_chain).rwsem &base->lock irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex (inetaddr_chain).rwsem &net->sctp.local_addr_lock irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex (inetaddr_chain).rwsem &net->sctp.local_addr_lock &net->sctp.addr_wq_lock irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex (inetaddr_chain).rwsem &net->sctp.local_addr_lock &net->sctp.addr_wq_lock &c->lock irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex (inetaddr_chain).rwsem krc.lock irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex &sem->wait_lock irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex dev_pm_qos_sysfs_mtx &root->kernfs_rwsem &sem->wait_lock irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex dev_pm_qos_sysfs_mtx &sem->wait_lock irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex dev_pm_qos_sysfs_mtx &p->pi_lock irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex dev_pm_qos_sysfs_mtx &p->pi_lock &rq->__lock irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex dev_pm_qos_sysfs_mtx &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex dev_pm_qos_sysfs_mtx &rq->__lock irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex dev_pm_qos_sysfs_mtx &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex dev_pm_qos_sysfs_mtx &root->kernfs_rwsem &sem->wait_lock irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex dev_pm_qos_sysfs_mtx &root->kernfs_rwsem &rq->__lock irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex dev_pm_qos_sysfs_mtx &root->kernfs_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock &devlink->lock_key#9 &rq->__lock irq_context: 0 cb_lock &devlink->lock_key#9 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock &devlink->lock_key#9 &xa->xa_lock#14 &obj_hash[i].lock irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex kernfs_idr_lock &obj_hash[i].lock irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex kernfs_idr_lock pool_lock#2 irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex gdp_mutex sysfs_symlink_target_lock irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex gdp_mutex &root->kernfs_rwsem irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex gdp_mutex &root->kernfs_rwsem 
&root->kernfs_iattr_rwsem irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex gdp_mutex kernfs_idr_lock irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex gdp_mutex &obj_hash[i].lock irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex gdp_mutex &k->list_lock irq_context: 0 (wq_completion)events (work_completion)(&pool->work) rcu_read_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 sk_lock-AF_INET rcu_read_lock &sighand->siglock irq_context: 0 sk_lock-AF_INET &sighand->siglock irq_context: 0 sk_lock-AF_INET &sighand->siglock stock_lock irq_context: 0 sk_lock-AF_INET &sighand->siglock pool_lock#2 irq_context: 0 sk_lock-AF_INET &sighand->siglock &p->pi_lock irq_context: 0 sk_lock-AF_INET &sighand->siglock &p->pi_lock &rq->__lock irq_context: 0 sk_lock-AF_INET &sighand->siglock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock &devlink->lock_key#9 (work_completion)(&(&hwstats->traffic_dw)->work) irq_context: 0 cb_lock &devlink->lock_key#9 rcu_read_lock &pool->lock irq_context: 0 cb_lock &devlink->lock_key#9 &hwstats->hwsdev_list_lock irq_context: 0 cb_lock &devlink->lock_key#9 &(&fn_net->fib_chain)->lock irq_context: 0 cb_lock &devlink->lock_key#9 rcu_state.exp_mutex rcu_node_0 irq_context: 0 cb_lock &devlink->lock_key#9 rcu_state.exp_mutex &obj_hash[i].lock irq_context: 0 cb_lock &devlink->lock_key#9 rcu_state.exp_mutex rcu_read_lock &pool->lock irq_context: 0 cb_lock &devlink->lock_key#9 rcu_state.exp_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 cb_lock &devlink->lock_key#9 rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 cb_lock &devlink->lock_key#9 rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 cb_lock &devlink->lock_key#9 rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock &devlink->lock_key#9 rcu_state.exp_mutex &rnp->exp_wq[3] irq_context: 0 cb_lock &devlink->lock_key#9 rcu_state.exp_mutex &rq->__lock irq_context: 0 cb_lock &devlink->lock_key#9 rcu_state.exp_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex &(&net->nexthop.notifier_chain)->rwsem irq_context: 0 cb_lock &devlink->lock_key#9 (work_completion)(&data->fib_flush_work) irq_context: 0 cb_lock &devlink->lock_key#9 (work_completion)(&data->fib_event_work) irq_context: 0 cb_lock &devlink->lock_key#9 (work_completion)(&ht->run_work) irq_context: 0 cb_lock &devlink->lock_key#9 &ht->mutex irq_context: 0 cb_lock &devlink->lock_key#9 &ht->mutex &obj_hash[i].lock irq_context: 0 cb_lock &devlink->lock_key#9 &ht->mutex &rq->__lock irq_context: 0 cb_lock &devlink->lock_key#9 &ht->mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock &devlink->lock_key#9 &ht->mutex &meta->lock irq_context: 0 cb_lock &devlink->lock_key#9 &ht->mutex kfence_freelist_lock irq_context: 0 cb_lock &devlink->lock_key#9 rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 cb_lock &devlink->lock_key#9 (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) irq_context: 0 cb_lock &devlink->lock_key#9 &nsim_trap_data->trap_lock irq_context: 0 cb_lock &devlink->lock_key#9 &rnp->exp_lock irq_context: 0 cb_lock &devlink->lock_key#9 rcu_state.exp_mutex irq_context: 0 cb_lock &devlink->lock_key#9 rcu_state.exp_mutex rcu_state.exp_mutex.wait_lock irq_context: 0 cb_lock &devlink->lock_key#9 rcu_state.exp_mutex &rnp->exp_wq[2] irq_context: 0 cb_lock &devlink->lock_key#9 
pcpu_lock irq_context: 0 cb_lock &devlink->lock_key#9 &____s->seqcount#2 irq_context: 0 cb_lock &devlink->lock_key#9 &____s->seqcount irq_context: 0 cb_lock &devlink->lock_key#9 &region->snapshot_lock irq_context: 0 cb_lock &devlink->lock_key#9 pcpu_alloc_mutex irq_context: 0 cb_lock &devlink->lock_key#9 pcpu_alloc_mutex &rq->__lock irq_context: 0 cb_lock &devlink->lock_key#9 pcpu_alloc_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock &devlink->lock_key#9 pcpu_alloc_mutex pcpu_lock irq_context: 0 cb_lock &devlink->lock_key#9 pcpu_alloc_mutex.wait_lock irq_context: 0 cb_lock &devlink->lock_key#9 fs_reclaim &rq->__lock irq_context: 0 cb_lock &devlink->lock_key#9 fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock &devlink->lock_key#9 &sb->s_type->i_mutex_key#3 fs_reclaim irq_context: 0 cb_lock &devlink->lock_key#9 &sb->s_type->i_mutex_key#3 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 cb_lock &devlink->lock_key#9 &sb->s_type->i_mutex_key#3 stock_lock irq_context: 0 cb_lock &devlink->lock_key#9 &sb->s_type->i_mutex_key#3 rcu_read_lock rename_lock.seqcount irq_context: 0 cb_lock &devlink->lock_key#9 &sb->s_type->i_mutex_key#3 rcu_read_lock rcu_node_0 irq_context: 0 cb_lock &devlink->lock_key#9 &sb->s_type->i_mutex_key#3 rcu_read_lock &rq->__lock irq_context: 0 cb_lock &devlink->lock_key#9 &sb->s_type->i_mutex_key#3 rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock &devlink->lock_key#9 &sb->s_type->i_mutex_key#3 rcu_node_0 irq_context: 0 cb_lock &devlink->lock_key#9 &sb->s_type->i_mutex_key#3 &rq->__lock irq_context: 0 cb_lock &devlink->lock_key#9 &sb->s_type->i_mutex_key#3 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock &devlink->lock_key#9 &sb->s_type->i_mutex_key#3 &dentry->d_lock &wq irq_context: 0 cb_lock &devlink->lock_key#9 &sb->s_type->i_mutex_key#3 remove_cache_srcu irq_context: 0 cb_lock &devlink->lock_key#9 &sb->s_type->i_mutex_key#3 remove_cache_srcu quarantine_lock irq_context: 0 cb_lock &devlink->lock_key#9 &sb->s_type->i_mutex_key#3 remove_cache_srcu &c->lock irq_context: 0 cb_lock &devlink->lock_key#9 &sb->s_type->i_mutex_key#3 remove_cache_srcu &n->list_lock irq_context: 0 cb_lock &devlink->lock_key#9 &sb->s_type->i_mutex_key#3 remove_cache_srcu &obj_hash[i].lock irq_context: 0 cb_lock &devlink->lock_key#9 &sb->s_type->i_mutex_key#3 remove_cache_srcu pool_lock#2 irq_context: 0 cb_lock &devlink->lock_key#9 &sb->s_type->i_mutex_key#3 remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 cb_lock &devlink->lock_key#9 &sb->s_type->i_mutex_key#3 remove_cache_srcu &rq->__lock irq_context: 0 cb_lock &devlink->lock_key#9 &sb->s_type->i_mutex_key#3 remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock &devlink->lock_key#9 &sb->s_type->i_mutex_key#3 mmu_notifier_invalidate_range_start irq_context: 0 cb_lock &devlink->lock_key#9 &sb->s_type->i_mutex_key#3 &sb->s_type->i_lock_key#7 &dentry->d_lock irq_context: 0 cb_lock &devlink->lock_key#9 &sb->s_type->i_mutex_key#3 &c->lock irq_context: 0 cb_lock &devlink->lock_key#9 batched_entropy_u32.lock irq_context: 0 cb_lock &devlink->lock_key#9 rcu_read_lock &data->fib_event_queue_lock irq_context: 0 cb_lock &devlink->lock_key#9 rcu_read_lock rcu_read_lock &pool->lock irq_context: 0 cb_lock &devlink->lock_key#9 rcu_read_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 cb_lock &devlink->lock_key#9 rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0
cb_lock &devlink->lock_key#9 rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 cb_lock &devlink->lock_key#9 rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock &devlink->lock_key#9 rcu_read_lock rcu_node_0 irq_context: 0 cb_lock &devlink->lock_key#9 rcu_read_lock &rq->__lock irq_context: 0 cb_lock &devlink->lock_key#9 rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock &devlink->lock_key#9 rcu_read_lock &c->lock irq_context: 0 cb_lock &devlink->lock_key#9 rcu_read_lock &n->list_lock irq_context: 0 cb_lock &devlink->lock_key#9 rcu_read_lock &n->list_lock &c->lock irq_context: 0 cb_lock &devlink->lock_key#9 rcu_read_lock &tb->tb6_lock irq_context: 0 cb_lock &devlink->lock_key#9 rcu_read_lock &tb->tb6_lock &net->ipv6.fib6_walker_lock irq_context: 0 cb_lock &devlink->lock_key#9 rcu_read_lock &tb->tb6_lock &c->lock irq_context: 0 cb_lock &devlink->lock_key#9 rcu_read_lock &tb->tb6_lock &data->fib_event_queue_lock irq_context: 0 cb_lock &devlink->lock_key#9 rcu_read_lock &tb->tb6_lock &n->list_lock irq_context: 0 cb_lock &devlink->lock_key#9 rcu_read_lock &tb->tb6_lock &n->list_lock &c->lock irq_context: 0 cb_lock &devlink->lock_key#9 rcu_read_lock &obj_hash[i].lock irq_context: 0 cb_lock &devlink->lock_key#9 rcu_read_lock &rcu_state.gp_wq irq_context: 0 cb_lock &devlink->lock_key#9 rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 cb_lock &devlink->lock_key#9 rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 cb_lock &devlink->lock_key#9 rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock &devlink->lock_key#9 &sb->s_type->i_mutex_key#3 &____s->seqcount#2 irq_context: 0 cb_lock &devlink->lock_key#9 &sb->s_type->i_mutex_key#3 &____s->seqcount irq_context: 0 cb_lock &devlink->lock_key#9 &sb->s_type->i_mutex_key#3 pool_lock#2 irq_context: 0 cb_lock &devlink->lock_key#9 &sb->s_type->i_mutex_key#3 &n->list_lock irq_context: 0 cb_lock &devlink->lock_key#9 &sb->s_type->i_mutex_key#3 &n->list_lock &c->lock irq_context: 0 cb_lock &devlink->lock_key#9 &sb->s_type->i_mutex_key#3 batched_entropy_u8.lock irq_context: 0 cb_lock &devlink->lock_key#9 &sb->s_type->i_mutex_key#3 kfence_freelist_lock irq_context: 0 cb_lock &devlink->lock_key#9 stock_lock irq_context: 0 cb_lock &devlink->lock_key#9 stack_depot_init_mutex irq_context: 0 cb_lock &devlink->lock_key#9 crngs.lock irq_context: 0 cb_lock &devlink->lock_key#9 &sb->s_type->i_mutex_key#3 &pcp->lock &zone->lock irq_context: 0 cb_lock &devlink->lock_key#9 &sb->s_type->i_mutex_key#3 &pcp->lock &zone->lock &____s->seqcount irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex bpf_devs_lock fs_reclaim irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex bpf_devs_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex &sb->s_type->i_mutex_key#3 fs_reclaim irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex &sb->s_type->i_mutex_key#3 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex &sb->s_type->i_mutex_key#3 stock_lock irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex &sb->s_type->i_mutex_key#3 rcu_read_lock rename_lock.seqcount irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex &sb->s_type->i_mutex_key#3 &dentry->d_lock &wq irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex &sb->s_type->i_mutex_key#3 &c->lock irq_context: 0 
cb_lock &devlink->lock_key#9 rtnl_mutex &sb->s_type->i_mutex_key#3 &____s->seqcount#2 irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex &sb->s_type->i_mutex_key#3 &____s->seqcount irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex &sb->s_type->i_mutex_key#3 mmu_notifier_invalidate_range_start irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex &sb->s_type->i_mutex_key#3 &sb->s_type->i_lock_key#7 irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex &sb->s_type->i_mutex_key#3 &s->s_inode_list_lock irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex &sb->s_type->i_mutex_key#3 &sb->s_type->i_lock_key#7 &dentry->d_lock irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex gdp_mutex fs_reclaim irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex gdp_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex gdp_mutex lock irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex gdp_mutex lock kernfs_idr_lock irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex gdp_mutex kobj_ns_type_lock irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex lock irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex lock kernfs_idr_lock irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex bus_type_sem irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex lock kernfs_idr_lock &c->lock irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex input_pool.lock irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex batched_entropy_u32.lock irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex batched_entropy_u32.lock crngs.lock irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex stock_lock irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex failover_lock irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex pcpu_alloc_mutex irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex pcpu_alloc_mutex pcpu_lock irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex proc_subdir_lock irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex &vn->sock_lock irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex rcu_read_lock (console_sem).lock irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex rcu_read_lock console_lock console_srcu console_owner_lock irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex rcu_read_lock console_lock console_srcu console_owner irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex rcu_read_lock console_lock console_srcu console_owner &port_lock_key irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex rcu_read_lock console_lock console_srcu console_owner console_owner_lock irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex &sb->s_type->i_mutex_key#3 &n->list_lock irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex &sb->s_type->i_mutex_key#3 &n->list_lock &c->lock irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex lock kernfs_idr_lock pool_lock#2 irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex &obj_hash[i].lock pool_lock irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex &idev->mc_lock &n->list_lock irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex &idev->mc_lock &n->list_lock &c->lock irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex &idev->mc_lock _xmit_ETHER &c->lock irq_context: 0 cb_lock &devlink->lock_key#9 
rtnl_mutex quarantine_lock irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex &idev->mc_lock &obj_hash[i].lock pool_lock irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex uevent_sock_mutex &____s->seqcount#2 irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex uevent_sock_mutex &____s->seqcount irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex uevent_sock_mutex pool_lock#2 irq_context: 0 cb_lock &devlink->lock_key#9 pool_lock#2 irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &root->kernfs_rwsem pool_lock#2 irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex rcu_state.exp_mutex rcu_read_lock &rq->__lock irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex rcu_state.exp_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock &devlink->lock_key#9 rcu_state.barrier_mutex rcu_read_lock &rq->__lock irq_context: 0 cb_lock &devlink->lock_key#9 rcu_state.barrier_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock &devlink->lock_key#9 &obj_hash[i].lock pool_lock irq_context: 0 cb_lock &devlink->lock_key#9 rcu_state.barrier_mutex rcu_state.barrier_mutex.wait_lock irq_context: 0 cb_lock &devlink->lock_key#9 rcu_state.barrier_mutex.wait_lock irq_context: 0 cb_lock &devlink->lock_key#9 batched_entropy_u8.lock irq_context: 0 cb_lock &devlink->lock_key#9 kfence_freelist_lock irq_context: 0 cb_lock &devlink->lock_key#9 &meta->lock irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex rcu_state.exp_mutex &obj_hash[i].lock pool_lock irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex &hwstats->hwsdev_list_lock &rq->__lock irq_context: 0 cb_lock &devlink->lock_key#9 &sb->s_type->i_mutex_key#3 fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 cb_lock &devlink->lock_key#9 &sb->s_type->i_mutex_key#3 fill_pool_map-wait-type-override &c->lock irq_context: 0 cb_lock &devlink->lock_key#9 &sb->s_type->i_mutex_key#3 fill_pool_map-wait-type-override &n->list_lock irq_context: 0 cb_lock &devlink->lock_key#9 &sb->s_type->i_mutex_key#3 fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 cb_lock &devlink->lock_key#9 &sb->s_type->i_mutex_key#3 fill_pool_map-wait-type-override &rq->__lock irq_context: 0 cb_lock &devlink->lock_key#9 &sb->s_type->i_mutex_key#3 fill_pool_map-wait-type-override &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock &devlink->lock_key#9 &sb->s_type->i_mutex_key#3 fill_pool_map-wait-type-override pool_lock irq_context: 0 cb_lock &devlink->lock_key#9 &x->wait#10 irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex defrag4_mutex nf_hook_mutex batched_entropy_u8.lock irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex defrag4_mutex nf_hook_mutex kfence_freelist_lock irq_context: 0 pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex nf_hook_mutex &n->list_lock irq_context: 0 pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex nf_hook_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)wg-kex-wg1#18 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &rq->__lock irq_context: 0 pernet_ops_rwsem &root->kernfs_rwsem &cfs_rq->removed.lock irq_context: 0 sk_lock-AF_INET free_vmap_area_lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_NETROM &n->list_lock irq_context: 0 
sk_lock-AF_NETROM &n->list_lock &c->lock irq_context: softirq net/netrom/nr_loopback.c:18 &n->list_lock irq_context: softirq net/netrom/nr_loopback.c:18 &n->list_lock &c->lock irq_context: softirq net/netrom/nr_loopback.c:18 slock-AF_NETROM rcu_read_lock &ei->socket.wq.wait &p->pi_lock &cfs_rq->removed.lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex rcu_read_lock &cfs_rq->removed.lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex rcu_read_lock &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex gdp_mutex &rq->__lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex gdp_mutex gdp_mutex.wait_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &rxe->usdev_lock &cfs_rq->removed.lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &rxe->usdev_lock &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &rxe->usdev_lock pool_lock#2 irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &rxe->usdev_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 pernet_ops_rwsem rtnl_mutex dev_pm_qos_sysfs_mtx dev_pm_qos_mtx &rq->__lock irq_context: 0 pernet_ops_rwsem rtnl_mutex dev_pm_qos_sysfs_mtx dev_pm_qos_mtx &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem rtnl_mutex &rnp->exp_wq[3] irq_context: 0 pernet_ops_rwsem rtnl_mutex &caifn->caifdevs.lock &rnp->exp_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &caifn->caifdevs.lock rcu_state.exp_mutex irq_context: 0 pernet_ops_rwsem rtnl_mutex &caifn->caifdevs.lock rcu_state.exp_mutex rcu_state.exp_mutex.wait_lock irq_context: 0 cb_lock &devlink->lock_key#9 rcu_state.exp_mutex.wait_lock irq_context: 0 cb_lock &devlink->lock_key#9 quarantine_lock irq_context: 0 cb_lock &devlink->lock_key#9 remove_cache_srcu irq_context: 0 cb_lock &devlink->lock_key#9 remove_cache_srcu quarantine_lock irq_context: 0 cb_lock &devlink->lock_key#9 remove_cache_srcu &c->lock irq_context: 0 cb_lock &devlink->lock_key#9 remove_cache_srcu &n->list_lock irq_context: 0 cb_lock &devlink->lock_key#9 remove_cache_srcu &obj_hash[i].lock irq_context: 0 cb_lock &devlink->lock_key#9 remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 cb_lock &devlink->lock_key#9 remove_cache_srcu pool_lock#2 irq_context: 0 cb_lock &devlink->lock_key#9 &sb->s_type->i_mutex_key#3 per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 cb_lock &devlink->lock_key#9 rcu_read_lock &tb->tb6_lock &____s->seqcount#2 irq_context: 0 cb_lock &devlink->lock_key#9 rcu_read_lock &tb->tb6_lock &____s->seqcount irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex &sb->s_type->i_mutex_key#3 &rq->__lock irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex &sb->s_type->i_mutex_key#3 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock &devlink->lock_key#9 &sb->s_type->i_mutex_key#3 &rcu_state.expedited_wq irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex rcu_node_0 irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex &root->kernfs_rwsem 
&root->kernfs_iattr_rwsem &rq->__lock irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex &root->kernfs_rwsem &rq->__lock &cfs_rq->removed.lock irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex gdp_mutex kernfs_idr_lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg1#18 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &n->list_lock irq_context: 0 (wq_completion)wg-kex-wg1#18 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)wg-kex-wg1#18 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &iint->mutex &n->list_lock irq_context: 0 sb_writers#4 &iint->mutex &n->list_lock &c->lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock rcu_node_0 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &rcu_state.expedited_wq irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ei->i_prealloc_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &sbi->s_md_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &bgl->locks[i].lock &sbi->s_md_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &bgl->locks[i].lock &ei->i_prealloc_lock irq_context: 0 &f->f_pos_lock sb_writers#4 
&sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem key#3 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &pa->pa_lock#2 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &(ei->i_block_reservation_lock) key#15 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock key#10 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &sb->s_type->i_lock_key#22 &xa->xa_lock#6 &obj_hash[i].lock pool_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem &(ei->i_block_reservation_lock) irq_context: softirq net/netrom/nr_loopback.c:18 batched_entropy_u8.lock irq_context: softirq net/netrom/nr_loopback.c:18 kfence_freelist_lock irq_context: 0 sb_writers#4 mapping.invalidate_lock rcu_read_lock rcu_read_lock &pool->lock irq_context: 0 sb_writers#4 mapping.invalidate_lock rcu_read_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 sb_writers#4 mapping.invalidate_lock rcu_read_lock rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 sb_writers#4 mapping.invalidate_lock rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 sb_writers#4 mapping.invalidate_lock rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 mapping.invalidate_lock rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 bit_wait_table + i irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem rcu_read_lock &memcg->move_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem rcu_read_lock &xa->xa_lock#6 irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem rcu_read_lock &xa->xa_lock#6 &s->s_inode_wblist_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem rcu_read_lock &xa->xa_lock#6 key#10 irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem rcu_read_lock key#10 irq_context: 0 &f->f_pos_lock sb_writers#4 &fq->mq_flush_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &fq->mq_flush_lock tk_core.seq.seqcount irq_context: 0 &f->f_pos_lock sb_writers#4 &fq->mq_flush_lock &q->requeue_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &fq->mq_flush_lock &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#4 &fq->mq_flush_lock rcu_read_lock &pool->lock irq_context: 0 &f->f_pos_lock sb_writers#4 &fq->mq_flush_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#4 &fq->mq_flush_lock rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 &f->f_pos_lock sb_writers#4 &fq->mq_flush_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &fq->mq_flush_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &fq->mq_flush_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 &x->wait#26 irq_context: 0 &f->f_pos_lock sb_writers#4 &mapping->private_lock irq_context: 0 &f->f_pos_lock sb_writers#4 stock_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_lock_key#22 &xa->xa_lock#6 irq_context: 0 &f->f_pos_lock sb_writers#4 &lruvec->lru_lock irq_context: 0 
&f->f_pos_lock sb_writers#4 rcu_read_lock pool_lock#2 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_lock_key#22 &xa->xa_lock#6 &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_lock_key#22 &xa->xa_lock#6 pool_lock#2 irq_context: 0 &f->f_pos_lock sb_writers#4 rcu_read_lock rcu_node_0 irq_context: 0 &f->f_pos_lock sb_writers#4 rcu_read_lock &rq->__lock irq_context: softirq net/netrom/nr_loopback.c:18 &data->lock &obj_hash[i].lock irq_context: softirq net/netrom/nr_loopback.c:18 &data->lock &base->lock irq_context: softirq net/netrom/nr_loopback.c:18 &data->lock &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 tomoyo_ss &n->list_lock &c->lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex blkcg_pol_mutex &c->lock irq_context: 0 &type->i_mutex_dir_key#7 &root->kernfs_rwsem &c->lock irq_context: 0 kn->active#64 fs_reclaim irq_context: 0 kn->active#64 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#64 &c->lock irq_context: 0 kn->active#64 stock_lock irq_context: 0 kn->active#64 &kernfs_locks->open_file_mutex[count] irq_context: 0 kn->active#64 &kernfs_locks->open_file_mutex[count] fs_reclaim irq_context: 0 kn->active#64 &kernfs_locks->open_file_mutex[count] fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#11 &p->lock irq_context: 0 sb_writers#11 &p->lock fs_reclaim irq_context: 0 sb_writers#11 &p->lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#11 &p->lock stock_lock irq_context: 0 sb_writers#11 &p->lock &c->lock irq_context: 0 sb_writers#11 &p->lock pool_lock#2 irq_context: 0 sb_writers#11 &p->lock &of->mutex irq_context: 0 sb_writers#11 tk_core.seq.seqcount irq_context: 0 (wq_completion)afs (work_completion)(&net->cells_manager) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)afs (work_completion)(&net->fs_manager) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_CAN &mm->mmap_lock &sem->wait_lock irq_context: 0 vlan_ioctl_mutex &mm->mmap_lock fs_reclaim irq_context: 0 vlan_ioctl_mutex &mm->mmap_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 vlan_ioctl_mutex &mm->mmap_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 vlan_ioctl_mutex &mm->mmap_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 vlan_ioctl_mutex &mm->mmap_lock &____s->seqcount irq_context: 0 vlan_ioctl_mutex &mm->mmap_lock stock_lock irq_context: 0 vlan_ioctl_mutex &mm->mmap_lock &obj_hash[i].lock irq_context: softirq rcu_read_lock_bh _xmit_NONE#2 quarantine_lock irq_context: 0 &sb->s_type->i_mutex_key#10 &xs->mutex rcu_state.exp_mutex irq_context: 0 &sb->s_type->i_mutex_key#10 &xs->mutex rcu_state.exp_mutex rcu_state.exp_mutex.wait_lock irq_context: 0 sb_writers#11 stock_lock irq_context: 0 sb_writers#11 &p->lock &n->list_lock irq_context: 0 sb_writers#11 &p->lock &n->list_lock &c->lock irq_context: 0 sb_writers#11 &p->lock &rq->__lock irq_context: 0 sb_writers#11 &p->lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#11 sb_writers#11 mount_lock irq_context: 0 sb_writers#11 sb_writers#11 tk_core.seq.seqcount irq_context: 0 sb_writers#11 sb_writers#11 &rq->__lock irq_context: 0 sb_writers#11 sb_writers#11 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#11 sb_writers#11 &sb->s_type->i_lock_key#31 irq_context: 0 sb_writers#11 sb_writers#11 
&wb->list_lock irq_context: 0 sb_writers#11 sb_writers#11 &wb->list_lock &sb->s_type->i_lock_key#31 irq_context: 0 &f->f_pos_lock &mm->mmap_lock fs_reclaim irq_context: 0 &f->f_pos_lock &mm->mmap_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &f->f_pos_lock &mm->mmap_lock &____s->seqcount irq_context: 0 &f->f_pos_lock &mm->mmap_lock stock_lock irq_context: 0 &f->f_pos_lock &mm->mmap_lock &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem rtnl_mutex cpu_hotplug_lock rcu_read_lock &rq->__lock irq_context: 0 pernet_ops_rwsem rtnl_mutex cpu_hotplug_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 reading_mutex &rq->__lock irq_context: hardirq|softirq &x->wait#12 &p->pi_lock irq_context: 0 reading_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: hardirq|softirq &x->wait#12 &p->pi_lock &rq->__lock irq_context: hardirq|softirq &x->wait#12 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex &idev->mc_lock _xmit_ETHER &n->list_lock irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex &idev->mc_lock _xmit_ETHER &n->list_lock &c->lock irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex batched_entropy_u8.lock irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex kfence_freelist_lock irq_context: 0 cb_lock &devlink->lock_key#9 rtnl_mutex &meta->lock irq_context: 0 sk_lock-AF_CAN &data->lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex blkcg_pol_mutex &n->list_lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex blkcg_pol_mutex &n->list_lock &c->lock irq_context: 0 rtnl_mutex lock &tn->idr_lock irq_context: 0 rtnl_mutex lock &tn->idr_lock pool_lock#2 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ret->b_state_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ret->b_state_lock &journal->j_list_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem rcu_read_lock &rcu_state.gp_wq irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 rcu_read_lock &rcu_state.gp_wq irq_context: 0 &f->f_pos_lock sb_writers#4 rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 &f->f_pos_lock sb_writers#4 rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 rcu_read_lock &rcu_state.expedited_wq irq_context: 0 &f->f_pos_lock sb_writers#4 rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 &f->f_pos_lock sb_writers#4 rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle rcu_read_lock rcu_node_0 
irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_CAN rcu_state.exp_mutex &rq->__lock &cfs_rq->removed.lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_TIPC slock-AF_TIPC &data->lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_TIPC rcu_node_0 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_TIPC rcu_read_lock rcu_node_0 irq_context: 0 rtnl_mutex &dev_addr_list_lock_key &c->lock irq_context: 0 rtnl_mutex &vlan_netdev_addr_lock_key/1 &dev_addr_list_lock_key irq_context: 0 rtnl_mutex &vlan_netdev_addr_lock_key/1 &c->lock irq_context: 0 rtnl_mutex rcu_read_lock &bond->ipsec_lock irq_context: 0 rtnl_mutex &dev_addr_list_lock_key#3/2 &obj_hash[i].lock irq_context: 0 rtnl_mutex &dev_addr_list_lock_key#3/2 krc.lock irq_context: 0 rtnl_mutex &idev->mc_lock &dev_addr_list_lock_key#3/2 irq_context: 0 rtnl_mutex &idev->mc_lock &dev_addr_list_lock_key#3/2 &obj_hash[i].lock irq_context: 0 rtnl_mutex &idev->mc_lock &dev_addr_list_lock_key#3/2 krc.lock irq_context: 0 rtnl_mutex &idev->mc_lock &dev_addr_list_lock_key#3/2 &obj_hash[i].lock pool_lock irq_context: 0 rtnl_mutex (work_completion)(&(&slave->notify_work)->work) irq_context: softirq &(&bond->mcast_work)->timer irq_context: softirq &(&bond->mcast_work)->timer rcu_read_lock &pool->lock/1 irq_context: softirq &(&bond->mcast_work)->timer rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: softirq &(&bond->mcast_work)->timer rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: softirq &(&bond->mcast_work)->timer rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: softirq &(&bond->mcast_work)->timer rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &dev_addr_list_lock_key#3/2 &dev_addr_list_lock_key/1 &obj_hash[i].lock irq_context: 0 rtnl_mutex &dev_addr_list_lock_key#3/2 &dev_addr_list_lock_key/1 krc.lock irq_context: 0 rtnl_mutex &tn->idrinfo->lock#4 irq_context: 0 rtnl_mutex &tn->idrinfo->lock#4 fs_reclaim irq_context: 0 rtnl_mutex &tn->idrinfo->lock#4 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 rtnl_mutex &tn->idrinfo->lock#4 pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_TIPC &tn->nametbl_lock &obj_hash[i].lock pool_lock irq_context: 0 rtnl_mutex zones_mutex irq_context: 0 rtnl_mutex zones_mutex fs_reclaim irq_context: 0 rtnl_mutex zones_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 rtnl_mutex zones_mutex &____s->seqcount irq_context: 0 rtnl_mutex zones_mutex pool_lock#2 irq_context: 0 rtnl_mutex zones_mutex rcu_read_lock pool_lock#2 irq_context: 0 rtnl_mutex zones_mutex &obj_hash[i].lock irq_context: 0 rtnl_mutex zones_mutex rcu_read_lock rhashtable_bucket irq_context: 0 rtnl_mutex zones_mutex batched_entropy_u32.lock irq_context: 0 rtnl_mutex zones_mutex &base->lock irq_context: 0 rtnl_mutex zones_mutex &base->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex zones_mutex &rq->__lock irq_context: 0 rtnl_mutex zones_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex zones_mutex flowtable_lock irq_context: 0 rtnl_mutex &p->tcfa_lock rcu_read_lock pool_lock#2 irq_context: 0 rtnl_mutex flow_indr_block_lock irq_context: 0 rtnl_mutex flowtable_lock &ht->lock irq_context: 0 rtnl_mutex remove_cache_srcu rcu_read_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 rtnl_mutex flowtable_lock rcu_read_lock &ht->lock irq_context: 0 
rtnl_mutex flowtable_lock &(&flowtable->gc_work)->timer irq_context: 0 rtnl_mutex flowtable_lock &obj_hash[i].lock irq_context: 0 rtnl_mutex flowtable_lock &base->lock irq_context: 0 rtnl_mutex flowtable_lock &base->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex flowtable_lock rcu_read_lock &pool->lock irq_context: 0 rtnl_mutex flowtable_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex flowtable_lock rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 rtnl_mutex flowtable_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 rtnl_mutex flowtable_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 rtnl_mutex flowtable_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (linkwatch_work).work &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq &(&bond->mcast_work)->timer rcu_read_lock &pool->lock/1 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&flowtable->gc_work)->work) irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&flowtable->gc_work)->work) &ht->lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&flowtable->gc_work)->work) rcu_read_lock &ht->lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&flowtable->gc_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&flowtable->gc_work)->work) &base->lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&flowtable->gc_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex flowtable_lock (work_completion)(&(&flowtable->gc_work)->work) irq_context: 0 rtnl_mutex flowtable_lock (wq_completion)nf_ft_offload_add irq_context: 0 rtnl_mutex flowtable_lock &wq->mutex irq_context: 0 rtnl_mutex flowtable_lock &wq->mutex &pool->lock/1 irq_context: 0 rtnl_mutex flowtable_lock &wq->mutex &x->wait#10 irq_context: 0 rtnl_mutex &dev_addr_list_lock_key &n->list_lock irq_context: 0 rtnl_mutex &dev_addr_list_lock_key &n->list_lock &c->lock irq_context: 0 (wq_completion)bond1#2 irq_context: 0 (wq_completion)bond1#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond1#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond1#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond1#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond1#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond1#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond1#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond1#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond1#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond1#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond1#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond1#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock irq_context: 0 (wq_completion)bond1#2 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond1#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond1#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond1#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond1#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond1#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond1#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond1#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 nfnl_subsys_ipset &zone->lock irq_context: 0 nfnl_subsys_ipset &zone->lock &____s->seqcount irq_context: 0 rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock &data->lock irq_context: 0 rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock &obj_hash[i].lock irq_context: 0 rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock pool_lock#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&map->work) &rnp->exp_wq[2] irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 &dentry->d_lock rcu_read_lock &p->pi_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 &dentry->d_lock rcu_read_lock &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 &dentry->d_lock rcu_read_lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_unbound (work_completion)(&map->work) &rcu_state.expedited_wq irq_context: 0 (wq_completion)events_unbound (work_completion)(&map->work) rcu_read_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: softirq (&n->timer) rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock &data->lock irq_context: softirq (&n->timer) rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock &obj_hash[i].lock irq_context: softirq (&n->timer) rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock pool_lock#2 irq_context: 0 sb_writers#4 fill_pool_map-wait-type-override rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#4 fill_pool_map-wait-type-override rcu_read_lock &rq->__lock irq_context: 0 sb_writers#4 fill_pool_map-wait-type-override rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 nfnl_subsys_ipset rcu_read_lock rcu_node_0 irq_context: 0 nfnl_subsys_ipset rcu_read_lock &rq->__lock irq_context: 0 nfnl_subsys_ipset rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &app->lock#2 pool_lock#2 irq_context: 0 rtnl_mutex &app->lock pool_lock#2 irq_context: 0 rtnl_mutex &vlan_netdev_addr_lock_key/1 _xmit_ETHER &c->lock irq_context: softirq (&app->join_timer)#2 &app->lock#2 pool_lock#2 irq_context: softirq (&app->join_timer)#2 &app->lock#2 &c->lock irq_context: softirq (&app->join_timer)#2 &app->lock#2 &list->lock#15 irq_context: softirq (&app->join_timer)#2 rcu_read_lock_bh noop_qdisc.q.lock 
irq_context: softirq (&app->join_timer)#2 rcu_read_lock_bh &data->lock irq_context: softirq (&app->join_timer)#2 rcu_read_lock_bh &obj_hash[i].lock irq_context: softirq (&app->join_timer)#2 rcu_read_lock_bh pool_lock#2 irq_context: softirq (&app->join_timer) &app->lock &c->lock irq_context: softirq (&app->join_timer) &app->lock pool_lock#2 irq_context: softirq (&app->join_timer) &app->lock &list->lock#14 irq_context: softirq (&app->join_timer) rcu_read_lock_bh noop_qdisc.q.lock irq_context: softirq (&app->join_timer) rcu_read_lock_bh &data->lock irq_context: softirq (&app->join_timer) rcu_read_lock_bh &obj_hash[i].lock irq_context: softirq (&app->join_timer) rcu_read_lock_bh pool_lock#2 irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &dev->tx_global_lock &vlan_netdev_xmit_lock_key irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock &pcp->lock &zone->lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock &pcp->lock &zone->lock &____s->seqcount irq_context: softirq (&app->join_timer)#2 &app->lock#2 &n->list_lock irq_context: softirq (&app->join_timer)#2 &app->lock#2 &n->list_lock &c->lock irq_context: 0 rtnl_mutex &br->lock &br->hash_lock rcu_read_lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh &sch->q.lock hrtimer_bases.lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh &sch->q.lock hrtimer_bases.lock &obj_hash[i].lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx lweventlist_lock &c->lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx rcu_read_lock &c->lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx (wq_completion)phy24 irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &data->lock irq_context: 0 (wq_completion)events (work_completion)(&w->w) &obj_hash[i].lock pool_lock irq_context: 0 namespace_sem pcpu_alloc_mutex rcu_node_0 irq_context: 0 &xs->mutex &mm->mmap_lock ptlock_ptr(page)#2 lock#4 &lruvec->lru_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex rcu_read_lock &bat_priv->forw_bat_list_lock fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex rcu_read_lock &bat_priv->forw_bat_list_lock fill_pool_map-wait-type-override &n->list_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex rcu_read_lock &bat_priv->forw_bat_list_lock fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 sk_lock-AF_INET sctp_assocs_id_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_CAN rcu_state.exp_mutex fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_CAN rcu_state.exp_mutex fill_pool_map-wait-type-override &c->lock irq_context: 0 &sb->s_type->i_mutex_key#10 
sk_lock-AF_CAN rcu_state.exp_mutex fill_pool_map-wait-type-override &n->list_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_CAN rcu_state.exp_mutex fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_CAN rcu_state.exp_mutex fill_pool_map-wait-type-override &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_CAN rcu_state.exp_mutex fill_pool_map-wait-type-override &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_CAN rcu_state.exp_mutex fill_pool_map-wait-type-override pool_lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ret->b_state_lock &journal->j_list_lock irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 rcu_read_lock &p->pi_lock irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 rcu_read_lock &p->pi_lock &rq->__lock irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 rcu_read_lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq (&n->timer) k-slock-AF_INET rcu_read_lock &c->lock irq_context: softirq (&n->timer) k-slock-AF_INET rcu_read_lock &obj_hash[i].lock irq_context: softirq (&n->timer) k-slock-AF_INET rcu_read_lock rcu_read_lock_bh noop_qdisc.q.lock irq_context: softirq (&n->timer) k-slock-AF_INET rcu_read_lock rcu_read_lock_bh &data->lock irq_context: softirq (&n->timer) k-slock-AF_INET rcu_read_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: softirq (&n->timer) k-slock-AF_INET rcu_read_lock rcu_read_lock_bh pool_lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh &sch->q.lock hrtimer_bases.lock tk_core.seq.seqcount irq_context: 0 sk_lock-AF_SMC k-sk_lock-AF_INET &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock batched_entropy_u8.lock irq_context: 0 sk_lock-AF_SMC k-sk_lock-AF_INET &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock &hashinfo->ehash_locks[i] irq_context: 0 sk_lock-AF_SMC k-sk_lock-AF_INET batched_entropy_u16.lock crngs.lock irq_context: 0 &sb->s_type->i_mutex_key#10 &smc->clcsock_release_lock k-sk_lock-AF_INET &tcp_hashinfo.bhash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 &smc->clcsock_release_lock k-sk_lock-AF_INET &tcp_hashinfo.bhash[i].lock stock_lock irq_context: 0 &sb->s_type->i_mutex_key#10 &smc->clcsock_release_lock k-sk_lock-AF_INET &tcp_hashinfo.bhash[i].lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 &smc->clcsock_release_lock k-sk_lock-AF_INET &tcp_hashinfo.bhash[i].lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 &smc->clcsock_release_lock k-sk_lock-AF_INET &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 &smc->clcsock_release_lock k-sk_lock-AF_INET &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock stock_lock irq_context: 0 &sb->s_type->i_mutex_key#10 &smc->clcsock_release_lock k-sk_lock-AF_INET &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 &smc->clcsock_release_lock k-sk_lock-AF_INET &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 &smc->clcsock_release_lock &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)wg-crypt-wg0#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : 
"0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_node_0 irq_context: 0 (wq_completion)wg-crypt-wg0#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-crypt-wg0#8 (work_completion)(&peer->transmit_packet_work) rcu_node_0 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &pcp->lock &zone->lock &____s->seqcount irq_context: 0 pernet_ops_rwsem cpu_hotplug_lock wq_pool_mutex fill_pool_map-wait-type-override &c->lock irq_context: 0 pernet_ops_rwsem cpu_hotplug_lock wq_pool_mutex fill_pool_map-wait-type-override &n->list_lock irq_context: 0 pernet_ops_rwsem cpu_hotplug_lock wq_pool_mutex fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 pernet_ops_rwsem cpu_hotplug_lock wq_pool_mutex fill_pool_map-wait-type-override &rq->__lock irq_context: 0 pernet_ops_rwsem cpu_hotplug_lock wq_pool_mutex fill_pool_map-wait-type-override &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &obj_hash[i].lock pool_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_state.exp_mutex &rcu_state.expedited_wq irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_state.exp_mutex &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)wg-kex-wg1#17 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem uevent_sock_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 pernet_ops_rwsem uevent_sock_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem rcu_read_lock pgd_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem rcu_read_lock stock_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem rcu_read_lock key irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem rcu_read_lock pcpu_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem rcu_read_lock percpu_counters_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem rcu_read_lock pcpu_lock stock_lock irq_context: 0 &sbi->s_writepages_rwsem batched_entropy_u8.lock irq_context: 0 &sbi->s_writepages_rwsem kfence_freelist_lock irq_context: 0 &sbi->s_writepages_rwsem &meta->lock irq_context: 0 &sb->s_type->i_mutex_key#10 (work_completion)(&msk->work) &rq->__lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_INET6 &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock &c->lock irq_context: 0 (wq_completion)bond1 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq (&app->join_timer) &app->lock batched_entropy_u8.lock irq_context: softirq (&app->join_timer) &app->lock kfence_freelist_lock irq_context: softirq (&app->join_timer) &app->lock &n->list_lock 
irq_context: softirq (&app->join_timer) &app->lock &n->list_lock &c->lock irq_context: softirq (&app->join_timer) rcu_read_lock_bh &meta->lock irq_context: softirq (&app->join_timer) rcu_read_lock_bh kfence_freelist_lock irq_context: 0 pernet_ops_rwsem rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 namespace_sem namespace_sem.wait_lock irq_context: 0 namespace_sem.wait_lock irq_context: 0 pernet_ops_rwsem ipvs->est_mutex &p->pi_lock &rq->__lock irq_context: 0 pernet_ops_rwsem ipvs->est_mutex &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &net->xdp.lock &rq->__lock irq_context: 0 rcu_read_lock rcu_read_lock_bh rcu_read_lock &ei->socket.wq.wait irq_context: 0 nfnl_subsys_ctnetlink rcu_read_lock &____s->seqcount#7 irq_context: 0 nfnl_subsys_ctnetlink pool_lock#2 irq_context: 0 nfnl_subsys_ctnetlink &obj_hash[i].lock irq_context: 0 cb_lock nlk_cb_mutex-GENERIC &dir->lock#2 irq_context: 0 sock_diag_mutex rcu_read_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: 0 sock_diag_mutex rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 sock_diag_mutex rcu_read_lock rcu_read_lock_bh rcu_read_lock rlock-AF_PACKET irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex remove_cache_srcu &cfs_rq->removed.lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex uevent_sock_mutex &n->list_lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex uevent_sock_mutex &n->list_lock &c->lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &rxe->usdev_lock rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &rxe->usdev_lock rtnl_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq (&app->join_timer) rcu_read_lock_bh quarantine_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &idev->mc_lock rcu_node_0 irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex defrag6_mutex cpu_hotplug_lock &rq->__lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&rew->rew_work) rcu_state.exp_wake_mutex &rnp->exp_wq[1] &p->pi_lock &cfs_rq->removed.lock irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_state.exp_mutex &rq->__lock &cfs_rq->removed.lock irq_context: 0 pernet_ops_rwsem remove_cache_srcu rcu_read_lock &rcu_state.expedited_wq irq_context: 0 pernet_ops_rwsem remove_cache_srcu rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 pernet_ops_rwsem remove_cache_srcu rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 pernet_ops_rwsem remove_cache_srcu rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &root->kernfs_rwsem &rcu_state.expedited_wq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &root->kernfs_rwsem &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &root->kernfs_rwsem &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &root->kernfs_rwsem &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &root->kernfs_rwsem &rq->__lock &cfs_rq->removed.lock irq_context: 0 
(wq_completion)wg-kex-wg2#18 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock remove_cache_srcu irq_context: 0 (wq_completion)wg-kex-wg2#18 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock remove_cache_srcu quarantine_lock irq_context: 0 (wq_completion)wg-kex-wg2#18 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock remove_cache_srcu &c->lock irq_context: 0 (wq_completion)wg-kex-wg2#18 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock remove_cache_srcu &n->list_lock irq_context: 0 (wq_completion)wg-kex-wg2#18 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock remove_cache_srcu &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &root->kernfs_rwsem &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &root->kernfs_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem dev_pm_qos_sysfs_mtx &root->kernfs_rwsem &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem dev_pm_qos_sysfs_mtx &root->kernfs_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex dev_pm_qos_sysfs_mtx &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex dev_pm_qos_sysfs_mtx &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem devices_rwsem rcu_read_lock rcu_node_0 irq_context: 0 pernet_ops_rwsem devices_rwsem rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->mcast.work)->work) rcu_read_lock batched_entropy_u8.lock crngs.lock irq_context: 0 sk_lock-AF_INET &f->f_owner.lock irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock pool_lock#2 irq_context: 0 rtnl_mutex ppp_mutex pcpu_alloc_mutex irq_context: 0 rtnl_mutex ppp_mutex pcpu_alloc_mutex pcpu_lock irq_context: 0 rtnl_mutex ppp_mutex pcpu_alloc_mutex &rq->__lock irq_context: 0 rtnl_mutex ppp_mutex pcpu_alloc_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq 
irq_context: 0 rtnl_mutex ppp_mutex &pn->all_ppp_mutex irq_context: 0 rtnl_mutex ppp_mutex &pn->all_ppp_mutex fs_reclaim irq_context: 0 rtnl_mutex ppp_mutex &pn->all_ppp_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 rtnl_mutex ppp_mutex &pn->all_ppp_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 rtnl_mutex ppp_mutex &pn->all_ppp_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex ppp_mutex &pn->all_ppp_mutex pool_lock#2 irq_context: 0 rtnl_mutex ppp_mutex fs_reclaim irq_context: 0 rtnl_mutex ppp_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 rtnl_mutex ppp_mutex pool_lock#2 irq_context: 0 rtnl_mutex ppp_mutex net_rwsem irq_context: 0 rtnl_mutex ppp_mutex net_rwsem &list->lock#2 irq_context: 0 rtnl_mutex ppp_mutex &tn->lock irq_context: 0 rtnl_mutex ppp_mutex rcu_read_lock rcu_node_0 irq_context: 0 rtnl_mutex ppp_mutex rcu_read_lock &rq->__lock irq_context: 0 rtnl_mutex ppp_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex ppp_mutex &x->wait#9 irq_context: 0 rtnl_mutex ppp_mutex &obj_hash[i].lock irq_context: 0 rtnl_mutex ppp_mutex remove_cache_srcu irq_context: 0 rtnl_mutex ppp_mutex remove_cache_srcu quarantine_lock irq_context: 0 rtnl_mutex ppp_mutex remove_cache_srcu &c->lock irq_context: 0 rtnl_mutex ppp_mutex remove_cache_srcu &n->list_lock irq_context: 0 rtnl_mutex ppp_mutex remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 rtnl_mutex ppp_mutex remove_cache_srcu &obj_hash[i].lock irq_context: 0 rtnl_mutex ppp_mutex remove_cache_srcu &rq->__lock irq_context: 0 rtnl_mutex ppp_mutex remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex ppp_mutex &k->list_lock irq_context: 0 rtnl_mutex ppp_mutex gdp_mutex irq_context: 0 rtnl_mutex ppp_mutex gdp_mutex &k->list_lock irq_context: 0 rtnl_mutex ppp_mutex lock irq_context: 0 rtnl_mutex ppp_mutex lock kernfs_idr_lock irq_context: 0 rtnl_mutex ppp_mutex lock kernfs_idr_lock &c->lock irq_context: 0 rtnl_mutex ppp_mutex lock kernfs_idr_lock pool_lock#2 irq_context: 0 rtnl_mutex ppp_mutex &rq->__lock irq_context: 0 rtnl_mutex ppp_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex ppp_mutex &root->kernfs_rwsem irq_context: 0 rtnl_mutex ppp_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 rtnl_mutex ppp_mutex bus_type_sem irq_context: 0 rtnl_mutex ppp_mutex sysfs_symlink_target_lock irq_context: 0 rtnl_mutex ppp_mutex &c->lock irq_context: 0 rtnl_mutex ppp_mutex &root->kernfs_rwsem irq_context: 0 rtnl_mutex ppp_mutex &root->kernfs_rwsem &rq->__lock irq_context: 0 rtnl_mutex ppp_mutex &root->kernfs_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex ppp_mutex &dev->power.lock irq_context: 0 rtnl_mutex ppp_mutex dpm_list_mtx irq_context: 0 rtnl_mutex ppp_mutex &n->list_lock irq_context: 0 rtnl_mutex ppp_mutex &n->list_lock &c->lock irq_context: 0 rtnl_mutex ppp_mutex uevent_sock_mutex irq_context: 0 rtnl_mutex ppp_mutex uevent_sock_mutex fs_reclaim irq_context: 0 rtnl_mutex ppp_mutex uevent_sock_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 rtnl_mutex ppp_mutex uevent_sock_mutex pool_lock#2 irq_context: 0 rtnl_mutex ppp_mutex uevent_sock_mutex nl_table_lock irq_context: 0 rtnl_mutex ppp_mutex uevent_sock_mutex &obj_hash[i].lock irq_context: 0 rtnl_mutex ppp_mutex uevent_sock_mutex nl_table_wait.lock irq_context: 0 
rtnl_mutex ppp_mutex subsys mutex#17 irq_context: 0 rtnl_mutex ppp_mutex subsys mutex#17 &rq->__lock irq_context: 0 rtnl_mutex ppp_mutex subsys mutex#17 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex ppp_mutex subsys mutex#17 &k->k_lock irq_context: 0 rtnl_mutex ppp_mutex &dir->lock#2 irq_context: 0 rtnl_mutex ppp_mutex uevent_sock_mutex &____s->seqcount irq_context: 0 rtnl_mutex ppp_mutex uevent_sock_mutex &rq->__lock irq_context: 0 rtnl_mutex ppp_mutex uevent_sock_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex ppp_mutex dev_hotplug_mutex irq_context: 0 rtnl_mutex ppp_mutex dev_hotplug_mutex &dev->power.lock irq_context: 0 rtnl_mutex ppp_mutex dev_base_lock irq_context: 0 rtnl_mutex ppp_mutex input_pool.lock irq_context: 0 rtnl_mutex ppp_mutex batched_entropy_u32.lock irq_context: 0 rtnl_mutex ppp_mutex &tbl->lock irq_context: 0 rtnl_mutex ppp_mutex stock_lock irq_context: 0 rtnl_mutex ppp_mutex sysctl_lock irq_context: 0 rtnl_mutex ppp_mutex nl_table_lock irq_context: 0 rtnl_mutex ppp_mutex nl_table_wait.lock irq_context: 0 rtnl_mutex ppp_mutex proc_subdir_lock irq_context: 0 rtnl_mutex ppp_mutex proc_inum_ida.xa_lock irq_context: 0 rtnl_mutex ppp_mutex proc_subdir_lock irq_context: 0 rtnl_mutex ppp_mutex &idev->mc_lock irq_context: 0 rtnl_mutex ppp_mutex &idev->mc_lock fs_reclaim irq_context: 0 rtnl_mutex ppp_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 rtnl_mutex ppp_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 rtnl_mutex ppp_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex ppp_mutex &idev->mc_lock pool_lock#2 irq_context: 0 rtnl_mutex ppp_mutex &idev->mc_lock &obj_hash[i].lock irq_context: 0 rtnl_mutex ppp_mutex &idev->mc_lock &obj_hash[i].lock pool_lock irq_context: 0 rtnl_mutex ppp_mutex &idev->mc_lock &rq->__lock irq_context: 0 rtnl_mutex ppp_mutex &idev->mc_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex ppp_mutex &pnettable->lock irq_context: 0 rtnl_mutex ppp_mutex smc_ib_devices.mutex irq_context: 0 rtnl_mutex &net->xdp.lock &xs->mutex irq_context: 0 rtnl_mutex &net->xdp.lock &xs->mutex &rq->__lock irq_context: 0 pernet_ops_rwsem mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 pernet_ops_rwsem mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex ppp_mutex &____s->seqcount#2 irq_context: 0 rtnl_mutex ppp_mutex &____s->seqcount irq_context: 0 pernet_ops_rwsem ipvs->est_mutex rcu_node_0 irq_context: 0 pernet_ops_rwsem ipvs->est_mutex rcu_read_lock rcu_node_0 irq_context: 0 pernet_ops_rwsem ipvs->est_mutex rcu_read_lock &rq->__lock irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_state.exp_mutex rcu_read_lock &rq->__lock irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_state.exp_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex ppp_mutex &root->kernfs_rwsem &rq->__lock irq_context: 0 rtnl_mutex ppp_mutex &root->kernfs_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex nf_hook_mutex rcu_read_lock &rcu_state.expedited_wq irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &rxe->usdev_lock &rcu_state.expedited_wq irq_context: 0 pernet_ops_rwsem &wq->mutex &rq->__lock irq_context: softirq (&app->join_timer) &app->lock 
&____s->seqcount#2 irq_context: softirq (&app->join_timer) &app->lock &____s->seqcount irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &rxe->usdev_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &rxe->usdev_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &rxe->usdev_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex nf_hook_mutex rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex nf_hook_mutex rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex nf_hook_mutex rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem ovs_mutex &rq->__lock irq_context: 0 pernet_ops_rwsem ovs_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem rtnl_mutex cpu_hotplug_lock xps_map_mutex &rq->__lock irq_context: 0 pernet_ops_rwsem rtnl_mutex cpu_hotplug_lock xps_map_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem uevent_sock_mutex rcu_read_lock rcu_node_0 irq_context: 0 pernet_ops_rwsem uevent_sock_mutex rcu_read_lock &rq->__lock irq_context: 0 pernet_ops_rwsem uevent_sock_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem &root->kernfs_rwsem rcu_read_lock &rq->__lock irq_context: 0 pernet_ops_rwsem &root->kernfs_rwsem rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem &root->kernfs_rwsem rcu_read_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 pernet_ops_rwsem &net->sctp.addr_wq_lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 pernet_ops_rwsem &net->sctp.addr_wq_lock fill_pool_map-wait-type-override &c->lock irq_context: 0 pernet_ops_rwsem &net->sctp.addr_wq_lock fill_pool_map-wait-type-override pool_lock irq_context: softirq (&app->join_timer) rcu_read_lock_bh &data->lock &obj_hash[i].lock irq_context: softirq (&app->join_timer) rcu_read_lock_bh &data->lock &base->lock irq_context: softirq (&app->join_timer) rcu_read_lock_bh &data->lock &base->lock &obj_hash[i].lock irq_context: 0 &xs->mutex &mm->mmap_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 rtnl_mutex team->team_lock_key#8 rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 rtnl_mutex team->team_lock_key#8 &tbl->lock irq_context: 0 rtnl_mutex team->team_lock_key#8 &pn->hash_lock irq_context: 0 rtnl_mutex team->team_lock_key#8 &net->ipv6.fib6_gc_lock rcu_read_lock &tb->tb6_lock irq_context: 0 rtnl_mutex team->team_lock_key#8 &net->ipv6.fib6_gc_lock rcu_read_lock &tb->tb6_lock &net->ipv6.fib6_walker_lock irq_context: 0 rtnl_mutex team->team_lock_key#8 &net->ipv6.fib6_gc_lock &obj_hash[i].lock irq_context: 0 rtnl_mutex &bat_priv->tt.changes_list_lock &obj_hash[i].lock irq_context: 0 rtnl_mutex &bat_priv->tt.changes_list_lock pool_lock#2 irq_context: 0 rtnl_mutex &idev->mc_lock &batadv_netdev_addr_lock_key &obj_hash[i].lock irq_context: 0 rtnl_mutex &idev->mc_lock &batadv_netdev_addr_lock_key krc.lock irq_context: 0 rtnl_mutex &batadv_netdev_addr_lock_key irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_NETLINK rcu_read_lock &cfs_rq->removed.lock irq_context: 0 pernet_ops_rwsem 
k-sk_lock-AF_NETLINK rcu_read_lock &obj_hash[i].lock irq_context: 0 &bat_priv->forw_bcast_list_lock irq_context: 0 &bat_priv->forw_bat_list_lock irq_context: 0 &bat_priv->gw.list_lock irq_context: 0 (work_completion)(&(&bat_priv->bat_v.ogm_wq)->work) irq_context: 0 &bat_priv->bat_v.ogm_buff_mutex irq_context: 0 &bat_priv->bat_v.ogm_buff_mutex &obj_hash[i].lock irq_context: 0 &bat_priv->bat_v.ogm_buff_mutex pool_lock#2 irq_context: 0 &bat_priv->tvlv.container_list_lock irq_context: 0 &bat_priv->tvlv.handler_list_lock irq_context: 0 (work_completion)(&(&bat_priv->nc.work)->work) irq_context: 0 key#17 irq_context: 0 key#18 irq_context: 0 &bat_priv->tvlv.container_list_lock &obj_hash[i].lock irq_context: 0 &bat_priv->tvlv.container_list_lock pool_lock#2 irq_context: 0 (work_completion)(&(&bat_priv->dat.work)->work) irq_context: 0 &hash->list_locks[i] irq_context: 0 (work_completion)(&(&bat_priv->bla.work)->work) irq_context: 0 key#21 irq_context: 0 (work_completion)(&(&bat_priv->mcast.work)->work) irq_context: 0 (work_completion)(&(&bat_priv->tt.work)->work) irq_context: 0 key#16 irq_context: 0 key#20 irq_context: 0 pernet_ops_rwsem ipvs->est_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 &bat_priv->tt.req_list_lock irq_context: 0 pernet_ops_rwsem ipvs->est_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &bat_priv->tt.changes_list_lock irq_context: 0 &bat_priv->tt.roam_list_lock irq_context: 0 (work_completion)(&(&bat_priv->orig_work)->work) irq_context: 0 key#19 irq_context: 0 rtnl_mutex team->team_lock_key#8 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex uevent_sock_mutex uevent_sock_mutex.wait_lock irq_context: 0 pernet_ops_rwsem devices_rwsem rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem devices_rwsem rcu_read_lock &rcu_state.expedited_wq irq_context: 0 pernet_ops_rwsem devices_rwsem rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 pernet_ops_rwsem devices_rwsem rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 pernet_ops_rwsem devices_rwsem rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex fill_pool_map-wait-type-override &c->lock irq_context: 0 pernet_ops_rwsem rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex defrag6_mutex fill_pool_map-wait-type-override &c->lock irq_context: 0 pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex defrag6_mutex fill_pool_map-wait-type-override &n->list_lock irq_context: 0 pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex defrag6_mutex fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex defrag6_mutex fill_pool_map-wait-type-override &rq->__lock irq_context: 0 pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex defrag6_mutex fill_pool_map-wait-type-override &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem rtnl_mutex &caifn->caifdevs.lock &rnp->exp_wq[2] irq_context: 0 pernet_ops_rwsem rtnl_mutex &caifn->caifdevs.lock &rq->__lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &caifn->caifdevs.lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events fqdir_free_work 
rcu_state.barrier_mutex rcu_node_0 irq_context: 0 (wq_completion)events fqdir_free_work rcu_state.barrier_mutex &rcu_state.expedited_wq irq_context: 0 (wq_completion)events fqdir_free_work rcu_state.barrier_mutex &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)events fqdir_free_work rcu_state.barrier_mutex &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events fqdir_free_work rcu_state.barrier_mutex &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events fqdir_free_work &p->pi_lock &cfs_rq->removed.lock irq_context: 0 rtnl_mutex team->team_lock_key#8 &obj_hash[i].lock pool_lock irq_context: 0 rtnl_mutex team->team_lock_key#9 rcu_read_lock &pool->lock/1 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 rtnl_mutex team->team_lock_key#9 rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 rtnl_mutex team->team_lock_key#9 rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex team->team_lock_key#9 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex team->team_lock_key#9 &tbl->lock irq_context: 0 rtnl_mutex team->team_lock_key#9 &pn->hash_lock irq_context: 0 rtnl_mutex team->team_lock_key#9 &net->ipv6.fib6_gc_lock rcu_read_lock &tb->tb6_lock irq_context: 0 rtnl_mutex team->team_lock_key#9 &net->ipv6.fib6_gc_lock rcu_read_lock &tb->tb6_lock &net->ipv6.fib6_walker_lock irq_context: 0 rtnl_mutex team->team_lock_key#9 &net->ipv6.fib6_gc_lock &obj_hash[i].lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &rcu_state.expedited_wq irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem jbd2_handle &rq->__lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem jbd2_handle &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &c->lock irq_context: 0 rtnl_mutex team->team_lock_key#7 &idev->mc_lock &batadv_netdev_addr_lock_key irq_context: 0 rtnl_mutex team->team_lock_key#7 &idev->mc_lock &batadv_netdev_addr_lock_key &obj_hash[i].lock irq_context: 0 rtnl_mutex team->team_lock_key#7 &idev->mc_lock &batadv_netdev_addr_lock_key pool_lock#2 irq_context: 0 rtnl_mutex team->team_lock_key#7 &idev->mc_lock &batadv_netdev_addr_lock_key krc.lock irq_context: 0 rtnl_mutex team->team_lock_key#7 rcu_read_lock &pool->lock/1 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 rtnl_mutex team->team_lock_key#7 &net->ipv6.fib6_gc_lock rcu_read_lock &tb->tb6_lock irq_context: 0 rtnl_mutex 
team->team_lock_key#7 &net->ipv6.fib6_gc_lock rcu_read_lock &tb->tb6_lock &net->ipv6.fib6_walker_lock irq_context: 0 rtnl_mutex team->team_lock_key#7 &net->ipv6.fib6_gc_lock &obj_hash[i].lock irq_context: 0 rtnl_mutex team->team_lock_key#7 batched_entropy_u32.lock irq_context: 0 rtnl_mutex team->team_lock_key#7 pcpu_alloc_mutex irq_context: 0 rtnl_mutex team->team_lock_key#7 pcpu_alloc_mutex pcpu_lock irq_context: 0 rtnl_mutex team->team_lock_key#7 proc_subdir_lock irq_context: 0 rtnl_mutex team->team_lock_key#7 &obj_hash[i].lock pool_lock irq_context: 0 rtnl_mutex team->team_lock_key#7 &idev->mc_lock fs_reclaim irq_context: 0 rtnl_mutex team->team_lock_key#7 &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 rtnl_mutex team->team_lock_key#7 &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 rtnl_mutex team->team_lock_key#7 &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex team->team_lock_key#7 &idev->mc_lock &c->lock irq_context: 0 rtnl_mutex team->team_lock_key#7 &idev->mc_lock &n->list_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &wb->list_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &wb->list_lock &sb->s_type->i_lock_key#22 irq_context: 0 sk_lock-AF_TIPC quarantine_lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &p->pi_lock &cfs_rq->removed.lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &idev->mc_lock fill_pool_map-wait-type-override &rq->__lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &idev->mc_lock fill_pool_map-wait-type-override &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&tbl->gc_work)->work) &rcu_state.expedited_wq irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&tbl->gc_work)->work) &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&tbl->gc_work)->work) &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&tbl->gc_work)->work) &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-crypt-wg0#10 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock batched_entropy_u8.lock irq_context: 0 (wq_completion)wg-crypt-wg0#10 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock kfence_freelist_lock irq_context: 0 (wq_completion)wg-crypt-wg0#10 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg0#10 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)wg-crypt-wg0#10 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)wg-crypt-wg0#10 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &n->lock irq_context: 0 (wq_completion)wg-crypt-wg0#10 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#9 irq_context: 0 pernet_ops_rwsem kernfs_idr_lock 
rcu_read_lock &p->pi_lock irq_context: 0 pernet_ops_rwsem kernfs_idr_lock rcu_read_lock &p->pi_lock &rq->__lock irq_context: 0 pernet_ops_rwsem kernfs_idr_lock rcu_read_lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem &root->kernfs_rwsem kernfs_idr_lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 pernet_ops_rwsem &root->kernfs_rwsem kernfs_idr_lock fill_pool_map-wait-type-override pool_lock irq_context: softirq (&hsr->announce_timer) rcu_read_lock batched_entropy_u8.lock crngs.lock irq_context: 0 (wq_completion)events free_ipc_work sysctl_lock krc.lock &obj_hash[i].lock irq_context: 0 (wq_completion)events free_ipc_work sysctl_lock krc.lock &base->lock irq_context: 0 (wq_completion)events free_ipc_work sysctl_lock krc.lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg0#17 (work_completion)(&peer->transmit_handshake_work) &____s->seqcount#2 irq_context: 0 (wq_completion)wg-kex-wg0#17 (work_completion)(&peer->transmit_handshake_work) &____s->seqcount irq_context: 0 (wq_completion)events (work_completion)(&rdev->destroy_work) rtnl_mutex &rdev->wiphy.mtx &rnp->exp_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->destroy_work) rtnl_mutex &rdev->wiphy.mtx &rnp->exp_wq[1] irq_context: 0 (wq_completion)events (work_completion)(&rdev->destroy_work) rtnl_mutex &rdev->wiphy.mtx &pool->lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->destroy_work) rtnl_mutex &rdev->wiphy.mtx &pool->lock &p->pi_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->destroy_work) rtnl_mutex &rdev->wiphy.mtx &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->destroy_work) rtnl_mutex &rdev->wiphy.mtx &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&rdev->destroy_work) rtnl_mutex &rdev->wiphy.mtx &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->destroy_work) rtnl_mutex &rdev->wiphy.mtx &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-crypt-wg1#10 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh batched_entropy_u32.lock crngs.lock irq_context: 0 &ep->mtx gdp_mutex lock kernfs_idr_lock &c->lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx rcu_read_lock &rcu_state.gp_wq irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &root->kernfs_rwsem &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)wg-kex-wg0#17 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 (wq_completion)wg-kex-wg0#17 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg1#18 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); 
(typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock key#22 irq_context: 0 &nft_net->commit_mutex (work_completion)(&ht->run_work) &rq->__lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem &sem->wait_lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex stock_lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex pcpu_lock stock_lock irq_context: 0 &sb->s_type->i_mutex_key#8 batched_entropy_u8.lock irq_context: 0 &sb->s_type->i_mutex_key#8 kfence_freelist_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &meta->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&port->bc_work) &meta->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&port->bc_work) kfence_freelist_lock irq_context: 0 pernet_ops_rwsem rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &nft_net->commit_mutex &____s->seqcount#2 irq_context: 0 rtnl_mutex &xs->mutex rcu_read_lock rcu_node_0 irq_context: 0 rtnl_mutex &xs->mutex rcu_read_lock &rq->__lock irq_context: 0 rtnl_mutex &xs->mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex fs_reclaim mmu_notifier_invalidate_range_start &cfs_rq->removed.lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex fs_reclaim mmu_notifier_invalidate_range_start &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&umem->work) &meta->lock irq_context: 0 (wq_completion)events (work_completion)(&pwq->unbound_release_work) &rcu_state.expedited_wq irq_context: 0 (wq_completion)events (work_completion)(&pwq->unbound_release_work) &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)events (work_completion)(&pwq->unbound_release_work) &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&pwq->unbound_release_work) &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events free_ipc_work &xa->xa_lock#12 &obj_hash[i].lock pool_lock irq_context: 0 &xs->mutex batched_entropy_u8.lock irq_context: 0 &xs->mutex kfence_freelist_lock irq_context: 0 namespace_sem pcpu_alloc_mutex.wait_lock irq_context: 0 namespace_sem &p->pi_lock irq_context: 0 namespace_sem &p->pi_lock &rq->__lock irq_context: 0 namespace_sem &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&umem->work) kfence_freelist_lock irq_context: 0 sk_lock-AF_CAN &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex pool_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock fill_pool_map-wait-type-override &n->list_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &root->kernfs_rwsem rcu_read_lock &rcu_state.expedited_wq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &root->kernfs_rwsem rcu_read_lock 
&rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &root->kernfs_rwsem rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &root->kernfs_rwsem rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem rtnl_mutex &pnn->routes.lock &rq->__lock irq_context: softirq &(&ovs_net->masks_rebalance)->timer rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &obj_hash[i].lock irq_context: softirq &(&ovs_net->masks_rebalance)->timer rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &base->lock irq_context: softirq &(&ovs_net->masks_rebalance)->timer rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&(&conn->disc_work)->work) irq_context: 0 (wq_completion)hci0#2 (work_completion)(&(&conn->disc_work)->work) &list->lock#9 irq_context: 0 (wq_completion)hci0#2 (work_completion)(&(&conn->disc_work)->work) rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)hci0#2 (work_completion)(&(&conn->disc_work)->work) rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&(&conn->disc_work)->work) rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&(&conn->disc_work)->work) rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&(&conn->disc_work)->work) rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem rcu_read_lock &rq->__lock &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem rcu_read_lock &rq->__lock &base->lock irq_context: 0 pernet_ops_rwsem rcu_read_lock &rq->__lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events free_ipc_work &pool->lock &p->pi_lock irq_context: 0 (wq_completion)events free_ipc_work &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events free_ipc_work &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rcu_read_lock &trie->lock irq_context: 0 rcu_read_lock &trie->lock &zone->lock irq_context: 0 rcu_read_lock &trie->lock &____s->seqcount irq_context: 0 rcu_read_lock &trie->lock pool_lock#2 irq_context: 0 rcu_read_lock &trie->lock stock_lock irq_context: 0 &xs->mutex &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 cb_lock nlk_cb_mutex-GENERIC genl_mutex &c->lock irq_context: 0 nlk_cb_mutex-GENERIC genl_mutex &____s->seqcount irq_context: 0 nlk_cb_mutex-GENERIC genl_mutex rcu_read_lock pool_lock#2 irq_context: 0 nlk_cb_mutex-GENERIC genl_mutex rcu_read_lock rcu_node_0 irq_context: 0 nlk_cb_mutex-GENERIC genl_mutex rcu_read_lock &rq->__lock irq_context: 0 nlk_cb_mutex-GENERIC genl_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex _xmit_NONE pool_lock#2 irq_context: 0 rtnl_mutex _xmit_NONE &obj_hash[i].lock irq_context: 0 rtnl_mutex _xmit_NONE krc.lock irq_context: 0 crypto_cfg_mutex fs_reclaim irq_context: 0 crypto_cfg_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 crypto_cfg_mutex pool_lock#2 irq_context: 0 crypto_cfg_mutex rlock-AF_NETLINK irq_context: 0 crypto_alg_sem (crypto_chain).rwsem rcu_read_lock &pool->lock irq_context: 0 crypto_alg_sem (crypto_chain).rwsem rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 crypto_alg_sem 
(crypto_chain).rwsem rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 crypto_alg_sem (crypto_chain).rwsem rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 crypto_alg_sem (crypto_chain).rwsem rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 crypto_alg_sem (crypto_chain).rwsem &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&crct10dif_rehash_work) irq_context: 0 (wq_completion)events (work_completion)(&crct10dif_rehash_work) crc_t10dif_mutex irq_context: 0 (wq_completion)events (work_completion)(&crct10dif_rehash_work) crc_t10dif_mutex crypto_alg_sem irq_context: 0 (wq_completion)events (work_completion)(&crct10dif_rehash_work) crc_t10dif_mutex crypto_alg_sem crypto_alg_sem.wait_lock irq_context: 0 (wq_completion)events (work_completion)(&crct10dif_rehash_work) crc_t10dif_mutex crypto_alg_sem &pool->lock irq_context: 0 (wq_completion)events (work_completion)(&crct10dif_rehash_work) crc_t10dif_mutex crypto_alg_sem &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&crct10dif_rehash_work) crc_t10dif_mutex crypto_alg_sem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 crypto_alg_sem (crypto_chain).rwsem &cfs_rq->removed.lock irq_context: 0 crypto_alg_sem (crypto_chain).rwsem &obj_hash[i].lock irq_context: 0 crypto_alg_sem (crypto_chain).rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 crypto_alg_sem.wait_lock irq_context: 0 (wq_completion)events (work_completion)(&crct10dif_rehash_work) crc_t10dif_mutex fs_reclaim irq_context: 0 (wq_completion)events (work_completion)(&crct10dif_rehash_work) crc_t10dif_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events (work_completion)(&crct10dif_rehash_work) crc_t10dif_mutex pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&crct10dif_rehash_work) rcu_state.exp_mutex rcu_node_0 irq_context: 0 (wq_completion)events (work_completion)(&crct10dif_rehash_work) rcu_state.exp_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&crct10dif_rehash_work) rcu_state.exp_mutex rcu_read_lock &pool->lock irq_context: 0 (wq_completion)events (work_completion)(&crct10dif_rehash_work) rcu_state.exp_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&crct10dif_rehash_work) rcu_state.exp_mutex &rnp->exp_wq[1] irq_context: 0 (wq_completion)events (work_completion)(&crct10dif_rehash_work) rcu_state.exp_mutex &pool->lock irq_context: 0 (wq_completion)events (work_completion)(&crct10dif_rehash_work) rcu_state.exp_mutex &pool->lock &p->pi_lock irq_context: 0 (wq_completion)events (work_completion)(&crct10dif_rehash_work) rcu_state.exp_mutex &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&crct10dif_rehash_work) rcu_state.exp_mutex &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&crct10dif_rehash_work) rcu_state.exp_mutex &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&crct10dif_rehash_work) rcu_state.exp_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&crct10dif_rehash_work) &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&crct10dif_rehash_work) pool_lock#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&map->work) &zone->lock irq_context: 0 (wq_completion)events_unbound 
(work_completion)(&map->work) &zone->lock &____s->seqcount irq_context: 0 rtnl_mutex &app->lock &obj_hash[i].lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex lweventlist_lock &c->lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex __ip_vs_mutex &rq->__lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex __ip_vs_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#8 &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&(&hdev->cmd_timer)->work) console_owner_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &pnn->routes.lock &rq->__lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &pnn->routes.lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 tracepoints_mutex rcu_state.exp_mutex &rq->__lock &cfs_rq->removed.lock irq_context: 0 tracepoints_mutex rcu_state.exp_mutex rcu_read_lock &rq->__lock irq_context: 0 tracepoints_mutex rcu_state.exp_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 tracepoints_mutex rcu_state.exp_mutex &cfs_rq->removed.lock irq_context: 0 tracepoints_mutex rcu_state.exp_mutex pool_lock#2 irq_context: 0 sk_lock-AF_INET rcu_read_lock &n->lock irq_context: 0 sk_lock-AF_INET rcu_read_lock &n->lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET rcu_read_lock &n->lock &base->lock irq_context: 0 sk_lock-AF_INET rcu_read_lock &n->lock &base->lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET rcu_read_lock &n->lock pool_lock#2 irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock rcu_read_lock &r->producer_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem devices_rwsem.wait_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem &rq->__lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem devices_rwsem devices_rwsem.wait_lock irq_context: 0 rtnl_mutex ipvs->sync_mutex k-sk_lock-AF_INET &____s->seqcount#8 irq_context: 0 rtnl_mutex ipvs->sync_mutex k-sk_lock-AF_INET batched_entropy_u32.lock irq_context: 0 rtnl_mutex ipvs->sync_mutex k-sk_lock-AF_INET rcu_read_lock pool_lock#2 irq_context: 0 rtnl_mutex ipvs->sync_mutex k-sk_lock-AF_INET rcu_read_lock &dir->lock#2 irq_context: 0 rtnl_mutex ipvs->sync_mutex k-sk_lock-AF_INET rcu_read_lock &ul->lock irq_context: 0 rtnl_mutex ipvs->sync_mutex k-sk_lock-AF_INET batched_entropy_u16.lock irq_context: 0 &ipvs->sync_lock irq_context: 0 &ipvs->sync_buff_lock irq_context: softirq (&n->timer) rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock rcu_read_lock &r->producer_lock irq_context: 0 (wq_completion)gid-cache-wq (work_completion)(&ndev_work->work) devices_rwsem devices_rwsem.wait_lock irq_context: 0 pernet_ops_rwsem devices_rwsem.wait_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem.wait_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem &rxe->usdev_lock rtnl_mutex rtnl_mutex.wait_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem &rxe->usdev_lock rtnl_mutex &rq->__lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem &rxe->usdev_lock rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem &rxe->usdev_lock rtnl_mutex.wait_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem &rxe->usdev_lock &p->pi_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem &rxe->usdev_lock &p->pi_lock &rq->__lock irq_context: 0 
&rdma_nl_types[idx].sem link_ops_rwsem &rxe->usdev_lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem rtnl_mutex rtnl_mutex.wait_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem rtnl_mutex net_rwsem rcu_read_lock &c->lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem rtnl_mutex.wait_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem &zone->lock &____s->seqcount irq_context: 0 pernet_ops_rwsem rtnl_mutex remove_cache_srcu rcu_read_lock &rcu_state.gp_wq irq_context: 0 pernet_ops_rwsem rtnl_mutex remove_cache_srcu rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex remove_cache_srcu rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 pernet_ops_rwsem rtnl_mutex remove_cache_srcu rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC fill_pool_map-wait-type-override &c->lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC fill_pool_map-wait-type-override pool_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem rcu_read_lock &rcu_state.expedited_wq irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem purge_vmap_area_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)bond3 (work_completion)(&(&bond->ad_work)->work) rcu_node_0 irq_context: 0 (wq_completion)bond3 (work_completion)(&(&bond->ad_work)->work) &rcu_state.expedited_wq irq_context: 0 (wq_completion)bond3 (work_completion)(&(&bond->ad_work)->work) &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)bond3 (work_completion)(&(&bond->ad_work)->work) &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond3 (work_completion)(&(&bond->ad_work)->work) &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem &xa->xa_lock#16 &obj_hash[i].lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem &x->wait#28 irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem &x->wait#29 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->orig_work)->work) fill_pool_map-wait-type-override &n->list_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->orig_work)->work) fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem &rcu_state.expedited_wq irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem &rcu_state.expedited_wq &p->pi_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem krc.lock 
irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem &xa->xa_lock#15 &n->list_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem &xa->xa_lock#15 &n->list_lock &c->lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem &sem->wait_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem &root->kernfs_rwsem &sem->wait_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem &root->kernfs_rwsem kernfs_idr_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem &root->kernfs_rwsem rcu_node_0 irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem &root->kernfs_rwsem &rcu_state.expedited_wq irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem &root->kernfs_rwsem &rcu_state.expedited_wq &p->pi_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem &root->kernfs_rwsem &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem &root->kernfs_rwsem &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem &root->kernfs_rwsem &obj_hash[i].lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem &root->kernfs_rwsem pool_lock#2 irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem kernfs_idr_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem kernfs_idr_lock &obj_hash[i].lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem kernfs_idr_lock pool_lock#2 irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem uevent_sock_mutex &____s->seqcount#2 irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem uevent_sock_mutex &____s->seqcount irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem &root->kernfs_rwsem &sem->wait_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem uevent_sock_mutex &rq->__lock &cfs_rq->removed.lock irq_context: 0 sb_writers &type->i_mutex_dir_key/1 tomoyo_ss rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 sb_writers &type->i_mutex_dir_key/1 tomoyo_ss rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem bus_type_sem &rq->__lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem &root->kernfs_rwsem &rq->__lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem &root->kernfs_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem 
devices_rwsem clients_rwsem &device->client_data_rwsem &root->kernfs_rwsem rcu_node_0 irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem remove_cache_srcu irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem remove_cache_srcu quarantine_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem remove_cache_srcu &c->lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem remove_cache_srcu &n->list_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem remove_cache_srcu &obj_hash[i].lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem remove_cache_srcu rcu_read_lock rcu_node_0 irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem remove_cache_srcu rcu_read_lock &rq->__lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem remove_cache_srcu &rq->__lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem &xa->xa_lock#17 &____s->seqcount irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem &xa->xa_lock#17 rcu_read_lock pool_lock#2 irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem &xa->xa_lock#17 &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&smcibdev->port_event_work) &rxe->usdev_lock rtnl_mutex rtnl_mutex.wait_lock irq_context: 0 (wq_completion)events (work_completion)(&smcibdev->port_event_work) &rxe->usdev_lock rtnl_mutex &pool->lock irq_context: 0 (wq_completion)events (work_completion)(&smcibdev->port_event_work) &rxe->usdev_lock rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&smcibdev->port_event_work) &rxe->usdev_lock rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem rcu_read_lock rcu_node_0 irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem rcu_read_lock &rq->__lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &p->pi_lock &cfs_rq->removed.lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &root->kernfs_rwsem &p->pi_lock irq_context: 0 (wq_completion)events (work_completion)(&smcibdev->port_event_work) &rxe->usdev_lock rtnl_mutex.wait_lock irq_context: 0 (wq_completion)events (work_completion)(&smcibdev->port_event_work) &rxe->usdev_lock &p->pi_lock irq_context: 0 (wq_completion)events (work_completion)(&smcibdev->port_event_work) &rxe->usdev_lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&smcibdev->port_event_work) &rxe->usdev_lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq 
irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &root->kernfs_rwsem &sem->wait_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex uevent_sock_mutex fs_reclaim &rq->__lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex uevent_sock_mutex fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex uevent_sock_mutex.wait_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &root->kernfs_rwsem &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &root->kernfs_rwsem &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &rxe->usdev_lock rtnl_mutex rtnl_mutex.wait_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &rxe->usdev_lock rtnl_mutex &rq->__lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &rxe->usdev_lock rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem rtnl_mutex dev_pm_qos_sysfs_mtx &p->pi_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &device->compat_devs_mutex &lock->wait_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &device->compat_devs_mutex &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &device->compat_devs_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &rq->__lock &cfs_rq->removed.lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &root->kernfs_rwsem rcu_read_lock &rq->__lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &root->kernfs_rwsem rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &lock->wait_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &p->pi_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &p->pi_lock &rq->__lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &rq->__lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond2 (work_completion)(&(&bond->mii_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem.wait_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem &p->pi_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem &p->pi_lock &rq->__lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq 
irq_context: 0 (wq_completion)wg-kex-wg0#17 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &data->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg0#17 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &data->lock &base->lock irq_context: 0 (wq_completion)wg-kex-wg0#17 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &data->lock &base->lock &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem remove_cache_srcu rcu_read_lock &cfs_rq->removed.lock irq_context: 0 pernet_ops_rwsem remove_cache_srcu rcu_read_lock &obj_hash[i].lock irq_context: softirq (&n->timer) &n->lock &____s->seqcount#2 irq_context: softirq (&n->timer) &n->lock &____s->seqcount irq_context: 0 misc_mtx &rq->__lock &cfs_rq->removed.lock irq_context: softirq (&peer->timer_retransmit_handshake) &list->lock#17 &data->lock irq_context: softirq (&peer->timer_retransmit_handshake) &list->lock#17 &obj_hash[i].lock irq_context: softirq (&peer->timer_retransmit_handshake) &list->lock#17 pool_lock#2 irq_context: 0 (wq_completion)wg-crypt-wg1#10 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh &data->lock irq_context: 0 rtnl_mutex cpu_hotplug_lock wq_pool_mutex rcu_read_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_NETROM rcu_node_0 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_NETROM &rcu_state.expedited_wq irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_NETROM &rcu_state.expedited_wq &p->pi_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_NETROM &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_NETROM &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex cpu_hotplug_lock wq_pool_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex cpu_hotplug_lock wq_pool_mutex rcu_read_lock &cfs_rq->removed.lock irq_context: 0 rtnl_mutex cpu_hotplug_lock wq_pool_mutex rcu_read_lock &obj_hash[i].lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem rcu_node_0 irq_context: 0 pernet_ops_rwsem &wq->mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem rcu_read_lock rcu_node_0 irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem rcu_read_lock &rq->__lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sbi->s_writepages_rwsem &journal->j_state_lock irq_context: 0 &sbi->s_writepages_rwsem &journal->j_state_lock &journal->j_wait_commit irq_context: 0 &sbi->s_writepages_rwsem &journal->j_state_lock &journal->j_wait_commit &p->pi_lock irq_context: 0 &sbi->s_writepages_rwsem &journal->j_state_lock &journal->j_wait_commit &p->pi_lock &rq->__lock irq_context: 0 &sbi->s_writepages_rwsem &journal->j_state_lock &journal->j_wait_commit &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle irq_context: 0 &sbi->s_writepages_rwsem &journal->j_wait_commit irq_context: 0 &sbi->s_writepages_rwsem &journal->j_wait_done_commit irq_context: 0 &sbi->s_writepages_rwsem rcu_read_lock &pool->lock irq_context: 0 &sbi->s_writepages_rwsem rcu_read_lock &pool->lock &obj_hash[i].lock 
irq_context: 0 &sbi->s_writepages_rwsem rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 &sbi->s_writepages_rwsem rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 &sbi->s_writepages_rwsem rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 &sbi->s_writepages_rwsem rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_TIPC &mm->mmap_lock rcu_read_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &sbi->s_writepages_rwsem &journal->j_state_lock tk_core.seq.seqcount irq_context: 0 &sbi->s_writepages_rwsem &journal->j_state_lock &obj_hash[i].lock irq_context: 0 &sbi->s_writepages_rwsem &journal->j_state_lock &base->lock irq_context: 0 &sbi->s_writepages_rwsem &journal->j_state_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&pool->work) &cfs_rq->removed.lock irq_context: 0 pernet_ops_rwsem rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)bond3 (work_completion)(&(&bond->mii_work)->work) rcu_node_0 irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex uevent_sock_mutex.wait_lock irq_context: 0 (wq_completion)events (work_completion)(&pwq->unbound_release_work) &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond3 (work_completion)(&(&bond->mii_work)->work) &rcu_state.expedited_wq irq_context: 0 (wq_completion)bond3 (work_completion)(&(&bond->mii_work)->work) &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)wg-kex-wg2#17 (work_completion)(&peer->transmit_handshake_work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-crypt-wg2#10 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh &data->lock irq_context: 0 (wq_completion)wg-crypt-wg2#10 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)wg-crypt-wg2#10 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg2#10 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)wg-kex-wg2#19 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock &rq->__lock irq_context: 0 (wq_completion)wg-kex-wg2#19 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-kex-wg2#19 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh &data->lock irq_context: 0 (wq_completion)wg-kex-wg2#19 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: softirq &(&cache_cleaner)->timer rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 pernet_ops_rwsem &root->kernfs_rwsem rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)events fqdir_free_work quarantine_lock irq_context: 0 &xs->mutex &mm->mmap_lock ptlock_ptr(page)#2 rcu_read_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 sk_lock-AF_TIPC &data->lock &obj_hash[i].lock irq_context: 0 
sk_lock-AF_TIPC &data->lock &base->lock irq_context: 0 sk_lock-AF_TIPC &data->lock &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem rcu_read_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)vsock-loopback (work_completion)(&vsock->pkt_work) sk_lock-AF_VSOCK &____s->seqcount#2 irq_context: 0 (wq_completion)vsock-loopback (work_completion)(&vsock->pkt_work) sk_lock-AF_VSOCK &____s->seqcount irq_context: 0 sk_lock-AF_TIPC &mm->mmap_lock rcu_read_lock &cfs_rq->removed.lock irq_context: 0 sk_lock-AF_TIPC &mm->mmap_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 &mm->mmap_lock fill_pool_map-wait-type-override &n->list_lock irq_context: 0 &mm->mmap_lock fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 pernet_ops_rwsem ovs_mutex rcu_read_lock rcu_node_0 irq_context: 0 pernet_ops_rwsem ovs_mutex rcu_read_lock &rq->__lock irq_context: 0 pernet_ops_rwsem ovs_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem ovs_mutex rcu_read_lock &cfs_rq->removed.lock irq_context: 0 pernet_ops_rwsem ovs_mutex rcu_read_lock &obj_hash[i].lock irq_context: hardirq &x->wait#12 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)wg-crypt-wg2#10 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)wg-crypt-wg2#10 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-kex-wg0#20 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock rcu_read_lock_bh &peer->keypairs.keypair_update_lock &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg0#20 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock rcu_read_lock_bh &peer->keypairs.keypair_update_lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg2#20 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock rcu_read_lock_bh &peer->keypairs.keypair_update_lock &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg2#20 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock rcu_read_lock_bh &peer->keypairs.keypair_update_lock &obj_hash[i].lock irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 stock_lock rcu_read_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 
pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex fs_reclaim &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx remove_cache_srcu irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx remove_cache_srcu quarantine_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx remove_cache_srcu &c->lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx remove_cache_srcu &n->list_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx remove_cache_srcu &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx remove_cache_srcu pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &n->list_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &n->list_lock &c->lock irq_context: 0 sb_writers#4 mapping.invalidate_lock rcu_read_lock fill_pool_map-wait-type-override &c->lock irq_context: 0 &sb->s_type->i_mutex_key#8 &ei->i_data_sem &ei->i_es_lock &c->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem &bgl->locks[i].lock &ei->i_prealloc_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem &pa->pa_lock#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem &ei->i_prealloc_lock &pa->pa_lock#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem &ei->i_es_lock &c->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_es_lock key#2 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &type->s_umount_key#31 rcu_read_lock &wb->work_lock rcu_read_lock &pool->lock/1 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &type->s_umount_key#31 rcu_read_lock rcu_node_0 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &type->s_umount_key#31 rcu_read_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &type->s_umount_key#31 rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem jbd2_handle &mapping->private_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem jbd2_handle &mapping->private_lock rcu_read_lock &memcg->move_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &journal->j_wait_commit irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &journal->j_wait_done_commit irq_context: 0 
sb_writers#4 &sb->s_type->i_mutex_key#8 &wb->list_lock &sb->s_type->i_lock_key#22 irq_context: 0 sk_lock-AF_LLC llc_sap_list_lock irq_context: 0 sk_lock-AF_LLC llc_sap_list_lock pool_lock#2 irq_context: 0 sk_lock-AF_LLC fs_reclaim irq_context: 0 sk_lock-AF_LLC fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sk_lock-AF_LLC pool_lock#2 irq_context: 0 sk_lock-AF_LLC &dir->lock#2 irq_context: 0 sk_lock-AF_LLC &sap->sk_lock irq_context: 0 sk_lock-AF_LLC &c->lock irq_context: 0 sk_lock-AF_LLC wlock-AF_LLC irq_context: 0 sk_lock-AF_LLC rcu_read_lock_bh noop_qdisc.q.lock irq_context: 0 sk_lock-AF_LLC rcu_read_lock_bh &data->lock irq_context: 0 sk_lock-AF_LLC rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 sk_lock-AF_LLC rcu_read_lock_bh pool_lock#2 irq_context: 0 sk_lock-AF_LLC &obj_hash[i].lock irq_context: 0 sk_lock-AF_LLC &base->lock irq_context: 0 sk_lock-AF_LLC &base->lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_LLC &data->lock irq_context: 0 sk_lock-AF_LLC &ei->socket.wq.wait irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem &c->lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ei->i_prealloc_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ei->i_es_lock &c->lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ei->i_es_lock &n->list_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ei->i_es_lock &n->list_lock &c->lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 remove_cache_srcu &____s->seqcount irq_context: 0 &mm->mmap_lock sb_pagefaults mapping.invalidate_lock &type->s_umount_key#31 rcu_read_lock &c->lock irq_context: 0 &mm->mmap_lock sb_pagefaults mapping.invalidate_lock &type->s_umount_key#31 rcu_read_lock &wb->work_lock rcu_read_lock &pool->lock/1 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &bdi->wb_waitq &p->pi_lock &cfs_rq->removed.lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &ei->i_es_lock &____s->seqcount#2 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &ei->i_es_lock &____s->seqcount irq_context: 0 sk_lock-AF_LLC rcu_read_lock &ei->socket.wq.wait irq_context: 0 sk_lock-AF_LLC rcu_read_lock &ei->socket.wq.wait &p->pi_lock irq_context: 0 sk_lock-AF_LLC rcu_read_lock &ei->socket.wq.wait &p->pi_lock &rq->__lock irq_context: 0 sk_lock-AF_LLC rcu_read_lock &ei->socket.wq.wait &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_LLC rcu_read_lock rcu_node_0 irq_context: 0 sk_lock-AF_LLC rcu_read_lock &rq->__lock irq_context: 0 sk_lock-AF_LLC slock-AF_LLC &sk->sk_lock.wq irq_context: 0 slock-AF_LLC &sk->sk_lock.wq irq_context: 0 slock-AF_LLC &sk->sk_lock.wq &p->pi_lock irq_context: 0 slock-AF_LLC &sk->sk_lock.wq &p->pi_lock &rq->__lock irq_context: 0 slock-AF_LLC &sk->sk_lock.wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_LLC rcu_read_lock &ei->socket.wq.wait &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 
&ei->i_data_sem irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 &ei->i_data_sem &ei->i_data_sem/1 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 &ei->i_data_sem &ei->i_data_sem/1 mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 &ei->i_data_sem &ei->i_data_sem/1 pool_lock#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 &ei->i_data_sem/1 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 pool_lock#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 &journal->j_state_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 jbd2_handle irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 jbd2_handle mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 jbd2_handle &____s->seqcount irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 jbd2_handle pool_lock#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 jbd2_handle stock_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 jbd2_handle &xa->xa_lock#6 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 jbd2_handle &xa->xa_lock#6 stock_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 jbd2_handle &xa->xa_lock#6 pool_lock#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 jbd2_handle &xa->xa_lock#6 &c->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 jbd2_handle &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 jbd2_handle &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 jbd2_handle lock#4 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 jbd2_handle &ei->i_data_sem irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 jbd2_handle &ei->i_data_sem &ei->i_data_sem/1 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 jbd2_handle &ei->i_data_sem &ei->i_data_sem/1 mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 jbd2_handle &ei->i_data_sem &ei->i_data_sem/1 pool_lock#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 jbd2_handle &ei->i_data_sem &ei->i_data_sem/1 &obj_hash[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 jbd2_handle &ei->i_data_sem &ei->i_data_sem/1 &mapping->private_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 jbd2_handle &ei->i_data_sem &ei->i_data_sem/1 &ei->i_es_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 jbd2_handle &ei->i_data_sem &ei->i_data_sem/1 &ei->i_es_lock pool_lock#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 jbd2_handle &ei->i_data_sem &ei->i_data_sem/1 &ret->b_state_lock irq_context: 0 
sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 jbd2_handle &ei->i_data_sem &ei->i_data_sem/1 &ret->b_state_lock &journal->j_list_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 jbd2_handle &ei->i_data_sem &ei->i_data_sem/1 &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 jbd2_handle &ei->i_data_sem &ei->i_data_sem/1 &c->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 jbd2_handle &ei->i_data_sem &ei->i_data_sem/1 &____s->seqcount#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 jbd2_handle &ei->i_data_sem &ei->i_data_sem/1 &____s->seqcount irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 jbd2_handle &ei->i_data_sem/1 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 &obj_hash[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 &c->lock irq_context: 0 sk_lock-AF_LLC &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 jbd2_handle &journal->j_wait_updates irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 jbd2_handle &ei->i_data_sem &ei->i_data_sem/1 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 jbd2_handle lock#4 &lruvec->lru_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 &n->list_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 &n->list_lock &c->lock irq_context: 0 slock-AF_LLC &sk->sk_lock.wq &p->pi_lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 jbd2_handle &ei->i_data_sem &ei->i_data_sem/1 &n->list_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 jbd2_handle &ei->i_data_sem &ei->i_data_sem/1 &n->list_lock &c->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 &ei->i_data_sem &ei->i_data_sem/1 &ei->i_prealloc_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 &ei->i_data_sem &ei->i_data_sem/1 &ei->i_prealloc_lock &pa->pa_lock#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 &ei->i_data_sem &ei->i_data_sem/1 &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg2#17 (work_completion)(&peer->transmit_handshake_work) batched_entropy_u8.lock crngs.lock irq_context: 0 (wq_completion)wg-kex-wg2#17 (work_completion)(&peer->transmit_handshake_work) kfence_freelist_lock irq_context: 0 (wq_completion)wg-kex-wg2#17 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh &meta->lock irq_context: 0 (wq_completion)wg-kex-wg2#17 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh kfence_freelist_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_LLC &sap->sk_lock irq_context: 0 &sb->s_type->i_mutex_key#10 &dir->lock#2 irq_context: 0 sb_writers#4 tomoyo_ss remove_cache_srcu &cfs_rq->removed.lock irq_context: 0 sb_writers#4 tomoyo_ss remove_cache_srcu &rq->__lock &cfs_rq->removed.lock irq_context: 0 sk_lock-AF_LLC llc_sap_list_lock &c->lock irq_context: 0 sk_lock-AF_LLC llc_sap_list_lock &n->list_lock irq_context: 0 sk_lock-AF_LLC llc_sap_list_lock &n->list_lock &c->lock 
irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem rcu_read_lock rcu_read_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem lock#4 &lruvec->lru_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock rcu_read_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem &____s->seqcount#2 irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem &____s->seqcount irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem jbd2_handle &ei->i_es_lock key#2 irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem remove_cache_srcu irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem remove_cache_srcu quarantine_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem remove_cache_srcu &c->lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem remove_cache_srcu &n->list_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem remove_cache_srcu &rq->__lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle &ei->i_data_sem &sem->wait_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem jbd2_handle &sem->wait_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem jbd2_handle &p->pi_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem jbd2_handle &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem jbd2_handle &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem jbd2_handle &cfs_rq->removed.lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem jbd2_handle &obj_hash[i].lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &bdi->wb_waitq &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &journal->j_state_lock &journal->j_wait_commit irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &journal->j_state_lock &journal->j_wait_commit &p->pi_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &journal->j_state_lock &journal->j_wait_commit &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &journal->j_state_lock 
&journal->j_wait_commit &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &journal->j_state_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &journal->j_state_lock tk_core.seq.seqcount irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &journal->j_state_lock &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &journal->j_state_lock &base->lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &journal->j_state_lock &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock key#3 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock key#15 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &c->lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &____s->seqcount#2 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &____s->seqcount irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 &ei->i_data_sem &ei->i_data_sem/1 &c->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &ei->i_data_sem &(ei->i_block_reservation_lock) key#3 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 &ei->i_data_sem &ei->i_data_sem/1 &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 &ei->i_data_sem &ei->i_data_sem/1 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 batched_entropy_u8.lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 kfence_freelist_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 &meta->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 jbd2_handle rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 jbd2_handle rcu_read_lock &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 jbd2_handle rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 &____s->seqcount#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 &____s->seqcount irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 jbd2_handle rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 jbd2_handle rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 jbd2_handle rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 mmu_notifier_invalidate_range_start &rq->__lock irq_context: 
0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 jbd2_handle &ei->i_data_sem &ei->i_data_sem/1 &ei->i_es_lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem &ei->i_es_lock &____s->seqcount#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem &ei->i_es_lock &____s->seqcount irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle bit_wait_table + i irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem &n->list_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem &n->list_lock &c->lock irq_context: 0 rtnl_mutex flowtable_lock (wq_completion)nf_ft_offload_del irq_context: 0 &wg->device_update_lock rcu_read_lock rcu_node_0 irq_context: 0 &wg->device_update_lock rcu_read_lock &rcu_state.expedited_wq irq_context: 0 &wg->device_update_lock rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 &wg->device_update_lock rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 &wg->device_update_lock rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &wg->device_update_lock rcu_read_lock &rq->__lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 tomoyo_ss &rcu_state.expedited_wq irq_context: 0 rtnl_mutex flowtable_lock (wq_completion)nf_ft_offload_stats irq_context: 0 rtnl_mutex flowtable_lock (wq_completion)nf_ft_offload_stats &rq->__lock irq_context: 0 rtnl_mutex flowtable_lock (wq_completion)nf_ft_offload_stats &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex flowtable_lock (wq_completion)nf_ft_offload_stats &cfs_rq->removed.lock irq_context: 0 (wq_completion)l2tp (work_completion)(&tunnel->del_work) rcu_state.exp_mutex.wait_lock irq_context: 0 rtnl_mutex flowtable_lock (wq_completion)nf_ft_offload_stats &obj_hash[i].lock irq_context: 0 rtnl_mutex flowtable_lock &x->wait#10 irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&flowtable->gc_work)->work) &rq->__lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&flowtable->gc_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-kex-wg0#17 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &n->list_lock irq_context: 0 (wq_completion)wg-kex-wg0#17 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)wg-kex-wg0#17 (work_completion)(&peer->transmit_handshake_work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-kex-wg1#18 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock &rq->__lock irq_context: 0 (wq_completion)wg-kex-wg1#18 (work_completion)(&({ do { 
const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-crypt-wg0#8 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh &data->lock irq_context: 0 (wq_completion)wg-crypt-wg0#8 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 rtnl_mutex flowtable_lock rcu_read_lock &pool->lock &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)wg-crypt-wg0#11 (work_completion)(&peer->transmit_packet_work) &rcu_state.expedited_wq irq_context: 0 (wq_completion)wg-crypt-wg0#11 (work_completion)(&peer->transmit_packet_work) &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)l2tp (work_completion)(&tunnel->del_work) &pn->l2tp_session_hlist_lock irq_context: 0 (wq_completion)l2tp (work_completion)(&tunnel->del_work) rcu_state.exp_mutex rcu_node_0 irq_context: 0 (wq_completion)l2tp (work_completion)(&tunnel->del_work) rcu_state.exp_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)l2tp (work_completion)(&tunnel->del_work) rcu_state.exp_mutex rcu_read_lock &pool->lock irq_context: 0 (wq_completion)wg-crypt-wg2#11 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &rq->__lock irq_context: 0 (wq_completion)l2tp (work_completion)(&tunnel->del_work) rcu_state.exp_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx (wq_completion)phy21 irq_context: 0 (wq_completion)l2tp (work_completion)(&tunnel->del_work) rcu_state.exp_mutex &rnp->exp_wq[3] irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx rcu_state.exp_mutex &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)l2tp (work_completion)(&tunnel->del_work) rcu_state.exp_mutex &rq->__lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx (wq_completion)phy22 irq_context: 0 (wq_completion)l2tp (work_completion)(&tunnel->del_work) rcu_state.exp_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &bridge_netdev_addr_lock_key &obj_hash[i].lock pool_lock irq_context: 0 rtnl_mutex &bridge_netdev_addr_lock_key krc.lock &obj_hash[i].lock irq_context: 0 rtnl_mutex &bridge_netdev_addr_lock_key krc.lock &base->lock irq_context: 0 rtnl_mutex &bridge_netdev_addr_lock_key krc.lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&pool->work) &lruvec->lru_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#11 &nsim_trap_data->trap_lock rcu_read_lock &p->pi_lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#11 &nsim_trap_data->trap_lock rcu_read_lock &p->pi_lock &rq->__lock irq_context: 0 pernet_ops_rwsem &pool->lock/1 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)events 
(work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#11 &nsim_trap_data->trap_lock rcu_read_lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)l2tp (work_completion)(&tunnel->del_work) &p->pi_lock irq_context: 0 (wq_completion)l2tp (work_completion)(&tunnel->del_work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)l2tp (work_completion)(&tunnel->del_work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)l2tp (work_completion)(&tunnel->del_work) &list->lock#37 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem &pcp->lock &zone->lock &____s->seqcount irq_context: softirq &(&flowtable->gc_work)->timer irq_context: softirq &(&flowtable->gc_work)->timer rcu_read_lock &pool->lock irq_context: softirq &(&flowtable->gc_work)->timer rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: softirq &(&flowtable->gc_work)->timer rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 (wq_completion)events (work_completion)(&data->fib_event_work) &data->fib_lock quarantine_lock irq_context: softirq &(&flowtable->gc_work)->timer rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: softirq &(&flowtable->gc_work)->timer rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &data->lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx (wq_completion)phy27 irq_context: 0 rtnl_mutex cpu_hotplug_lock wq_pool_mutex &rq->__lock irq_context: 0 &pn->l2tp_tunnel_idr_lock &n->list_lock irq_context: 0 &pn->l2tp_tunnel_idr_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)l2tp (work_completion)(&tunnel->del_work) &rnp->exp_lock irq_context: 0 (wq_completion)l2tp (work_completion)(&tunnel->del_work) rcu_state.exp_mutex irq_context: 0 (wq_completion)l2tp (work_completion)(&tunnel->del_work) rcu_state.exp_mutex rcu_state.exp_mutex.wait_lock irq_context: 0 (wq_completion)l2tp (work_completion)(&tunnel->del_work) rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 (wq_completion)l2tp (work_completion)(&tunnel->del_work) rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)l2tp (work_completion)(&tunnel->del_work) rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)l2tp (work_completion)(&tunnel->del_work) rcu_state.exp_mutex &rnp->exp_wq[0] irq_context: 0 rtnl_mutex cpu_hotplug_lock wq_pool_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &dev_addr_list_lock_key irq_context: 0 (wq_completion)bond1#3 irq_context: 0 (wq_completion)bond1#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond1#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond1#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond1#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex &dev_addr_list_lock_key/1 &bridge_netdev_addr_lock_key irq_context: 0 (wq_completion)l2tp (work_completion)(&tunnel->del_work) clock-AF_INET6 irq_context: 0 rtnl_mutex &dev_addr_list_lock_key/1 &bridge_netdev_addr_lock_key &c->lock irq_context: 0 rtnl_mutex 
&dev_addr_list_lock_key/1 &bridge_netdev_addr_lock_key &n->list_lock irq_context: 0 rtnl_mutex &dev_addr_list_lock_key/1 &bridge_netdev_addr_lock_key &n->list_lock &c->lock irq_context: 0 rtnl_mutex &dev_addr_list_lock_key/1 &bridge_netdev_addr_lock_key pool_lock#2 irq_context: 0 (wq_completion)bond1#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond1#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond1#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond1#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &tb->tb6_lock rcu_read_lock &data->fib_event_queue_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &tb->tb6_lock rcu_read_lock rcu_read_lock &pool->lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &tb->tb6_lock rcu_read_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &tb->tb6_lock rcu_read_lock rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &dev_addr_list_lock_key/1 rcu_read_lock &bridge_netdev_addr_lock_key irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &dev_addr_list_lock_key/1 rcu_read_lock &bridge_netdev_addr_lock_key &____s->seqcount irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &dev_addr_list_lock_key/1 rcu_read_lock &bridge_netdev_addr_lock_key pool_lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &dev_addr_list_lock_key/1 rcu_read_lock &bridge_netdev_addr_lock_key rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &dev_addr_list_lock_key/1 rcu_read_lock &bridge_netdev_addr_lock_key &obj_hash[i].lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &rnp->exp_lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx rcu_state.exp_mutex irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx rcu_state.exp_mutex rcu_state.exp_mutex.wait_lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx (wq_completion)phy23 irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &data->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &data->lock &base->lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &data->lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock pool_lock#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) 
&idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock rcu_read_lock rhashtable_bucket irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock nl_table_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock nl_table_wait.lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &c->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond1#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond1#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond1#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond1#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond1#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond1#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond1#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond1#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond1#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond1#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond1#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond1#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond1#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond1#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond1#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond1#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond1#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond1#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond1#3 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond1#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock pool_lock#2 irq_context: 0 (wq_completion)bond1#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond1#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)bond1#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond1#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond1#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond1#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond1#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond1#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond1#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond1#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond1#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)bond1#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond1#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 br_ioctl_mutex rtnl_mutex &cfs_rq->removed.lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock &ndev->lock irq_context: 0 cpuset_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq (&net->sctp.addr_wq_timer) &net->sctp.addr_wq_lock &base->lock irq_context: softirq (&net->sctp.addr_wq_timer) &net->sctp.addr_wq_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &dev_addr_list_lock_key &c->lock irq_context: 0 (wq_completion)bond1#4 irq_context: 0 (wq_completion)bond1#4 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond1#4 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond1#4 (work_completion)(&(&slave->notify_work)->work) 
&base->lock irq_context: 0 (wq_completion)bond1#4 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond1#4 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond1#4 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond1#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond1#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond1#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond1#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond1#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond1#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond1#4 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 rtnl_mutex cpu_hotplug_lock wq_pool_mutex rcu_read_lock rcu_node_0 irq_context: 0 rtnl_mutex cpu_hotplug_lock wq_pool_mutex rcu_read_lock &rcu_state.expedited_wq irq_context: 0 rtnl_mutex cpu_hotplug_lock wq_pool_mutex rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 rtnl_mutex cpu_hotplug_lock wq_pool_mutex rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 rtnl_mutex cpu_hotplug_lock wq_pool_mutex rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond1#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond1#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond1#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond1#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond1#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond1#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond1#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond1#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond1#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond1#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond1#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond1#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond1#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond1#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond1#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond1#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock 
&dir->lock#2 irq_context: 0 (wq_completion)bond1#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond1#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)bond1#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond1#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond1#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond1#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond1#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond1#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond1#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock pool_lock#2 irq_context: 0 (wq_completion)bond1#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond1#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock rcu_read_lock rhashtable_bucket irq_context: 0 (wq_completion)bond1#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &c->lock irq_context: 0 (wq_completion)bond1#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &n->list_lock irq_context: 0 (wq_completion)bond1#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond1#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock nl_table_lock irq_context: 0 (wq_completion)bond1#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock nl_table_wait.lock irq_context: 0 (wq_completion)bond1#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond1#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock 
rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond1#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond1#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 kn->active#18 remove_cache_srcu irq_context: 0 kn->active#18 remove_cache_srcu quarantine_lock irq_context: 0 kn->active#18 remove_cache_srcu &n->list_lock irq_context: 0 kn->active#18 remove_cache_srcu &obj_hash[i].lock irq_context: 0 kn->active#18 remove_cache_srcu &c->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 jbd2_handle &ei->i_data_sem &ei->i_data_sem/1 rcu_node_0 irq_context: 0 (wq_completion)bond1#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)bond1#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond1#4 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 &xs->mutex rcu_read_lock &pool->lock irq_context: 0 &xs->mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 &xs->mutex rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 &xs->mutex rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 &xs->mutex rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &xs->mutex rcu_read_lock &cfs_rq->removed.lock irq_context: 0 &xs->mutex rcu_read_lock &obj_hash[i].lock irq_context: 0 kn->active#16 &n->list_lock irq_context: 0 kn->active#16 &n->list_lock &c->lock irq_context: 0 (wq_completion)bond2#2 irq_context: 0 (wq_completion)bond2#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 kn->active#17 &n->list_lock irq_context: 0 kn->active#17 &n->list_lock &c->lock irq_context: 0 (wq_completion)bond2#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond2#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond2#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond2#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond2#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond2#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond2#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &dev_addr_list_lock_key/1 rcu_read_lock &bridge_netdev_addr_lock_key &c->lock irq_context: 0 (wq_completion)bond2#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond2#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond2#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond2#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock 
&bond->stats_lock/1 irq_context: 0 (wq_completion)bond2#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond2#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)events (work_completion)(&p->wq) rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ei->i_data_sem &rcu_state.expedited_wq irq_context: 0 (wq_completion)events (work_completion)(&p->wq) rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex uevent_sock_mutex rcu_read_lock &rcu_state.expedited_wq irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ei->i_data_sem &rcu_state.expedited_wq &p->pi_lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ei->i_data_sem &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ei->i_data_sem &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex uevent_sock_mutex rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 rtnl_mutex uevent_sock_mutex rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 rtnl_mutex uevent_sock_mutex rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx remove_cache_srcu irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx remove_cache_srcu quarantine_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &rdev->bss_lock irq_context: 0 (wq_completion)events_long (work_completion)(&(&ipvs->defense_work)->work) &rcu_state.expedited_wq irq_context: 0 (wq_completion)events_long (work_completion)(&(&ipvs->defense_work)->work) &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)events_long (work_completion)(&(&ipvs->defense_work)->work) &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events_long (work_completion)(&(&ipvs->defense_work)->work) &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &rdev->bss_lock &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &rdev->bss_lock pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &rdev->bss_lock krc.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &sch->q.lock crngs.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_state.exp_mutex &rcu_state.expedited_wq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_state.exp_mutex &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &dev_addr_list_lock_key irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &dev_addr_list_lock_key &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &dev_addr_list_lock_key krc.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &idev->mc_lock rcu_read_lock &pool->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &idev->mc_lock rcu_read_lock 
&pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_state.exp_mutex &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_read_lock krc.lock &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_read_lock krc.lock &base->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_read_lock krc.lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)gid-cache-wq (work_completion)(&work->work) devices_rwsem rcu_node_0 irq_context: 0 (wq_completion)gid-cache-wq (work_completion)(&work->work) devices_rwsem &rcu_state.expedited_wq irq_context: 0 (wq_completion)gid-cache-wq (work_completion)(&work->work) devices_rwsem &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)gid-cache-wq (work_completion)(&work->work) devices_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &idev->mc_lock remove_cache_srcu &base->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &idev->mc_lock remove_cache_srcu &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &idev->mc_lock &dev_addr_list_lock_key irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &idev->mc_lock &dev_addr_list_lock_key &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &idev->mc_lock &dev_addr_list_lock_key krc.lock irq_context: 0 &data->open_mutex &root->kernfs_rwsem &sem->wait_lock irq_context: 0 &data->open_mutex rfkill_global_mutex &root->kernfs_rwsem &sem->wait_lock irq_context: 0 &data->open_mutex rfkill_global_mutex &root->kernfs_rwsem &rq->__lock irq_context: 0 &data->open_mutex rfkill_global_mutex &root->kernfs_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &data->open_mutex rfkill_global_mutex &sem->wait_lock irq_context: 0 &data->open_mutex rfkill_global_mutex &p->pi_lock &rq->__lock irq_context: 0 &data->open_mutex rfkill_global_mutex &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)hci5 irq_context: 0 (wq_completion)hci5 (work_completion)(&hdev->power_on) irq_context: 0 (wq_completion)hci5 (work_completion)(&hdev->power_on) &hdev->req_lock irq_context: 0 (wq_completion)hci5 (work_completion)(&hdev->power_on) &hdev->req_lock &obj_hash[i].lock irq_context: 0 (wq_completion)hci5 (work_completion)(&hdev->power_on) &hdev->req_lock &list->lock#9 irq_context: 0 (wq_completion)hci5 (work_completion)(&hdev->power_on) &hdev->req_lock &list->lock#10 irq_context: 0 (wq_completion)hci5 (work_completion)(&hdev->power_on) &hdev->req_lock rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)hci5 (work_completion)(&hdev->power_on) &hdev->req_lock rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)hci5 (work_completion)(&hdev->power_on) &hdev->req_lock rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)hci5 (work_completion)(&hdev->power_on) &hdev->req_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)hci5 (work_completion)(&hdev->power_on) &hdev->req_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)hci5 (work_completion)(&hdev->power_on) 
&hdev->req_lock &hdev->req_wait_q irq_context: 0 &wq->mutex &rq->__lock irq_context: 0 (wq_completion)hci5 (work_completion)(&hdev->power_on) &hdev->req_lock &base->lock irq_context: 0 (wq_completion)hci5 (work_completion)(&hdev->power_on) &hdev->req_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)hci5 (work_completion)(&hdev->power_on) &hdev->req_lock &rq->__lock irq_context: 0 (wq_completion)hci5 (work_completion)(&hdev->power_on) &hdev->req_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)hci5#2 irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->cmd_work) irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->cmd_work) &list->lock#9 irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->cmd_work) fs_reclaim irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->cmd_work) fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->cmd_work) tk_core.seq.seqcount irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->cmd_work) &list->lock#11 irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->cmd_work) &data->read_wait irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->cmd_work) rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &list->lock#9 irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) lock#6 irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) lock#6 kcov_remote_lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) fs_reclaim irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &c->lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) (console_sem).lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) console_lock console_srcu console_owner_lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) console_lock console_srcu console_owner irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) console_lock console_srcu console_owner &port_lock_key irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) console_lock console_srcu console_owner console_owner_lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &obj_hash[i].lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->req_wait_q irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->req_wait_q &p->pi_lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &data->lock irq_context: 0 (wq_completion)hci5 (work_completion)(&hdev->power_on) &hdev->req_lock (&timer.timer) irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) lock#6 &kcov->lock irq_context: 0 (wq_completion)hci5 (work_completion)(&hdev->power_on) &hdev->req_lock &data->lock irq_context: 0 (wq_completion)hci5 (work_completion)(&hdev->power_on) &hdev->req_lock &c->lock irq_context: 0 (wq_completion)hci5 (work_completion)(&hdev->power_on) &hdev->req_lock &n->list_lock irq_context: 0 (wq_completion)hci5 
(work_completion)(&hdev->power_on) &hdev->req_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->cmd_work) &data->lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->cmd_work) &obj_hash[i].lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->cmd_work) &data->read_wait &p->pi_lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->cmd_work) &data->read_wait &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->cmd_work) &data->read_wait &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->cmd_work) rcu_read_lock &base->lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->cmd_work) rcu_read_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &n->list_lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &n->list_lock &c->lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &base->lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->req_wait_q &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->req_wait_q &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) rcu_node_0 irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &rcu_state.expedited_wq irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &rq->__lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->cmd_work) &c->lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-crypt-wg0#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)hci5 (work_completion)(&hdev->power_on) &hdev->req_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)hci5 (work_completion)(&hdev->power_on) &hdev->req_lock hci_sk_list.lock irq_context: 0 (wq_completion)hci5 (work_completion)(&hdev->power_on) fs_reclaim irq_context: 0 (wq_completion)hci5 (work_completion)(&hdev->power_on) fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)hci5 (work_completion)(&hdev->power_on) tk_core.seq.seqcount irq_context: 0 (wq_completion)hci5 (work_completion)(&hdev->power_on) hci_sk_list.lock irq_context: 0 (wq_completion)hci5 (work_completion)(&hdev->power_on) &data->lock irq_context: 0 (wq_completion)hci5 (work_completion)(&hdev->power_on) &obj_hash[i].lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) 
chan_list_lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->lock fs_reclaim irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->lock &c->lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->lock &n->list_lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->lock &n->list_lock &c->lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->lock &obj_hash[i].lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->lock &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->lock &x->wait#9 irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->lock &k->list_lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->lock lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->lock lock kernfs_idr_lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->lock &root->kernfs_rwsem irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->lock &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->lock bus_type_sem irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->lock sysfs_symlink_target_lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->lock &root->kernfs_rwsem irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->lock &dev->power.lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->lock dpm_list_mtx irq_context: 0 &f->f_pos_lock sb_writers#3 remove_cache_srcu irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->lock remove_cache_srcu irq_context: 0 &f->f_pos_lock sb_writers#3 remove_cache_srcu quarantine_lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->lock remove_cache_srcu quarantine_lock irq_context: 0 &f->f_pos_lock sb_writers#3 remove_cache_srcu &c->lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex irq_context: 0 &f->f_pos_lock sb_writers#3 remove_cache_srcu &n->list_lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex fs_reclaim irq_context: 0 &f->f_pos_lock sb_writers#3 remove_cache_srcu &obj_hash[i].lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex fs_reclaim 
mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex nl_table_lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex rlock-AF_NETLINK irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#3 &c->lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex nl_table_wait.lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->lock &k->k_lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->lock subsys mutex#80 irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->lock subsys mutex#80 &k->k_lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->lock &list->lock#9 irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->lock &hdev->unregister_lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->lock &hdev->unregister_lock fs_reclaim irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->lock &hdev->unregister_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->lock &hdev->unregister_lock &hdev->cmd_sync_work_lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->lock &hdev->unregister_lock rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->lock &hdev->unregister_lock rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->lock &hdev->unregister_lock rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->lock &hdev->unregister_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->lock &hdev->unregister_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->lock &hdev->unregister_lock &rq->__lock irq_context: 0 (wq_completion)hci5 (work_completion)(&hdev->cmd_sync_work) irq_context: 0 (wq_completion)hci5 (work_completion)(&hdev->cmd_sync_work) &hdev->cmd_sync_work_lock irq_context: 0 
(wq_completion)hci5 (work_completion)(&hdev->cmd_sync_work) &hdev->req_lock irq_context: 0 (wq_completion)hci5 (work_completion)(&hdev->cmd_sync_work) &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#3 &cfs_rq->removed.lock irq_context: 0 &f->f_pos_lock sb_writers#3 pool_lock#2 irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock fs_reclaim irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock &c->lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock &obj_hash[i].lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock chan_list_lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock &conn->ident_lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock &base->lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock &list->lock#12 irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock &conn->chan_lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock &conn->chan_lock &rq->__lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock &rq->__lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->lock &base->lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->lock tk_core.seq.seqcount irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_sk_list.lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->lock &data->lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->tx_work) irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->tx_work) &list->lock#12 irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->tx_work) tk_core.seq.seqcount irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->tx_work) &list->lock#11 irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->tx_work) &data->read_wait irq_context: 0 (wq_completion)hci5#2 
(work_completion)(&hdev->tx_work) &list->lock#9 irq_context: 0 (wq_completion)hci5#2 (work_completion)(&conn->pending_rx_work) irq_context: 0 (wq_completion)hci5#2 (work_completion)(&conn->pending_rx_work) &list->lock#13 irq_context: 0 (wq_completion)wg-kex-wg0#17 (work_completion)(&peer->transmit_handshake_work) rcu_node_0 irq_context: 0 (wq_completion)wg-kex-wg0#17 (work_completion)(&peer->transmit_handshake_work) &rcu_state.expedited_wq irq_context: 0 (wq_completion)wg-kex-wg0#17 (work_completion)(&peer->transmit_handshake_work) &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)wg-kex-wg0#17 (work_completion)(&peer->transmit_handshake_work) &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)wg-kex-wg0#17 (work_completion)(&peer->transmit_handshake_work) &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &br->multicast_lock rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &block->lock remove_cache_srcu &rq->__lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&(&hdev->cmd_timer)->work) irq_context: 0 (wq_completion)hci5#2 (work_completion)(&(&hdev->cmd_timer)->work) (console_sem).lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&(&hdev->cmd_timer)->work) console_lock console_srcu console_owner_lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&(&hdev->cmd_timer)->work) console_lock console_srcu console_owner irq_context: 0 (wq_completion)hci5#2 (work_completion)(&(&hdev->cmd_timer)->work) console_lock console_srcu console_owner &port_lock_key irq_context: 0 (wq_completion)hci5#2 (work_completion)(&(&hdev->cmd_timer)->work) console_lock console_srcu console_owner console_owner_lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&(&hdev->cmd_timer)->work) rcu_node_0 irq_context: 0 (wq_completion)hci5#2 (work_completion)(&(&hdev->cmd_timer)->work) &rcu_state.expedited_wq irq_context: 0 (wq_completion)hci5#2 (work_completion)(&(&hdev->cmd_timer)->work) &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&(&hdev->cmd_timer)->work) &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&(&hdev->cmd_timer)->work) &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)hci5#2 (work_completion)(&(&hdev->cmd_timer)->work) &rq->__lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&(&hdev->cmd_timer)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)hci5#2 (work_completion)(&(&hdev->cmd_timer)->work) rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)hci5#2 (work_completion)(&(&hdev->cmd_timer)->work) rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&(&hdev->cmd_timer)->work) rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&(&hdev->cmd_timer)->work) rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&(&hdev->cmd_timer)->work) rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &block->lock quarantine_lock irq_context: 0 
(wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_state.barrier_mutex &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)wg-crypt-wg1#10 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh &meta->lock irq_context: 0 (wq_completion)wg-crypt-wg1#10 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh kfence_freelist_lock irq_context: softirq net/ipv4/devinet.c:474 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: softirq net/ipv4/devinet.c:474 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-kex-wg1#17 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh &____s->seqcount#2 irq_context: 0 (wq_completion)wg-kex-wg1#17 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh &____s->seqcount irq_context: 0 (wq_completion)bond1 (work_completion)(&(&bond->ad_work)->work) rcu_node_0 irq_context: 0 (wq_completion)bond1 (work_completion)(&(&bond->ad_work)->work) &rcu_state.expedited_wq irq_context: 0 (wq_completion)bond1 (work_completion)(&(&bond->ad_work)->work) &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex cbs_list_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex uevent_sock_mutex &c->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex uevent_sock_mutex &n->list_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex uevent_sock_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &idev->mc_lock &dev_addr_list_lock_key#3/1 &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rdev->wiphy.mtx rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond2#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond2#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond2#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond2#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond2#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond2#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond2#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond2#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond2#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond2#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond2#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond2#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 
(wq_completion)bond2#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond2#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond2#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond2#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond2#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond2#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond2#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock rcu_read_lock rhashtable_bucket irq_context: 0 (wq_completion)bond2#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock nl_table_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#51 nsim_bus_dev_list_lock &dev->mutex uevent_sock_mutex &c->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#11 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#11 crngs.lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#11 fs_reclaim irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#11 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#11 &c->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#11 devlinks.xa_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#11 &obj_hash[i].lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#11 nl_table_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#11 nl_table_wait.lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#11 &xa->xa_lock#14 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#11 &data->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#11 &____s->seqcount#2 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#11 &____s->seqcount irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#11 &n->list_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#11 &n->list_lock &c->lock 
irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#11 &rq->__lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#11 quarantine_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#11 remove_cache_srcu irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#11 remove_cache_srcu quarantine_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#11 remove_cache_srcu &c->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#11 remove_cache_srcu &n->list_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#11 remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#11 remove_cache_srcu &obj_hash[i].lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#11 pcpu_alloc_mutex irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#11 pcpu_alloc_mutex pcpu_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#11 &base->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#11 &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#11 pin_fs_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#11 &sb->s_type->i_mutex_key#3 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#11 &sb->s_type->i_mutex_key#3 rename_lock.seqcount irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#11 &sb->s_type->i_mutex_key#3 fs_reclaim irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#11 &sb->s_type->i_mutex_key#3 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#11 &sb->s_type->i_mutex_key#3 &dentry->d_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#11 &sb->s_type->i_mutex_key#3 rcu_read_lock rename_lock.seqcount irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#11 &sb->s_type->i_mutex_key#3 &dentry->d_lock &wq irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#11 &sb->s_type->i_mutex_key#3 mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#11 &sb->s_type->i_mutex_key#3 &sb->s_type->i_lock_key#7 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#11 &sb->s_type->i_mutex_key#3 &s->s_inode_list_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#11 &sb->s_type->i_mutex_key#3 tk_core.seq.seqcount irq_context: 0 sb_writers#8 &of->mutex 
kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#11 &sb->s_type->i_mutex_key#3 &sb->s_type->i_lock_key#7 &dentry->d_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#11 &sb->s_type->i_mutex_key#3 &c->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#11 &sb->s_type->i_mutex_key#3 &____s->seqcount#2 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#11 &sb->s_type->i_mutex_key#3 &____s->seqcount irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#11 &sb->s_type->i_mutex_key#3 pool_lock#2 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#11 &sb->s_type->i_mutex_key#3 &n->list_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#11 &sb->s_type->i_mutex_key#3 &n->list_lock &c->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#11 &sb->s_type->i_mutex_key#3 &rq->__lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#11 batched_entropy_u32.lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#11 rtnl_mutex irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#11 rtnl_mutex &(&net->nexthop.notifier_chain)->rwsem irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#11 rcu_read_lock &c->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#11 rcu_read_lock &data->fib_event_queue_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#11 rcu_read_lock rcu_read_lock &pool->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#11 rcu_read_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#11 rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#11 rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#11 rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#11 rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#11 rcu_read_lock &rq->__lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#11 rcu_read_lock &tb->tb6_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#11 rcu_read_lock &tb->tb6_lock &net->ipv6.fib6_walker_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#11 rcu_read_lock &tb->tb6_lock &data->fib_event_queue_lock irq_context: 0 sb_writers#8 &of->mutex 
kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#11 rcu_read_lock &tb->tb6_lock &c->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#11 rcu_read_lock &tb->tb6_lock &n->list_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#11 rcu_read_lock &tb->tb6_lock &n->list_lock &c->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#11 rcu_read_lock &obj_hash[i].lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#11 &(&fn_net->fib_chain)->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#11 &devlink_port->type_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#11 stack_depot_init_mutex irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#11 &sb->s_type->i_mutex_key#3 &pcp->lock &zone->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#11 &sb->s_type->i_mutex_key#3 &pcp->lock &zone->lock &____s->seqcount irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#11 rtnl_mutex bpf_devs_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#11 rtnl_mutex bpf_devs_lock fs_reclaim irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#11 rtnl_mutex bpf_devs_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#11 rtnl_mutex bpf_devs_lock rcu_read_lock rhashtable_bucket irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#11 rtnl_mutex pin_fs_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#11 rtnl_mutex &sb->s_type->i_mutex_key#3 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#11 rtnl_mutex &sb->s_type->i_mutex_key#3 rename_lock.seqcount irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#11 rtnl_mutex &sb->s_type->i_mutex_key#3 fs_reclaim irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#11 rtnl_mutex &sb->s_type->i_mutex_key#3 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#11 rtnl_mutex &sb->s_type->i_mutex_key#3 &dentry->d_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#11 rtnl_mutex &sb->s_type->i_mutex_key#3 rcu_read_lock rename_lock.seqcount irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#11 rtnl_mutex &sb->s_type->i_mutex_key#3 &dentry->d_lock &wq irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#11 rtnl_mutex &sb->s_type->i_mutex_key#3 mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#11 
rtnl_mutex &sb->s_type->i_mutex_key#3 &sb->s_type->i_lock_key#7 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#11 rtnl_mutex &sb->s_type->i_mutex_key#3 &s->s_inode_list_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#11 rtnl_mutex &sb->s_type->i_mutex_key#3 tk_core.seq.seqcount irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#11 rtnl_mutex &sb->s_type->i_mutex_key#3 &sb->s_type->i_lock_key#7 &dentry->d_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#11 rtnl_mutex &sb->s_type->i_mutex_key#3 &c->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#11 rtnl_mutex &sb->s_type->i_mutex_key#3 &n->list_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#11 rtnl_mutex &sb->s_type->i_mutex_key#3 &n->list_lock &c->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#11 rtnl_mutex &____s->seqcount irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#11 rtnl_mutex &obj_hash[i].lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#11 rtnl_mutex fs_reclaim irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#11 rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#11 rtnl_mutex &base->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#11 rtnl_mutex &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#11 rtnl_mutex (work_completion)(&(&devlink_port->type_warn_dw)->work) irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#11 rtnl_mutex &devlink_port->type_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#11 rtnl_mutex &c->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#11 rtnl_mutex &n->list_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#11 rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond2#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock nl_table_wait.lock irq_context: 0 (wq_completion)bond2#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond2#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond2#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh 
rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)bond2#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)bond2#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond2#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond2#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond2#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond2#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond2#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &mapping->i_mmap_rwsem &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &mapping->i_mmap_rwsem &rq->__lock &cfs_rq->removed.lock irq_context: 0 &xs->mutex remove_cache_srcu rcu_read_lock rcu_node_0 irq_context: 0 &xs->mutex remove_cache_srcu rcu_read_lock &rq->__lock irq_context: 0 &xs->mutex remove_cache_srcu rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem rcu_read_lock rcu_node_0 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem rcu_read_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)mld 
(work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 &xs->mutex purge_vmap_area_lock &obj_hash[i].lock irq_context: 0 &xs->mutex purge_vmap_area_lock pool_lock#2 irq_context: 0 &xs->mutex &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 &dentry->d_lock rcu_read_lock &p->pi_lock irq_context: 0 &dentry->d_lock rcu_read_lock &sb->s_type->i_lock_key#23 &dentry->d_lock &p->pi_lock irq_context: 0 (wq_completion)bond3#2 irq_context: 0 (wq_completion)bond3#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond3#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond3#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond3#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: softirq (&n->timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: softirq (&n->timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: softirq (&n->timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: 0 &type->i_mutex_dir_key#7 &sem->wait_lock irq_context: 0 (wq_completion)bond3#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond3#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond3#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond3#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#11 &sem->wait_lock irq_context: 0 sb_writers#11 &p->pi_lock irq_context: 0 sb_writers#11 &p->pi_lock &rq->__lock irq_context: 0 sb_writers#11 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &type->i_mutex_dir_key#7 &root->kernfs_rwsem &n->list_lock irq_context: 0 &type->i_mutex_dir_key#7 &root->kernfs_rwsem &n->list_lock &c->lock irq_context: 0 (wq_completion)bond3#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond3#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond3#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond3#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond3#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond3#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond3#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond3#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock 
irq_context: 0 (wq_completion)bond3#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond3#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond3#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond3#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond3#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond3#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond3#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond3#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond3#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond3#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond3#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond3#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond3#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond3#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond3#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond3#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond3#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond3#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond3#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond3#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond3#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond3#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond3#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond3#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond3#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock 
rcu_read_lock rhashtable_bucket irq_context: 0 (wq_completion)bond3#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock nl_table_lock irq_context: 0 (wq_completion)bond3#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock nl_table_wait.lock irq_context: 0 (wq_completion)bond3#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond3#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond3#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &c->lock irq_context: 0 (wq_completion)bond3#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond3#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond3#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond3#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 kn->active#64 &kernfs_locks->open_file_mutex[count] &c->lock irq_context: 0 &sb->s_type->i_mutex_key#10 console_owner_lock irq_context: 0 &sb->s_type->i_mutex_key#10 console_owner irq_context: 0 &sb->s_type->i_mutex_key#10 console_lock console_srcu console_owner_lock irq_context: 0 &sb->s_type->i_mutex_key#10 console_lock console_srcu console_owner irq_context: 0 &sb->s_type->i_mutex_key#10 console_lock console_srcu console_owner console_owner_lock irq_context: 0 (wq_completion)bond4#2 irq_context: 0 (wq_completion)bond4#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond4#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond4#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond4#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond4#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond4#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond4#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond4#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond4#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond4#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond4#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock 
irq_context: 0 (wq_completion)bond4#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond4#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond4#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond4#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond4#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond4#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond4#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond4#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond4#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond4#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond4#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond4#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond4#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond4#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond4#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond4#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond4#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond4#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond4#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock rcu_read_lock rhashtable_bucket irq_context: 0 (wq_completion)bond4#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock nl_table_lock irq_context: 0 (wq_completion)bond4#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock nl_table_wait.lock irq_context: 0 (wq_completion)bond4#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 
(wq_completion)bond4#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond4#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond4#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond4#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond4#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond4#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 rtnl_mutex &idev->mc_lock fill_pool_map-wait-type-override rcu_read_lock rcu_node_0 irq_context: 0 rtnl_mutex &idev->mc_lock fill_pool_map-wait-type-override rcu_read_lock &rq->__lock irq_context: 0 &mm->mmap_lock remove_cache_srcu rcu_read_lock &rcu_state.expedited_wq irq_context: 0 &mm->mmap_lock remove_cache_srcu rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 &mm->mmap_lock remove_cache_srcu rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 &mm->mmap_lock remove_cache_srcu rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &type->s_umount_key#22/1 rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &idev->mc_lock fill_pool_map-wait-type-override rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq (&n->timer) k-slock-AF_INET6 rcu_read_lock &____s->seqcount#2 irq_context: softirq (&n->timer) k-slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh &data->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond5 irq_context: 0 (wq_completion)bond5 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond5 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond5 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond5 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#11 &rq->__lock irq_context: 0 sb_writers#11 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond5 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond5 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond5 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond5 
(work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock fill_pool_map-wait-type-override &n->list_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond5 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond5 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond5 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond5 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond5 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond5 (work_completion)(&(&bond->mcast_work)->work) 
rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &n->list_lock irq_context: 0 (wq_completion)bond5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond5 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 &vma->vm_lock->lock ptlock_ptr(page)#2 rcu_read_lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)events (work_completion)(&umem->work) quarantine_lock irq_context: 0 sk_lock-AF_INET6 remove_cache_srcu rcu_read_lock &rcu_state.expedited_wq irq_context: 0 sk_lock-AF_INET6 remove_cache_srcu rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 sk_lock-AF_INET6 remove_cache_srcu rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 sk_lock-AF_INET6 remove_cache_srcu rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond2#3 irq_context: 0 (wq_completion)bond2#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond2#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock 
irq_context: 0 (wq_completion)bond2#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond2#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond2#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond2#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond2#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond2#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond2#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond2#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond2#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond2#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond2#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond2#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond2#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &____s->seqcount#2 irq_context: 0 (wq_completion)bond2#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &____s->seqcount irq_context: 0 (wq_completion)bond2#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond2#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond2#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond2#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond2#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond2#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond2#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond2#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond2#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond2#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond2#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond2#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond2#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond2#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond2#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond2#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond2#3 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond2#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond2#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond2#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond2#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond2#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond2#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond2#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock rcu_read_lock rhashtable_bucket irq_context: 0 (wq_completion)bond2#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock nl_table_lock irq_context: 0 (wq_completion)bond2#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock nl_table_wait.lock irq_context: 0 (wq_completion)bond2#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond2#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond2#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond2#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &root->kernfs_rwsem &p->pi_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock &base->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock 
rcu_read_lock_bh rcu_read_lock &data->lock &base->lock &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &idev->mc_lock _xmit_ETHER &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rcu_state.expedited_wq &p->pi_lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_TIPC &tn->nametbl_lock fill_pool_map-wait-type-override &c->lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_TIPC &tn->nametbl_lock fill_pool_map-wait-type-override &n->list_lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_TIPC &tn->nametbl_lock fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 (wq_completion)events fqdir_free_work rcu_state.barrier_mutex &rq->__lock &cfs_rq->removed.lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &meta->lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock kfence_freelist_lock irq_context: softirq (&peer->timer_persistent_keepalive) batched_entropy_u8.lock crngs.lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &n->lock &c->lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock stock_lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &ul->lock#2 irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)wg-crypt-wg1#11 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)events free_ipc_work &type->s_umount_key#47 rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)events free_ipc_work &type->s_umount_key#47 rcu_read_lock &rcu_state.expedited_wq irq_context: 0 (wq_completion)events free_ipc_work &type->s_umount_key#47 rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)events free_ipc_work &type->s_umount_key#47 rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events free_ipc_work &type->s_umount_key#47 rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 &xs->mutex rcu_state.exp_mutex.wait_lock irq_context: 0 &sb->s_type->i_mutex_key#10 &xs->mutex &p->pi_lock irq_context: 0 &sb->s_type->i_mutex_key#10 &xs->mutex &p->pi_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 &xs->mutex &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#11 &cfs_rq->removed.lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#11 &obj_hash[i].lock irq_context: 0 
&sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond3#3 irq_context: 0 (wq_completion)bond3#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond3#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond3#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond3#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond3#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond3#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond3#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond3#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg2#18 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock rcu_read_lock_bh batched_entropy_u32.lock crngs.lock irq_context: 0 (wq_completion)bond3#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond3#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond3#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond3#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond3#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond3#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond3#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond3#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond3#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond3#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond3#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond3#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond3#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond3#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond3#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond3#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond3#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond3#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock 
rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond3#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond3#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond3#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond3#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond3#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond3#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond3#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond3#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond3#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond3#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 &meta->lock irq_context: 0 pernet_ops_rwsem rtnl_mutex remove_cache_srcu pgd_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex remove_cache_srcu stock_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex remove_cache_srcu key irq_context: 0 pernet_ops_rwsem rtnl_mutex remove_cache_srcu pcpu_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex remove_cache_srcu percpu_counters_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex remove_cache_srcu pcpu_lock stock_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex remove_cache_srcu &cfs_rq->removed.lock irq_context: 0 sk_lock-AF_SMC k-sk_lock-AF_INET rcu_read_lock rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 irq_context: 0 rtnl_mutex wq_mayday_lock irq_context: 0 sk_lock-AF_SMC k-sk_lock-AF_INET rcu_read_lock rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 batched_entropy_u8.lock irq_context: 0 sk_lock-AF_SMC k-sk_lock-AF_INET rcu_read_lock rcu_read_lock &nf_conntrack_locks[i]/1 irq_context: 0 sk_lock-AF_SMC k-sk_lock-AF_INET &tcp_hashinfo.bhash[i].lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_SMC rcu_read_lock &pool->lock irq_context: 0 sk_lock-AF_SMC rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_SMC rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 sk_lock-AF_SMC rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 sk_lock-AF_SMC rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 sk_lock-AF_SMC rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)smc_hs_wq irq_context: 0 (wq_completion)smc_hs_wq (work_completion)(&smc->connect_work) irq_context: 0 (wq_completion)smc_hs_wq (work_completion)(&smc->connect_work) 
k-sk_lock-AF_INET irq_context: 0 (wq_completion)smc_hs_wq (work_completion)(&smc->connect_work) k-sk_lock-AF_INET k-slock-AF_INET irq_context: 0 (wq_completion)smc_hs_wq (work_completion)(&smc->connect_work) k-slock-AF_INET irq_context: 0 (wq_completion)smc_hs_wq (work_completion)(&smc->connect_work) sk_lock-AF_SMC irq_context: 0 (wq_completion)smc_hs_wq (work_completion)(&smc->connect_work) sk_lock-AF_SMC slock-AF_SMC irq_context: 0 (wq_completion)smc_hs_wq (work_completion)(&smc->connect_work) slock-AF_SMC irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_read_lock &pool->lock irq_context: 0 &nft_net->commit_mutex nf_connlabels_lock irq_context: 0 (wq_completion)bond4#3 irq_context: 0 (wq_completion)bond4#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond4#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond4#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond4#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem &nft_net->commit_mutex &rnp->exp_lock irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem &nft_net->commit_mutex rcu_state.exp_mutex irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem &nft_net->commit_mutex rcu_state.exp_mutex rcu_state.exp_mutex.wait_lock irq_context: 0 (wq_completion)bond4#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond4#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond4#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond4#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond4#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond4#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond4#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond4#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond4#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond4#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond4#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond4#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond4#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond4#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond4#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond4#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond4#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond4#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock 
&dir->lock#2 irq_context: 0 (wq_completion)bond4#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond4#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond4#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond4#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond4#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond4#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond4#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond4#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &c->lock irq_context: 0 (wq_completion)bond4#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond4#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock rcu_read_lock rhashtable_bucket irq_context: 0 (wq_completion)bond4#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &n->list_lock irq_context: 0 (wq_completion)bond4#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond4#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock nl_table_lock irq_context: 0 (wq_completion)bond4#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock nl_table_wait.lock irq_context: 0 (wq_completion)bond4#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond4#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond4#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond4#3 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond4#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond4#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond4#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond4#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond4#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond4#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 &xs->mutex rcu_state.exp_mutex &rq->__lock &cfs_rq->removed.lock irq_context: 0 &sb->s_type->i_mutex_key#10 &xs->mutex rcu_state.exp_mutex rcu_read_lock &rq->__lock irq_context: 0 rtnl_mutex &pool->lock/1 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &sb->s_type->i_mutex_key#10 &xs->mutex rcu_state.exp_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem &nft_net->commit_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock pgd_lock irq_context: 0 sk_lock-AF_SMC k-sk_lock-AF_INET rcu_read_lock rcu_read_lock &c->lock irq_context: 0 &sb->s_type->i_mutex_key#10 k-sk_lock-AF_INET irq_context: 0 &sb->s_type->i_mutex_key#10 k-sk_lock-AF_INET k-slock-AF_INET irq_context: 0 &sb->s_type->i_mutex_key#10 k-sk_lock-AF_INET k-slock-AF_INET &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 k-slock-AF_INET irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_state.exp_mutex.wait_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &p->pi_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock stock_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock key irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock pcpu_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock percpu_counters_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock pcpu_lock stock_lock irq_context: 0 &sb->s_type->i_mutex_key#8 ima_extend_list_mutex &____s->seqcount#2 irq_context: 0 &sb->s_type->i_mutex_key#8 ima_extend_list_mutex &____s->seqcount irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 &cfs_rq->removed.lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle remove_cache_srcu &c->lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle remove_cache_srcu &n->list_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle 
remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle remove_cache_srcu &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle remove_cache_srcu rcu_read_lock rcu_node_0 irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle remove_cache_srcu rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &rnp->exp_lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &rnp->exp_wq[2] irq_context: 0 (wq_completion)wg-crypt-wg2#11 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_node_0 irq_context: 0 (wq_completion)wg-crypt-wg2#11 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &rcu_state.expedited_wq irq_context: 0 (wq_completion)wg-crypt-wg2#11 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_state.exp_mutex irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_state.exp_mutex rcu_state.exp_mutex.wait_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem remove_cache_srcu &c->lock irq_context: 0 rtnl_mutex cpu_hotplug_lock wq_pool_mutex &____s->seqcount#2 irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem remove_cache_srcu &n->list_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem remove_cache_srcu &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem remove_cache_srcu &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex batched_entropy_u8.lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex kfence_freelist_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &meta->lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &rcu_state.expedited_wq &p->pi_lock irq_context: 0 
&sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &meta->lock irq_context: 0 &sb->s_type->i_mutex_key#8 rcu_read_lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_CAN rcu_state.exp_mutex &cfs_rq->removed.lock irq_context: 0 (wq_completion)wg-crypt-wg0#4 (work_completion)(&peer->transmit_packet_work) rcu_node_0 irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 &obj_hash[i].lock pool_lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock pool_lock irq_context: 0 &mm->mmap_lock key#23 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 fill_pool_map-wait-type-override rcu_node_0 irq_context: 0 (wq_completion)bond13 irq_context: 0 (wq_completion)bond13 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond13 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond13 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond13 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond13 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond13 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond13 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond13 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond13 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond13 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond13 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond13 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond13 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond14 irq_context: 0 (wq_completion)bond14 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond14 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond14 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond14 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond14 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond14 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond14 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond14 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond14 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond14 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond14 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond14 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond14 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond14 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond14 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond14 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond14 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond14 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond14 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond14 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond14 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond14 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond14 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond14 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond14 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond14 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond14 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond14 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond14 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond14 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond14 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock rcu_read_lock rhashtable_bucket irq_context: 0 (wq_completion)bond14 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock nl_table_lock irq_context: 0 (wq_completion)bond14 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock 
&br->multicast_lock nl_table_wait.lock irq_context: 0 (wq_completion)bond14 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond14 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond14 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond14 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond14 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond14 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond14 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond13 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond13 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond13 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond13 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond13 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond13 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond13 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond13 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond13 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond13 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond13 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond13 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond13 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond13 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond13 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond13 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond13 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond13 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond13 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 
irq_context: 0 (wq_completion)bond13 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond13 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond13 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond13 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond13 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond13 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond13 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &journal->j_state_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &journal->j_state_lock &journal->j_wait_commit irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &journal->j_state_lock &journal->j_wait_commit &p->pi_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &journal->j_state_lock &journal->j_wait_commit &p->pi_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &journal->j_state_lock &journal->j_wait_commit &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond15 irq_context: 0 (wq_completion)bond15 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond15 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond15 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond15 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond15 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond15 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond15 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond15 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond15 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond15 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond15 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond15 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond15 
(work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond16 irq_context: 0 (wq_completion)bond16 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond16 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond16 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond16 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond16 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond16 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond16 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond16 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 kn->active#65 fs_reclaim irq_context: 0 kn->active#65 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#65 fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 kn->active#65 fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 kn->active#65 stock_lock irq_context: 0 kn->active#65 &kernfs_locks->open_file_mutex[count] irq_context: 0 kn->active#65 &kernfs_locks->open_file_mutex[count] &rq->__lock irq_context: 0 kn->active#65 &kernfs_locks->open_file_mutex[count] &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 kn->active#65 &kernfs_locks->open_file_mutex[count] fs_reclaim irq_context: 0 kn->active#65 &kernfs_locks->open_file_mutex[count] fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &f->f_pos_lock sb_writers#11 &of->mutex kn->active#65 cpu_hotplug_lock irq_context: 0 &f->f_pos_lock sb_writers#11 &of->mutex kn->active#65 cpu_hotplug_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#11 &of->mutex kn->active#65 cpu_hotplug_lock cpuset_rwsem irq_context: 0 &f->f_pos_lock sb_writers#11 &of->mutex kn->active#65 cpu_hotplug_lock cpuset_rwsem cpuset_rwsem.rss.gp_wait.lock irq_context: 0 &f->f_pos_lock sb_writers#11 &of->mutex kn->active#65 cpu_hotplug_lock cpuset_rwsem rcu_state.exp_mutex rcu_node_0 irq_context: 0 &f->f_pos_lock sb_writers#11 &of->mutex kn->active#65 cpu_hotplug_lock cpuset_rwsem rcu_state.exp_mutex &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#11 &of->mutex kn->active#65 cpu_hotplug_lock cpuset_rwsem rcu_state.exp_mutex rcu_read_lock &pool->lock irq_context: 0 &f->f_pos_lock sb_writers#11 &of->mutex kn->active#65 cpu_hotplug_lock cpuset_rwsem rcu_state.exp_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#11 &of->mutex kn->active#65 cpu_hotplug_lock cpuset_rwsem rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 &f->f_pos_lock sb_writers#11 &of->mutex kn->active#65 cpu_hotplug_lock cpuset_rwsem rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#11 &of->mutex kn->active#65 cpu_hotplug_lock cpuset_rwsem rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#11 &of->mutex kn->active#65 cpu_hotplug_lock cpuset_rwsem rcu_state.exp_mutex &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#11 &of->mutex kn->active#65 cpu_hotplug_lock cpuset_rwsem rcu_state.exp_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 
(wq_completion)bond17 irq_context: 0 (wq_completion)bond17 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond17 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond17 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond17 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#11 &of->mutex kn->active#65 cpu_hotplug_lock cpuset_rwsem rcu_state.exp_mutex &rnp->exp_wq[1] irq_context: 0 (wq_completion)bond17 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond17 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#11 &of->mutex kn->active#65 cpu_hotplug_lock cpuset_rwsem &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#11 &of->mutex kn->active#65 cpu_hotplug_lock cpuset_rwsem fs_reclaim irq_context: 0 &f->f_pos_lock sb_writers#11 &of->mutex kn->active#65 cpu_hotplug_lock cpuset_rwsem fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &f->f_pos_lock sb_writers#11 &of->mutex kn->active#65 cpu_hotplug_lock cpuset_rwsem pool_lock#2 irq_context: 0 &f->f_pos_lock sb_writers#11 &of->mutex kn->active#65 cpu_hotplug_lock cpuset_rwsem callback_lock irq_context: 0 &f->f_pos_lock sb_writers#11 &of->mutex kn->active#65 cpu_hotplug_lock cpuset_rwsem sched_domains_mutex irq_context: 0 &f->f_pos_lock sb_writers#11 &of->mutex kn->active#65 cpu_hotplug_lock cpuset_rwsem sched_domains_mutex &dl_b->lock irq_context: 0 &f->f_pos_lock sb_writers#11 &of->mutex kn->active#65 cpu_hotplug_lock cpuset_rwsem sched_domains_mutex &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#11 &of->mutex kn->active#65 cpu_hotplug_lock cpuset_rwsem sched_domains_mutex &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#11 &of->mutex kn->active#65 cpu_hotplug_lock cpuset_rwsem sched_domains_mutex pool_lock#2 irq_context: 0 &f->f_pos_lock sb_writers#11 &of->mutex kn->active#65 cpu_hotplug_lock cpuset_rwsem sched_domains_mutex rcu_read_lock &dl_b->lock irq_context: 0 &f->f_pos_lock sb_writers#11 &of->mutex kn->active#65 cpu_hotplug_lock cpuset_rwsem sched_domains_mutex css_set_lock irq_context: 0 &f->f_pos_lock sb_writers#11 &of->mutex kn->active#65 cpu_hotplug_lock cpuset_rwsem sched_domains_mutex &p->pi_lock irq_context: 0 &f->f_pos_lock sb_writers#11 &of->mutex kn->active#65 cpu_hotplug_lock cpuset_rwsem sched_domains_mutex &stop_pi_lock irq_context: 0 &f->f_pos_lock sb_writers#11 &of->mutex kn->active#65 cpu_hotplug_lock cpuset_rwsem.waiters.lock irq_context: 0 &f->f_pos_lock sb_writers#11 &of->mutex kn->active#65 cpu_hotplug_lock cpuset_rwsem.rss.gp_wait.lock irq_context: 0 &f->f_pos_lock sb_writers#11 &of->mutex kn->active#65 cpu_hotplug_lock cpuset_rwsem.rss.gp_wait.lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond17 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond17 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond17 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond17 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 kn->active#65 &c->lock irq_context: 0 kn->active#65 &n->list_lock irq_context: 0 kn->active#65 &n->list_lock &c->lock irq_context: 0 &f->f_pos_lock sb_writers#11 &of->mutex kn->active#65 cpu_hotplug_lock 
cpuset_rwsem rcu_state.exp_mutex &rnp->exp_wq[2] irq_context: 0 &f->f_pos_lock sb_writers#11 &of->mutex kn->active#65 cpu_hotplug_lock cpuset_rwsem rcu_state.exp_mutex rcu_read_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#11 &of->mutex kn->active#65 cpu_hotplug_lock cpuset_rwsem rcu_state.exp_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem &nft_net->commit_mutex rcu_state.exp_mutex.wait_lock irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem &nft_net->commit_mutex &p->pi_lock irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem &nft_net->commit_mutex &p->pi_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem &nft_net->commit_mutex &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem &nft_net->commit_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond17 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond17 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond17 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond17 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond17 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond17 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond17 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond17 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond17 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond17 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond17 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond17 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond17 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond17 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond17 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond17 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond17 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond17 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond17 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond17 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 
(wq_completion)bond17 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond17 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond17 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond17 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond17 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond17 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond12 irq_context: 0 (wq_completion)bond12 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond12 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond12 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond12 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond12 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond12 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond12 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond12 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 fill_pool_map-wait-type-override &c->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 fill_pool_map-wait-type-override &n->list_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 fill_pool_map-wait-type-override pool_lock irq_context: 0 &f->f_pos_lock sb_writers#11 &of->mutex kn->active#65 cpu_hotplug_lock cpuset_rwsem rcu_state.exp_mutex &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond18 irq_context: 0 (wq_completion)bond18 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond18 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond18 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond18 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 remove_cache_srcu rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 remove_cache_srcu rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond18 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond18 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond18 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond18 
(work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex fill_pool_map-wait-type-override rcu_read_lock rcu_node_0 irq_context: 0 rtnl_mutex fill_pool_map-wait-type-override rcu_read_lock &rq->__lock irq_context: 0 rtnl_mutex fill_pool_map-wait-type-override rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex fill_pool_map-wait-type-override rcu_read_lock &rcu_state.gp_wq irq_context: 0 rtnl_mutex fill_pool_map-wait-type-override rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 rtnl_mutex fill_pool_map-wait-type-override rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 rtnl_mutex fill_pool_map-wait-type-override rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond13#2 irq_context: 0 (wq_completion)bond13#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond13#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond13#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond13#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond18 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond18 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond13#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond13#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond13#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond13#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond19 irq_context: 0 (wq_completion)bond19 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond19 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond19 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond19 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond19 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond19 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond19 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond19 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond19 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond19 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond10 irq_context: 0 (wq_completion)bond10 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond10 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond10 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond10 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond10 
(work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond10 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond10 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond10 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond10 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond10 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond12 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond18 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond18 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond18 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond18 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond18 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond18 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond18 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond18 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond18 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond18 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond18 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond18 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond18 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond18 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond18 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond18 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond18 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond18 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond18 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond18 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond18 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond18 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex 
&idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond18 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond18 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond18 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond18 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond13#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond13#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond13#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond13#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond13#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond16 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond16 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond16 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond16 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond16 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond16 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond16 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond16 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond16 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond16 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond16 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond16 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond16 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond16 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond16 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond16 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond16 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond16 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 
(wq_completion)bond16 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond16 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond16 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond16 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond16 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond16 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond16 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond19 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond19 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond19 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond19 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond19 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond19 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond19 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond19 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond19 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond19 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond19 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond19 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond19 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond19 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond19 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond19 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond19 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond19 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock irq_context: 0 (wq_completion)bond19 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock &c->lock irq_context: 0 
(wq_completion)bond19 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock irq_context: 0 (wq_completion)bond19 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond19 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond19 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond19 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond19 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond19 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond19 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond19 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond19 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond19 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond19 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond19 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond19 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond19 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond19 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond7 irq_context: 0 (wq_completion)bond7 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond7 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond7 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond7 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond7 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond7 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond7 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond7 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond12 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond12 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 
(wq_completion)bond14#2 irq_context: 0 (wq_completion)bond14#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond14#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond14#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond14#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond14#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond14#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond14#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond14#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond14#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond14#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond20 irq_context: 0 (wq_completion)bond20 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond20 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond20 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond20 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond20 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond20 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond20 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond20 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond11 irq_context: 0 (wq_completion)bond11 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond11 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond11 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond11 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond11 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond11 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond11 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond11 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond11 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond11 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond11 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond11 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond11 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond11 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond11 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem 
&list->lock#2 irq_context: 0 (wq_completion)bond11 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond11 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond11 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond11 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond20 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond20 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond20 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond20 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond20 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond11 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond11 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond11 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond11 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond11 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond12 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond12 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond12 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond12 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond12 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond12 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond12 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond12 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond12 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond12 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond12 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond12 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond12 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond12 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond12 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond12 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond12 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 
(wq_completion)bond12 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond12 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond12 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond12 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond12 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond12 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond12 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond12 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond12 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond10 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond10 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond10 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond10 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond10 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond7 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond7 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond7 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond7 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond7 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond14#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond14#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond14#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond14#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond14#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond14#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond14#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond14#2 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond14#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond14#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond14#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond14#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond14#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond14#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond14#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond14#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond14#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond14#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond14#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond14#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond14#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond14#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond14#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond14#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond14#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond14#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond14#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond14#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond14#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock rcu_read_lock rhashtable_bucket irq_context: 0 (wq_completion)bond14#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock nl_table_lock irq_context: 0 (wq_completion)bond14#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock 
rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock nl_table_wait.lock irq_context: 0 (wq_completion)bond14#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond14#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond14#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond14#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond14#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond14#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond19 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond19 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond19 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond19 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond19 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond17 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond17 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond17 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond17 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond17 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond15 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond15 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond15 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond15 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond15 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond15 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)bond15 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond15 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond15 (work_completion)(&(&bond->mcast_work)->work) 
rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond15 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond15 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond15 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond15 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond15 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond15 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond15 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond15 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond15 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond15 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 (wq_completion)bond15 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond15 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond15 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond15 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond15 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond15 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond15 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond15 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond15 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond15 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond15 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond15 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond15 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond15 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock 
irq_context: 0 (wq_completion)bond15 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond15 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond15 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond14#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond14#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond14#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond14#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond14#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond15#2 irq_context: 0 (wq_completion)bond15#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond15#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond15#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond15#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond15#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond15#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond15#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond15#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond8 irq_context: 0 (wq_completion)bond8 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond8 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond8 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond8 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond8 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond8 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond8 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond8 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond15#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond15#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond13#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond13#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond21 irq_context: 0 (wq_completion)bond21 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond21 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond21 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond21 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond13#2 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond13#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond13#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond13#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond13#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond13#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond13#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond13#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond13#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond13#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond13#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond13#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond13#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond13#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond13#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond13#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond13#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond13#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond13#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond13#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond13#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond13#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond13#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond13#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond13#2 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond13#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond10 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond10 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond10 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 (wq_completion)bond10 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond10 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex batched_entropy_u8.lock irq_context: 0 (wq_completion)bond10 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex kfence_freelist_lock irq_context: 0 (wq_completion)bond10 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond10 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond10 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond10 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond21 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond21 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond21 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond21 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond21 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond21 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond10 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond10 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond10 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond10 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond10 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &meta->lock irq_context: 0 (wq_completion)bond10 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond10 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond10 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond10 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond10 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond10 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock irq_context: 0 (wq_completion)bond10 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond10 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond10 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond10 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond10 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond10 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond10 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond10 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond10 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond10 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond10 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond10 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond10 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond10 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond10 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond10 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond10 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond10 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond10 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond10 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond10 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond16 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond16 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond12#2 irq_context: 0 (wq_completion)bond12#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond12#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond12#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 
(wq_completion)bond12#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond12#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond12#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond12#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond12#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond12#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 rtnl_mutex &idev->mc_lock &dev_addr_list_lock_key &n->list_lock irq_context: 0 rtnl_mutex &idev->mc_lock &dev_addr_list_lock_key &n->list_lock &c->lock irq_context: 0 (wq_completion)bond16#2 irq_context: 0 (wq_completion)bond16#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond16#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond16#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond16#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond16#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond16#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond16#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond16#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock &obj_hash[i].lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock &base->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond9 irq_context: 0 (wq_completion)bond9 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond9 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond9 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond9 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &n->list_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond9 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond9 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond9 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond9 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 
(wq_completion)bond22 irq_context: 0 (wq_completion)bond22 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond22 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond22 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond22 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond22 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond22 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond22 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond22 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond17#2 irq_context: 0 (wq_completion)bond17#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond17#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond17#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond17#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond17#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond17#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond17#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond17#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_CAN rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 rtnl_mutex hrtimer_bases.lock irq_context: 0 rtnl_mutex hrtimer_bases.lock tk_core.seq.seqcount irq_context: 0 rtnl_mutex hrtimer_bases.lock &obj_hash[i].lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock quarantine_lock irq_context: 0 (wq_completion)bond10#2 irq_context: 0 (wq_completion)bond10#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond10#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond10#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond10#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond10#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond10#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond10#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond10#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond23 irq_context: 0 (wq_completion)bond23 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond23 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond23 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond23 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond23 (work_completion)(&(&bond->mcast_work)->work) 
irq_context: 0 (wq_completion)bond23 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond23 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond23 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond18#2 irq_context: 0 (wq_completion)bond18#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond18#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond18#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond18#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond18#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond18#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond18#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond18#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond18#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond18#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &dev_addr_list_lock_key/1 rcu_read_lock &bridge_netdev_addr_lock_key &n->list_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &dev_addr_list_lock_key/1 rcu_read_lock &bridge_netdev_addr_lock_key &n->list_lock &c->lock irq_context: 0 (wq_completion)bond24 irq_context: 0 (wq_completion)bond24 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond24 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond24 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond24 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond24 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond24 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond24 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond24 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond24 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond24 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond19#2 irq_context: 0 (wq_completion)bond19#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond19#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond19#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond19#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond19#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond19#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond19#2 
(work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond19#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond19#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond19#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond19#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond19#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond19#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond20#2 irq_context: 0 rtnl_mutex &br->hash_lock rcu_read_lock rcu_read_lock &pool->lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 rtnl_mutex &br->hash_lock rcu_read_lock rcu_read_lock &pool->lock fill_pool_map-wait-type-override &c->lock irq_context: 0 rtnl_mutex &br->hash_lock rcu_read_lock rcu_read_lock &pool->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond20#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond20#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond20#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond20#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond20#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond20#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond20#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond20#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&pool->work) &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond9 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond9 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond21#2 irq_context: 0 (wq_completion)bond20#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond20#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &rq->__lock irq_context: 0 (wq_completion)bond20#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond20#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond20#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock &rq->__lock irq_context: 0 (wq_completion)bond20#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond20#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond20#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond20#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond20#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond20#2 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond20#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond20#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond20#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond20#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond20#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond20#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond20#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond20#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond20#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond20#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond20#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond20#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond20#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock irq_context: 0 (wq_completion)bond20#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond20#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond20#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond20#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond20#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond20#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond20#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond20#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond21#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond21#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond21#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond21#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond21#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond21#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond21#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond21#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg0#11 (work_completion)(&peer->transmit_packet_work) &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 
(wq_completion)wg-crypt-wg0#11 (work_completion)(&peer->transmit_packet_work) &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond22#2 irq_context: 0 (wq_completion)bond22#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond22#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond22#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond22#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond22#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond22#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond22#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond22#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond22#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond22#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq &(&slave->notify_work)->timer rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)bond9 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond9 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond9 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond9 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond9 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond9 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond9 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond9 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond9 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond9 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond9 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond9 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond9 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond9 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond9 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond9 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond9 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 (wq_completion)bond9 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock 
fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond9 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond9 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond9 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond9 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond9 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond9 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond9 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond9 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond9 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond9 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond9 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond9 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond9 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond9 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond9 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond9 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond9 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond9 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond9 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond9 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: softirq &(&slave->notify_work)->timer rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override &n->list_lock irq_context: softirq &(&slave->notify_work)->timer rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: softirq &(&slave->notify_work)->timer rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond23#2 irq_context: 0 (wq_completion)bond15#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond15#2 
(work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond15#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond15#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond15#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond21#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond21#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond21#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond21#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond21#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond21#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond21#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond21#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond21#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond21#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond21#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond21#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond21#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond21#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond21#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond21#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond21#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond21#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond21#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond21#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond21#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond21#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond21#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex 
&idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond21#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond21#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond21#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond18#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond18#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond18#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond18#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond18#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond23#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond23#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond23#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond23#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond12#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond12#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond12#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond12#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond12#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond23#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond23#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond23#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond23#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond24#2 irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_read_lock &rcu_state.expedited_wq irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)bond17#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond17#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond17#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond17#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond17#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond16#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond16#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond16#2 
(work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond16#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond16#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond10#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond10#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond10#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond10#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond10#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond10#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond10#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond10#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond10#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond10#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond10#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond10#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond10#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond10#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 (wq_completion)bond10#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond10#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond10#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond10#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond10#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &n->list_lock irq_context: 0 (wq_completion)bond10#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond10#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond10#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond10#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond10#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock 
&br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond10#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond10#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond10#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond10#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond10#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond10#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond10#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond10#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond10#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond10#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond24#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond24#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond24#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond24#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond24#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond24#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond24#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond24#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 jbd2_handle rcu_read_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond24#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 rtnl_mutex cpu_hotplug_lock wq_pool_mutex fs_reclaim &rq->__lock irq_context: 0 rtnl_mutex cpu_hotplug_lock wq_pool_mutex fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond24#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond24#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond24#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond24#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex 
nl_table_lock irq_context: 0 (wq_completion)bond24#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond24#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond24#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond24#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond24#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond24#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond24#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond24#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond24#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond24#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond24#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond24#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond24#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond24#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-crypt-wg1#11 (work_completion)(&peer->transmit_packet_work) &rq->__lock irq_context: 0 (wq_completion)wg-crypt-wg1#11 (work_completion)(&peer->transmit_packet_work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond25 irq_context: 0 (wq_completion)bond25 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond25 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond25 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond25 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond25 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond25 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond25 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond25 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 batched_entropy_u8.lock crngs.lock irq_context: 0 (wq_completion)bond23 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond23 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond23 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond23 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond23 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond23 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 
(wq_completion)bond23 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond23 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock &p->pi_lock irq_context: 0 (wq_completion)bond23 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond23 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond23 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond23 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond23 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond23 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond23 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond23 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond23 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond23 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond23 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond23 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond23 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond23 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond23 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond23 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond23 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond23 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond23 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond23 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock &obj_hash[i].lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock 
&base->lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond22 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 rtnl_mutex dpm_list_mtx &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond24 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond24 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond24 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond24 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond24 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond24 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond24 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond24 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond24 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond24 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond24 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond24 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond24 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond24 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond24 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond24 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond24 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond24 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond24 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond24 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond24 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond24 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond24 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond24 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond24 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond24 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond23#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond23#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond23#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond23#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond23#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond25 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond25 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond25 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond25 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond25 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond25 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond25 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond25 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond25 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond25 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond25 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond25 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond25 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond25 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond25 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond25 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond25 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond25 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond25 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond25 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond25 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond25 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond25 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond25 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond25 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond25 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond25 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond8 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond8 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond8 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond8 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond8 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond8 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond8 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond8 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond8 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond8 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond8 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock &rq->__lock irq_context: 0 (wq_completion)bond8 (work_completion)(&(&bond->mcast_work)->work) 
rtnl_mutex &tn->lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond8 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond8 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond8 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond8 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 (wq_completion)bond8 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond8 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond8 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock irq_context: 0 (wq_completion)bond8 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond8 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond8 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond8 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond8 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond8 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond8 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond8 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond8 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond8 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond8 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond8 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond8 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond8 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond8 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq 
irq_context: 0 (wq_completion)bond8 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond8 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss remove_cache_srcu &pcp->lock &zone->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss remove_cache_srcu &pcp->lock &zone->lock &____s->seqcount irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &dev->tx_global_lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &dev->tx_global_lock fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &dev->tx_global_lock fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond20 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)events_long (work_completion)(&(&ipvs->defense_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond22#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond22#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond22#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond22#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond22#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond22 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond22 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond22 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond22 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond22 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond22 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond22 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond22 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond22 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond22 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond22 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond22 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond22 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond22 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond22 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond22 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond22 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond22 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond22 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond22 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond22 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond22 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond22 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond22 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond22 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond22 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond21 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond21 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond21 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond21 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond21 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond22 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond22 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond22 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond22 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond22 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex fill_pool_map-wait-type-override &n->list_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 (wq_completion)bond23#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex 
fs_reclaim irq_context: 0 (wq_completion)bond23#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond23#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond23#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &____s->seqcount#2 irq_context: 0 (wq_completion)bond23#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &____s->seqcount irq_context: 0 (wq_completion)bond23#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond23#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond23#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond23#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond23#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond23#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond23#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond23#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond23#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond23#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond23#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond23#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond23#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock irq_context: 0 (wq_completion)bond23#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond23#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond23#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond23#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond23#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond23#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond23#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond23#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond23#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock 
rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond23#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond20 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond20 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond20 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond20 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond20 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond20 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond20 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond20 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond20 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond20 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond20 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond20 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond20 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond20 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond20 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond20 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond20 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond20 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond20 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond20 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond20 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond20 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock 
rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond20 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond18 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond18 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond18 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond12 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond12 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond12 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond16 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond16 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond16 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond10#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond10#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond10#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond18#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond18#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond18#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond18#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond18#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond18#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond18#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond18#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond18#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond18#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond18#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond18#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond18#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond18#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond18#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond18#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 
(wq_completion)bond18#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond18#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond18#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond18#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond18#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond18#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond18#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond18#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond18#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond18#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond18#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond18#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond18#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond18#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq irq_context: 0 (wq_completion)bond18#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 (wq_completion)bond18#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond18#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond23 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond23 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond23 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond21 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 
(wq_completion)bond21 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond21 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond21 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond21 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond21 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond21 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond21 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond21 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond21 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond21 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond21 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond21 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond21 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond21 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock irq_context: 0 (wq_completion)bond21 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond21 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond21 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond21 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond21 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond21 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond21 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond21 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond21 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond21 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond21 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond24#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond24#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond24#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond21#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond21#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond21#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond12#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond12#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond12#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond12#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond12#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond12#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond12#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond12#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond12#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond12#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond12#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond12#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond12#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond12#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond12#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond12#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond12#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond12#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond12#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond12#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond12#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 
(wq_completion)bond12#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond12#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond12#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond12#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond12#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond12#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond12#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond12#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond12#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond12#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq irq_context: 0 (wq_completion)bond12#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 (wq_completion)bond12#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond12#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond12#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond12#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond12#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond12#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond12#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &nft_net->commit_mutex &rq->__lock irq_context: 0 rtnl_mutex &nft_net->commit_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond8 
(work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond8 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &rq->__lock irq_context: 0 (wq_completion)bond8 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond8 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond8 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond8 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond8 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond8 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond8 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond8 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex cpu_hotplug_lock wq_pool_mutex &pcp->lock &zone->lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex nl_table_wait.lock &p->pi_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex nl_table_wait.lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex nl_table_wait.lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond22#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond22#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond22#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond22#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond22#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond22#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond22#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond22#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond22#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond22#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond22#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond22#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond22#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond22#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond22#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond22#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock 
irq_context: 0 (wq_completion)bond22#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond22#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond22#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond22#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond22#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)bond22#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)bond22#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock fill_pool_map-wait-type-override rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)bond22#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock fill_pool_map-wait-type-override &obj_hash[i].lock irq_context: 0 (wq_completion)bond22#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)bond22#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond22#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond22#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond22#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond22#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond16#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond16#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond16#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 
(wq_completion)bond16#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond16#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond16#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond16#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond16#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond16#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond16#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond16#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond16#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond16#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond16#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond16#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond16#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock irq_context: 0 (wq_completion)bond16#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond16#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond16#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond16#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond16#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond16#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond16#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond16#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond16#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond16#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond16#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 
(wq_completion)bond16#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond16#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond16#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond16#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond16#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond17#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond17#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond17#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond17#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond17#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond17#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond17#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond17#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond17#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond17#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &rq->__lock irq_context: 0 (wq_completion)bond17#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond17#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond17#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond17#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond17#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond17#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond17#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond17#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond17#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond17#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond17#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond17#2 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond17#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond17#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond17#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond17#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond17#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond17#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond17#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond17#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)hci5#2 (work_completion)(&(&conn->disc_work)->work) irq_context: 0 (wq_completion)hci5#2 (work_completion)(&(&conn->disc_work)->work) &list->lock#9 irq_context: 0 (wq_completion)hci5#2 (work_completion)(&(&conn->disc_work)->work) rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)hci5#2 (work_completion)(&(&conn->disc_work)->work) rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&(&conn->disc_work)->work) rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&(&conn->disc_work)->work) rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&(&conn->disc_work)->work) rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->cmd_work) batched_entropy_u8.lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->cmd_work) kfence_freelist_lock irq_context: 0 (wq_completion)events (work_completion)(&umem->work) &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)bond19#3 irq_context: 0 (wq_completion)bond19#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond19#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond19#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond19#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond19#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond19#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond19#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond19#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond19#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond17#3 
irq_context: 0 (wq_completion)bond17#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond17#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond17#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond17#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond17#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond17#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond17#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond17#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond25 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond25 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond20#3 irq_context: 0 (wq_completion)bond20#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond20#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond20#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond20#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond20#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond20#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond20#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond20#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond18#3 irq_context: 0 (wq_completion)bond18#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond18#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond18#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond18#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond18#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond18#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond18#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond18#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)bond18#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)bond18#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock fill_pool_map-wait-type-override &n->list_lock irq_context: 0 (wq_completion)bond18#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 (wq_completion)bond18#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond18#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond18#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock 
irq_context: 0 (wq_completion)bond18#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond17#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond17#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock remove_cache_srcu rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 &mm->mmap_lock remove_cache_srcu rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond17#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond17#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond17#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond17#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond17#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond17#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond17#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond17#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond17#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond17#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond17#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond17#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond17#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond17#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond17#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond17#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond17#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond17#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond17#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond17#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond17#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond17#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond17#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock 
&br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond17#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond17#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond17#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond17#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock &base->lock irq_context: 0 (wq_completion)bond17#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond17#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond17#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond17#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond17#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond17#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond17#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond17#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond17#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond21#3 irq_context: 0 (wq_completion)bond21#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond21#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond21#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond21#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond21#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond21#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond21#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond21#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond19#4 irq_context: 0 (wq_completion)bond19#4 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond19#4 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond19#4 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond19#4 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond19#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock 
irq_context: 0 (wq_completion)bond19#4 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond19#4 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond19#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond19#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond22#3 irq_context: 0 (wq_completion)bond22#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond22#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond22#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond22#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond22#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond22#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond22#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond22#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond22#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond22#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond15#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond19#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond19#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond19#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond19#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond19#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond19#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond19#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond19#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond19#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond19#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond19#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond19#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond19#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond19#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond19#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond19#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond19#2 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond19#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond19#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond19#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond19#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond19#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond19#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond19#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond19#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond19#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond19#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond19#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond19#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond19#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond19#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond19#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond19#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond19#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond15#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond20#4 irq_context: 0 (wq_completion)bond20#4 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond20#4 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond20#4 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond20#4 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond20#4 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 
(wq_completion)bond20#4 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond20#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond20#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond20#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond3#4 irq_context: 0 (wq_completion)bond3#4 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond3#4 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond3#4 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond3#4 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond3#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock &obj_hash[i].lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock &base->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond3#4 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond3#4 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond3#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond3#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 &mm->mmap_lock &anon_vma->rwsem rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond23#3 irq_context: 0 (wq_completion)bond23#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond23#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond23#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond23#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond23#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond23#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond23#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond23#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond23#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond23#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond21#4 irq_context: 0 (wq_completion)bond21#4 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond21#4 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond21#4 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 
(wq_completion)bond21#4 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond21#4 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond21#4 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond21#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond21#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond4#4 irq_context: 0 (wq_completion)bond4#4 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond4#4 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond4#4 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond4#4 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond20#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond20#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond4#4 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond4#4 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond4#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond4#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 (wq_completion)bond22#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond22#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond22#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond22#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond22#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond22#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond22#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond22#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond22#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond22#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond22#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond22#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond22#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_node_0 irq_context: 0 (wq_completion)bond22#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rcu_state.expedited_wq irq_context: 0 (wq_completion)bond22#3 (work_completion)(&(&bond->mcast_work)->work) 
rtnl_mutex &idev->mc_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)bond22#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock irq_context: 0 (wq_completion)bond22#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond22#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond22#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond22#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond22#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond22#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond22#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond22#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond22#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond22#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond22#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond22#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond22#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond22#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond22#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond22#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond22#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond22#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond22#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond22#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond22#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)ipv6_addrconf 
(work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_state.exp_mutex &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond24#3 irq_context: 0 (wq_completion)bond24#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond24#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond24#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond24#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond24#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond24#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond24#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond24#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex uevent_sock_mutex &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)bond5#2 irq_context: 0 (wq_completion)bond5#2 &rq->__lock irq_context: 0 (wq_completion)bond5#2 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond5#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond5#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond5#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond5#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond5#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond5#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond5#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond5#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond5#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond5#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond20#3 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond4#4 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)bond21#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond21#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond21#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond21#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond21#4 
(work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond19#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond19#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond19#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond19#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond19#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond19#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond19#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond19#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond19#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond19#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond19#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond19#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond19#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond19#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond19#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond19#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond19#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond19#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond19#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond19#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond19#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &n->list_lock irq_context: 0 (wq_completion)bond19#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond19#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond19#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond19#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond19#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 
(wq_completion)bond19#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond19#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond19#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond19#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond19#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond21#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond21#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond21#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond21#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond21#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond21#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond21#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond21#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rcu_state.gp_wq irq_context: 0 (wq_completion)bond21#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 (wq_completion)bond21#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond21#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond21#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond21#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond21#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond21#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond21#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond21#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &rq->__lock irq_context: 0 (wq_completion)bond21#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond21#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond21#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond21#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 
(wq_completion)bond21#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond21#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond21#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond21#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond21#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond21#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond21#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond21#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond21#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond21#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond21#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond21#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond21#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond21#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond21#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond21#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond21#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond21#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond21#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond21#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond21#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond21#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 
(wq_completion)bond21#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond21#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond23#3 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond4#4 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 (wq_completion)bond4#4 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)bond4#4 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond6 irq_context: 0 (wq_completion)bond6 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond6 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond6 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond6 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond6 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond6 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond6 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond6 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond20#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond23#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond23#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond23#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond4#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond4#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond4#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond4#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond4#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond4#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond4#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond4#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond4#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond4#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond4#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond4#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond4#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex 
net_rwsem irq_context: 0 (wq_completion)bond4#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond4#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond4#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond4#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond4#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond4#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond4#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond4#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond4#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond4#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond4#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond4#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond4#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond4#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond4#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond4#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond4#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond4#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond4#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond19#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond19#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond19#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond20#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond20#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond20#4 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond20#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond20#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond20#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond20#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond20#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond20#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond20#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond20#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond20#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond20#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond20#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond20#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond20#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond20#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond20#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond20#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond20#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond20#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond20#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond20#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond24#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond24#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond24#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond6 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond6 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond6 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond6 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond6 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond6 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond6 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond6 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond6 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond6 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond6 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond6 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond6 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond6 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond6 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond6 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond6 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond6 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond6 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond6 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond6 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond6 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond6 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond3#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond3#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond3#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 
irq_context: 0 (wq_completion)bond3#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond3#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond3#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond3#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond3#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond3#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond3#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond3#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond3#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond3#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond3#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond3#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond3#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond3#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond3#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond3#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond3#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond3#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond3#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond3#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond3#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond20#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond20#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond20#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond20#3 (work_completion)(&(&bond->mcast_work)->work) 
rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond20#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond20#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond20#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond20#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond20#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond20#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond20#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond20#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond20#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock irq_context: 0 (wq_completion)bond20#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond20#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond20#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond20#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond20#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond20#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond20#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond20#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond20#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond20#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond20#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond20#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond20#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond20#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond18#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond18#3 
(work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond18#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond18#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond18#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond5#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&umem->work) &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond20#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond20#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond20#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond20#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond20#4 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond18#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond18#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond18#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 (wq_completion)bond18#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond18#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond18#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond18#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond18#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond18#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond18#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond18#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond18#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond18#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond18#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond18#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock irq_context: 0 (wq_completion)bond18#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond18#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond18#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond18#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock 
rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond18#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond18#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond18#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond18#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond18#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond18#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond18#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond18#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond18#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond18#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond18#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond18#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond18#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq irq_context: 0 (wq_completion)bond18#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 (wq_completion)bond18#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond18#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond18#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond18#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond18#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond18#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond18#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond24 (work_completion)(&(&slave->notify_work)->work) 
rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond24 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond24 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond24 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond24 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond15#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond15#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond15#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond15#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond15#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond15#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond15#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond15#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond15#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond15#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond15#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond15#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond15#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond15#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond15#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond15#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim &rq->__lock irq_context: 0 (wq_completion)bond15#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond15#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond15#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond15#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond15#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond15#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond15#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond15#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock 
rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond15#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond15#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond15#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond15#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond15#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond15#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond15#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond15#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond15#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond15#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond15#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond15#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond15#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond15#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond7#2 irq_context: 0 rtnl_mutex &idev->mc_lock fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 (wq_completion)bond7#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond7#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond7#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond7#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond7#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond7#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond7#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond7#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex smc_ib_devices.mutex &rq->__lock irq_context: 0 rtnl_mutex smc_ib_devices.mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)mld 
(work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)bond7#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond7#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond7#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond7#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond7#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond7#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond7#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond7#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond7#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond7#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond7#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond7#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond7#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond7#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond7#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 (wq_completion)bond7#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond7#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond7#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond7#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond7#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond7#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond7#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond7#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond7#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock 
rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond7#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond7#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond7#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond7#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond5#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond5#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond5#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond5#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond5#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond19#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond19#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond19#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 (wq_completion)bond19#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond19#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond19#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond19#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond19#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond19#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond19#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond19#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond19#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond19#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond19#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond19#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock irq_context: 0 (wq_completion)bond19#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond19#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond19#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond19#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock 
rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond19#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond19#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond19#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond19#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond19#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond19#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond19#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond19#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond19#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond19#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond19#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond19#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond19#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond19#4 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond19#4 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond19#4 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond19#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond19#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex rcu_state.exp_mutex fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 &xs->mutex &zone->lock irq_context: 0 rtnl_mutex rcu_state.exp_mutex fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 &xs->mutex &mm->mmap_lock &c->lock irq_context: 0 (wq_completion)bond19#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 &xs->mutex &zone->lock &____s->seqcount irq_context: 0 (wq_completion)bond19#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond19#3 (work_completion)(&(&slave->notify_work)->work) 
rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond19#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond19#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 rtnl_mutex cpu_hotplug_lock wq_pool_mutex &wq->mutex &rq->__lock irq_context: 0 rtnl_mutex cpu_hotplug_lock wq_pool_mutex &wq->mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex fill_pool_map-wait-type-override rcu_node_0 irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex net_rwsem &rq->__lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex net_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond21#4 &rq->__lock irq_context: 0 (wq_completion)bond21#4 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond25 &rq->__lock irq_context: 0 (wq_completion)bond25 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond6 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond6 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) rcu_read_lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 tasklist_lock &sighand->siglock &(&sig->stats_lock)->lock &____s->seqcount#5 pidmap_lock fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) rcu_read_lock fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) rcu_read_lock fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) rcu_read_lock fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) rcu_read_lock fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)wg-crypt-wg2#11 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)wg-crypt-wg2#11 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &ul->lock irq_context: 0 (wq_completion)wg-crypt-wg2#11 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)wg-crypt-wg2#11 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg2#11 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)wg-crypt-wg1#11 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg2#21 (work_completion)(&peer->transmit_handshake_work) &c->lock irq_context: 0 (wq_completion)wg-kex-wg1#22 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &c->lock irq_context: 0 rtnl_mutex &ht->mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 
&mm->mmap_lock remove_cache_srcu rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond5#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)rcu_gp (work_completion)(&rew->rew_work) fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 (wq_completion)bond5#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond5#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 rtnl_mutex cpu_hotplug_lock wq_pool_mutex &pcp->lock &zone->lock &____s->seqcount irq_context: 0 (wq_completion)bond5#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond5#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond5#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond5#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond5#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond5#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond5#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond5#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond5#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond5#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond5#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond5#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond5#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock irq_context: 0 (wq_completion)bond5#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond5#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond5#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond5#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond5#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond5#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond5#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond5#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 
(wq_completion)bond5#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond5#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond5#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond5#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond5#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 rtnl_mutex &xs->mutex &pool->xsk_tx_list_lock irq_context: 0 &sb->s_type->i_mutex_key#10 &xs->mutex &pool->xsk_tx_list_lock irq_context: 0 fill_pool_map-wait-type-override batched_entropy_u8.lock irq_context: 0 fill_pool_map-wait-type-override kfence_freelist_lock irq_context: softirq &(&bond->mcast_work)->timer rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond25 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond25 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond25 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond25 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond25 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: hardirq hrtimer_bases.lock fill_pool_map-wait-type-override init_task.mems_allowed_seq.seqcount irq_context: 0 rtnl_mutex &xs->mutex &____s->seqcount#2 irq_context: 0 (wq_completion)bond20#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond20#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond20#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond20#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond20#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond23#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond23#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond23#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond23#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond23#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond23#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond23#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond23#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond23#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem 
irq_context: 0 (wq_completion)bond23#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond23#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond23#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond23#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond23#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond23#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond23#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond23#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond23#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond23#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond23#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond23#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond23#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond23#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond23#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond23#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond23#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond23#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond23#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 &xs->mutex rcu_read_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 rtnl_mutex &wq->mutex &rq->__lock irq_context: 0 rtnl_mutex &wq->mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond34 irq_context: 0 (wq_completion)bond34 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond34 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond34 (work_completion)(&(&slave->notify_work)->work) &base->lock 
irq_context: 0 (wq_completion)bond34 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond35 irq_context: 0 (wq_completion)bond35 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond35 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond35 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond35 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond36 irq_context: 0 (wq_completion)bond21#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond21#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim &rq->__lock irq_context: 0 (wq_completion)bond21#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond21#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond21#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond21#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond21#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond21#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond21#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond21#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond21#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond21#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond21#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond21#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond21#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond21#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond21#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond21#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond21#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond21#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond21#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond21#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond21#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock 
&rq->__lock irq_context: 0 (wq_completion)bond21#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond21#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond21#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond21#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond21#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond21#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond21#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond21#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond21#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond21#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond21#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond21#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond21#4 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond21#4 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond21#4 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond21#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond21#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond36 &rq->__lock irq_context: 0 (wq_completion)bond36 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond36 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond36 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond36 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond36 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond36 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond36 
(work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond21#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond21#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond37 irq_context: 0 (wq_completion)bond37 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond37 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond37 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond37 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 mmu_notifier_invalidate_range_start &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond37 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond37 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond37 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond37 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond37 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond37 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond37 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 tomoyo_ss rcu_read_lock rcu_read_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond39 irq_context: 0 (wq_completion)bond39 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond39 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond39 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond39 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock &base->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond24#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond24#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond24#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond24#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond24#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond24#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex 
nl_table_lock irq_context: 0 (wq_completion)bond24#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond24#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond24#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond24#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond24#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond24#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond24#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond24#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond24#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond24#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond24#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond24#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond24#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond24#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond24#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond24#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond24#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond24#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond24#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond24#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond24#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 rtnl_mutex &ht->mutex rcu_read_lock rcu_node_0 irq_context: 0 rtnl_mutex &ht->mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond40 irq_context: 0 (wq_completion)bond40 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond40 
(work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond40 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond40 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond40 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond40 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex sysctl_lock fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 rtnl_mutex sysctl_lock fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)events drain_vmap_work &rq->__lock irq_context: 0 (wq_completion)events drain_vmap_work &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond41 irq_context: 0 (wq_completion)bond41 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond41 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond41 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond41 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond21#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond21#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond21#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond21#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond21#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond40 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond40 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond40 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond40 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond40 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 rtnl_mutex dev_pm_qos_sysfs_mtx &rq->__lock irq_context: 0 (wq_completion)bond42 irq_context: 0 (wq_completion)bond42 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond42 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond42 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond42 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond43 irq_context: 0 (wq_completion)bond43 &rq->__lock irq_context: 0 (wq_completion)bond43 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond43 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond43 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond43 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond43 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond43 (work_completion)(&(&slave->notify_work)->work) &rq->__lock 
&per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond35#2 irq_context: 0 (wq_completion)bond35#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond35#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond35#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond35#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg0#11 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 (wq_completion)wg-crypt-wg0#11 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#2 irq_context: 0 (wq_completion)wg-crypt-wg0#11 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount irq_context: 0 (wq_completion)events pcpu_balance_work pcpu_alloc_mutex &rcu_state.expedited_wq irq_context: 0 (wq_completion)bond43 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond43 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond43 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond43 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond43 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond44 irq_context: 0 (wq_completion)bond44 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond44 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond44 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond44 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond36#2 irq_context: 0 (wq_completion)bond36#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond36#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond36#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond36#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 &xs->mutex fs_reclaim mmu_notifier_invalidate_range_start &cfs_rq->removed.lock irq_context: 0 &xs->mutex fs_reclaim mmu_notifier_invalidate_range_start &obj_hash[i].lock irq_context: 0 (wq_completion)bond36#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond36#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond36#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond36#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond36#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond44 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond44 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond44 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 
0 (wq_completion)bond44 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond44 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond42 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond42 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond42 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond42 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond42 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond6 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond6 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond6 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond6 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond6 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond22#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond22#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond22#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond22#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond22#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond36 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond36 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond36 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond36 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond36 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond35 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond35 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond35 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond35 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond35 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond39 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond39 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond39 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond39 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond39 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond7 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond7 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond7 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond7 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond7 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond7 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond7 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond7 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond7 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond7 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond7 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond7 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond7 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond7 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond7 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond7 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond7 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond7 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond7 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond7 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond7 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond7 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond7 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond7 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond7 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond3#4 
(work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond3#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond3#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond3#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond3#4 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond41 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond41 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond41 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond41 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond41 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond34 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond34 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond34 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond34 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond34 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond35#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond35#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond35#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond35#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond35#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond35#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond35#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond35#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond35#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond37#2 irq_context: 0 (wq_completion)bond37#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond37#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond37#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond37#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond38 irq_context: 0 (wq_completion)bond38 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond38 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond38 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond38 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond39#2 irq_context: 0 (wq_completion)bond38 
(work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond38 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &rq->__lock irq_context: 0 (wq_completion)bond38 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond38 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond38 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond38 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond38 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond38 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond38 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond39#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond39#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond39#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond39#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond40#2 irq_context: 0 (wq_completion)bond40#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond40#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond40#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond40#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond40#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond40#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond40#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond40#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond40#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond41#2 irq_context: 0 (wq_completion)bond41#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond41#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond41#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond41#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond37#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond37#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond37#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond37#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond37#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond42#2 irq_context: 0 (wq_completion)bond42#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 
(wq_completion)bond42#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond42#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond42#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond43#2 irq_context: 0 (wq_completion)bond43#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond43#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond43#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond43#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem fill_pool_map-wait-type-override &cfs_rq->removed.lock irq_context: 0 rtnl_mutex cpu_hotplug_lock wq_pool_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 rtnl_mutex cpu_hotplug_lock wq_pool_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex dev_pm_qos_sysfs_mtx &root->kernfs_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond44#2 irq_context: 0 (wq_completion)bond44#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond44#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond44#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond44#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond42#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond42#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond42#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond39#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond39#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond39#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &rq->__lock irq_context: 0 (wq_completion)bond39#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond39#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond43#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond43#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond43#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond41#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond41#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond41#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond44#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond44#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 
irq_context: 0 (wq_completion)bond44#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond44#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond44#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond44#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond45 irq_context: 0 (wq_completion)bond45 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond45 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond45 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond45 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond45 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond45 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond45 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond45 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond45 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 rtnl_mutex &net->xdp.lock &rq->__lock irq_context: 0 rtnl_mutex &net->xdp.lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond46 irq_context: 0 (wq_completion)bond46 &rq->__lock irq_context: 0 (wq_completion)bond46 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond46 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond46 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond46 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond46 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond46 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond46 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &br->hash_lock batched_entropy_u8.lock irq_context: 0 rtnl_mutex &br->hash_lock kfence_freelist_lock irq_context: 0 (wq_completion)bond34#2 irq_context: 0 (wq_completion)bond34#2 &rq->__lock irq_context: 0 (wq_completion)bond34#2 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond34#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond34#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond34#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond34#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond32 irq_context: 0 (wq_completion)bond32 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond32 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond32 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond32 (work_completion)(&(&slave->notify_work)->work) 
&base->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex dev_pm_qos_sysfs_mtx dev_pm_qos_mtx &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond35#3 irq_context: 0 (wq_completion)bond35#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond35#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond35#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond35#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond35#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond35#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond33 irq_context: 0 (wq_completion)bond33 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond33 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond33 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond33 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond36#3 irq_context: 0 (wq_completion)bond36#3 &rq->__lock irq_context: 0 (wq_completion)bond36#3 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond36#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond36#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond36#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond36#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond36#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond36#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 jbd2_handle rcu_read_lock &rcu_state.expedited_wq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 jbd2_handle rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 jbd2_handle rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 jbd2_handle rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex dev_pm_qos_sysfs_mtx &root->kernfs_rwsem &rq->__lock irq_context: 0 rtnl_mutex device_links_lock &rq->__lock irq_context: 0 rtnl_mutex device_links_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond35#4 irq_context: 0 (wq_completion)bond35#4 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond35#4 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond35#4 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond35#4 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond35#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond35#4 (work_completion)(&(&slave->notify_work)->work) 
&rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond37#3 irq_context: 0 (wq_completion)bond37#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond37#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond37#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond37#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex &ht->mutex quarantine_lock irq_context: 0 (wq_completion)bond36#4 irq_context: 0 (wq_completion)bond36#4 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond36#4 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond36#4 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond36#4 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: softirq (&in_dev->mr_ifc_timer) fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: softirq (&in_dev->mr_ifc_timer) fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem &anon_vma->rwsem &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond38#2 irq_context: 0 (wq_completion)bond38#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond38#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond38#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond38#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond37#4 irq_context: 0 (wq_completion)events (work_completion)(&pwq->unbound_release_work) rcu_state.exp_mutex rcu_read_lock &pool->lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&pwq->unbound_release_work) rcu_state.exp_mutex rcu_read_lock &pool->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond37#4 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond37#4 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond37#4 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond37#4 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond39#3 irq_context: 0 (wq_completion)bond39#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond39#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond39#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond39#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond38#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond38#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond38#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond38#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond38#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 
(wq_completion)bond35#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond35#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond35#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond35#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond35#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond36#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond36#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond36#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond36#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond36#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond37#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond37#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond37#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond37#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond37#4 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond35#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond35#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond35#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond35#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond35#4 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond39#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond39#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond39#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond39#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond39#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond33 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond33 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond33 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond33 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond33 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond37#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond37#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond37#3 
(work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond37#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond37#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond38#3 irq_context: 0 (wq_completion)bond38#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond38#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond38#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond38#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond38#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond38#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond40#3 irq_context: 0 (wq_completion)bond40#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond40#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond40#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond40#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex rcu_state.barrier_mutex rcu_read_lock &stopper->lock irq_context: 0 rtnl_mutex rcu_state.barrier_mutex rcu_read_lock &stop_pi_lock irq_context: 0 rtnl_mutex (work_completion)(&(&br->gc_work)->work) &rq->__lock irq_context: 0 rtnl_mutex (work_completion)(&(&br->gc_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond39#4 irq_context: 0 (wq_completion)bond39#4 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond39#4 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond39#4 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond39#4 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond39#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond39#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &idev->mc_lock fill_pool_map-wait-type-override rcu_read_lock &rcu_state.gp_wq irq_context: 0 rtnl_mutex &idev->mc_lock fill_pool_map-wait-type-override rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 rtnl_mutex &idev->mc_lock fill_pool_map-wait-type-override rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 rtnl_mutex &idev->mc_lock fill_pool_map-wait-type-override rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond41#3 irq_context: 0 (wq_completion)bond41#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond41#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond41#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond41#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond41#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond41#3 
(work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond41#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond41#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond41#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond46 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond46 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond46 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond46 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond46 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond36#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond36#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond36#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond36#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond36#4 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond40#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond40#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond40#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond40#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond40#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond34#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond34#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond34#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond34#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond34#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond38#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond38#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond38#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond38#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond38#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond39#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond39#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond39#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond39#4 (work_completion)(&(&slave->notify_work)->work) 
rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond39#4 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond32 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond32 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond32 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond32 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond32 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond40#4 irq_context: 0 (wq_completion)bond40#4 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond40#4 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond40#4 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond40#4 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond40#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond40#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond42#3 irq_context: 0 (wq_completion)bond42#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond42#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond42#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond42#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex &dev_addr_list_lock_key &____s->seqcount#2 irq_context: 0 rtnl_mutex &dev_addr_list_lock_key &____s->seqcount irq_context: 0 (wq_completion)bond41#4 irq_context: 0 (wq_completion)bond41#4 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond41#4 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond41#4 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond41#4 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond43#3 irq_context: 0 (wq_completion)bond43#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond43#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond43#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond43#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 fill_pool_map-wait-type-override pool_lock irq_context: 0 &sb->s_type->i_mutex_key#9 &rcu_state.expedited_wq irq_context: 0 (wq_completion)bond42#4 irq_context: 0 (wq_completion)bond42#4 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond42#4 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond42#4 
(work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond42#4 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond44#3 irq_context: 0 (wq_completion)bond44#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond44#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond44#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond44#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond44#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 rtnl_mutex uevent_sock_mutex nl_table_wait.lock &p->pi_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)wg-crypt-wg1#11 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)wg-crypt-wg1#11 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &n->lock irq_context: 0 (wq_completion)wg-crypt-wg1#11 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond45#2 irq_context: 0 (wq_completion)bond45#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond45#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond45#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond45#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond45#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond45#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond45#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond45#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond45#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond43#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond43#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond43#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond43#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond43#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond40#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond40#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond40#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond40#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond40#4 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond42#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond42#4 
(work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond42#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond42#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond42#4 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond42#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond42#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond42#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond42#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond42#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond41#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond41#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond41#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond41#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond41#4 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond44#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond44#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond44#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond44#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond44#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 &xs->mutex &mm->mmap_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &cfs_rq->removed.lock irq_context: 0 rtnl_mutex &hwstats->hwsdev_list_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-kex-wg2#22 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &c->lock irq_context: 0 (wq_completion)wg-kex-wg2#22 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &____s->seqcount#2 irq_context: 0 (wq_completion)wg-kex-wg2#22 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &____s->seqcount irq_context: 0 rtnl_mutex &br->hash_lock nl_table_wait.lock &p->pi_lock &rq->__lock irq_context: 0 rtnl_mutex &br->hash_lock nl_table_wait.lock &p->pi_lock &rq->__lock 
&per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-kex-wg1#18 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock rcu_read_lock_bh batched_entropy_u32.lock crngs.lock irq_context: 0 (wq_completion)bond23#4 irq_context: 0 (wq_completion)bond23#4 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond23#4 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond23#4 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond23#4 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond23#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond23#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond23#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond23#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond23#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond23#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond23#4 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond24#4 irq_context: 0 (wq_completion)bond24#4 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond24#4 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond24#4 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond24#4 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond24#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond24#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start &obj_hash[i].lock irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 remove_cache_srcu rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 remove_cache_srcu rcu_read_lock &rcu_state.gp_wq irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 remove_cache_srcu rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 remove_cache_srcu rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 remove_cache_srcu rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond25#2 irq_context: 0 (wq_completion)bond25#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond25#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond25#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond25#2 (work_completion)(&(&slave->notify_work)->work) 
&base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond25#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond25#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond26 irq_context: 0 (wq_completion)bond26 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond26 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond26 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond26 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond26 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond26 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond26 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond26 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond26 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond25#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond25#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond25#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond25#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond25#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond24#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond24#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond24#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond24#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond24#4 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &tb->tb6_lock nl_table_wait.lock &p->pi_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &tb->tb6_lock nl_table_wait.lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &tb->tb6_lock nl_table_wait.lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &type->i_mutex_dir_key#3 &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 &type->i_mutex_dir_key#3 &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&pool->work) rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&pool->work) rtnl_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond27 irq_context: 0 
(wq_completion)bond27 &rq->__lock irq_context: 0 (wq_completion)bond27 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond27 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond27 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond27 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond27 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond27 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond27 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond27 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond27 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond27 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond27 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond28 irq_context: 0 (wq_completion)bond28 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond28 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond28 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond28 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond28 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond28 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond28 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond28 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond28 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond29 irq_context: 0 (wq_completion)bond29 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond29 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond29 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond29 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond29 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond29 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-kex-wg1#18 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)bond29 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond29 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond29 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex 
&tn->lock irq_context: 0 (wq_completion)bond29 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond29 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 rtnl_mutex rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 (wq_completion)bond30 irq_context: 0 (wq_completion)bond30 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond30 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond30 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond30 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond30 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond30 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond31 irq_context: 0 (wq_completion)bond31 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond31 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond31 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond31 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 fill_pool_map-wait-type-override &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 fill_pool_map-wait-type-override &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond32#2 irq_context: 0 (wq_completion)bond32#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond32#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond32#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond32#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond33#2 irq_context: 0 (wq_completion)bond33#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond33#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond33#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond33#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &dentry->d_lock &lru->node[i].lock rcu_read_lock &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &dentry->d_lock &lru->node[i].lock rcu_read_lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond34#3 irq_context: 0 (wq_completion)bond34#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond34#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond34#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond34#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &dev_addr_list_lock_key/1 &____s->seqcount#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock 
rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &____s->seqcount#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &____s->seqcount irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond32#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond32#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond32#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond32#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond32#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond32#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond32#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond34#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond34#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond34#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)events (work_completion)(&umem->work) rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond34#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond34#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond33#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond33#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond33#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond33#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond33#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond31 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond31 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond31 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond31 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond31 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond30 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond30 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond30 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond30 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond30 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 
rtnl_mutex &br->hash_lock rcu_read_lock rcu_read_lock &pool->lock fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 rtnl_mutex &br->hash_lock rcu_read_lock rcu_read_lock &pool->lock fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 &sb->s_type->i_mutex_key#10 &xs->mutex rcu_state.exp_mutex rcu_read_lock &pool->lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 &xs->mutex rcu_state.exp_mutex rcu_read_lock &pool->lock fill_pool_map-wait-type-override pool_lock irq_context: softirq (&hsr->announce_timer) rcu_read_lock fill_pool_map-wait-type-override pool_lock#2 irq_context: softirq (&hsr->announce_timer) rcu_read_lock fill_pool_map-wait-type-override &c->lock irq_context: softirq (&hsr->announce_timer) rcu_read_lock fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: softirq (&hsr->announce_timer) rcu_read_lock fill_pool_map-wait-type-override &____s->seqcount irq_context: softirq (&hsr->announce_timer) rcu_read_lock fill_pool_map-wait-type-override pool_lock irq_context: 0 rtnl_mutex &idev->mc_lock &dev_addr_list_lock_key &____s->seqcount#2 irq_context: 0 &xt[i].mutex &mm->mmap_lock &rcu_state.expedited_wq irq_context: 0 rtnl_mutex rcu_state.exp_mutex fill_pool_map-wait-type-override &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex dev_pm_qos_sysfs_mtx &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock batched_entropy_u8.lock irq_context: 0 rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock kfence_freelist_lock irq_context: 0 rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock &meta->lock irq_context: softirq (&dsp_spl_tl) dsp_lock fill_pool_map-wait-type-override &c->lock irq_context: softirq (&dsp_spl_tl) dsp_lock fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: softirq (&dsp_spl_tl) dsp_lock fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)events (work_completion)(&pool->work) rtnl_mutex &rq->__lock &cfs_rq->removed.lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 rcu_read_lock rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex &bat_priv->forw_bat_list_lock fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex &bat_priv->forw_bat_list_lock fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)bond72 irq_context: 0 (wq_completion)bond72 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond72 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond72 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond72 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 &xt[i].mutex rcu_read_lock &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)bond72 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond72 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond72 (work_completion)(&(&slave->notify_work)->work) 
rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond72 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond72 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond74 irq_context: 0 (wq_completion)bond74 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond74 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond74 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond74 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond80 irq_context: 0 (wq_completion)bond80 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond80 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond80 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond80 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &rnp->exp_wq[0] irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond76 irq_context: 0 (wq_completion)bond76 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond76 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond76 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond76 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex fill_pool_map-wait-type-override &cfs_rq->removed.lock irq_context: 0 rcu_state.barrier_mutex rcu_state.barrier_lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 rcu_state.barrier_mutex rcu_state.barrier_lock fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond80 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond80 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond80 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond80 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond80 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond76 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)bond76 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 (wq_completion)bond76 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)bond76 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 rtnl_mutex (work_completion)(&(&idev->mc_report_work)->work) &rq->__lock irq_context: 0 rtnl_mutex (work_completion)(&(&idev->mc_report_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &vma->vm_lock->lock fs_reclaim rcu_node_0 irq_context: 0 
&vma->vm_lock->lock fs_reclaim &rcu_state.expedited_wq irq_context: 0 &vma->vm_lock->lock fs_reclaim &rcu_state.expedited_wq &p->pi_lock irq_context: 0 &vma->vm_lock->lock fs_reclaim &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 &vma->vm_lock->lock fs_reclaim &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond74 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond74 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond74 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond74 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond74 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond76 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond76 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond76 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond76 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond76 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)wg-kex-wg1#21 (work_completion)(&peer->transmit_handshake_work) &c->lock irq_context: 0 (wq_completion)bond68 irq_context: 0 (wq_completion)bond68 &rq->__lock irq_context: 0 (wq_completion)bond68 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond68 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond68 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond68 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond68 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond68 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond77 irq_context: 0 (wq_completion)bond77 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond77 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond77 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond77 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond70 irq_context: 0 (wq_completion)bond70 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond70 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond70 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond70 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond69 irq_context: 0 (wq_completion)bond69 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond69 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond69 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond69 (work_completion)(&(&slave->notify_work)->work) &base->lock 
&obj_hash[i].lock irq_context: 0 &xs->mutex &mm->mmap_lock rcu_read_lock &cfs_rq->removed.lock irq_context: 0 &xs->mutex &mm->mmap_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &dev_addr_list_lock_key/1 rcu_read_lock &bridge_netdev_addr_lock_key &____s->seqcount#2 irq_context: 0 (wq_completion)bond77 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond77 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond77 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond77 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond77 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond68 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond68 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond68 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond68 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond68 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond69 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond69 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond69 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond69 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond69 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond70 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond70 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond70 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond70 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond70 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond71 irq_context: 0 (wq_completion)bond71 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond71 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond71 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond71 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond71 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond71 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond83 irq_context: 0 (wq_completion)bond83 &rq->__lock irq_context: 0 (wq_completion)bond83 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond83 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond83 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 
(wq_completion)bond83 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond83 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond83 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond83 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond78 irq_context: 0 (wq_completion)bond78 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond78 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond78 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond78 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg2#18 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &dev_addr_list_lock_key/1 _xmit_ETHER krc.lock &obj_hash[i].lock irq_context: 0 rtnl_mutex &dev_addr_list_lock_key/1 _xmit_ETHER krc.lock &base->lock irq_context: 0 rtnl_mutex &dev_addr_list_lock_key/1 _xmit_ETHER krc.lock &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 rcu_read_lock rcu_read_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond79 irq_context: 0 (wq_completion)bond79 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond79 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond79 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond79 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond73 irq_context: 0 (wq_completion)bond73 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond73 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond73 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond73 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: softirq (&n->timer) k-slock-AF_INET rcu_read_lock rcu_read_lock &ul->lock irq_context: softirq (&n->timer) k-slock-AF_INET rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond80#2 irq_context: 0 (wq_completion)bond80#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond80#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond80#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond80#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond80#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond80#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-crypt-wg0#11 (work_completion)(&({ do { const void *__vpp_verify = 
(typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond74#2 irq_context: 0 (wq_completion)bond74#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond74#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond74#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond74#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond71#2 irq_context: 0 (wq_completion)bond71#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond71#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond71#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond71#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&(&group->avgs_work)->work) &group->avgs_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond71#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond71#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond71#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond71#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond71#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond84 irq_context: 0 (wq_completion)bond84 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond84 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond84 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond84 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond84 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond76#2 irq_context: 0 (wq_completion)bond83 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond83 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond83 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond83 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond83 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond78 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond78 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond78 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond78 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond78 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 
(wq_completion)bond71 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond71 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond71 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond71 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond71 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond73 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond73 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond73 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond73 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond73 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond80#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond80#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond80#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond80#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond80#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond79 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond79 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond79 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond79 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond79 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond74#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond74#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond74#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond74#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond74#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond84 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond84 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &rq->__lock irq_context: 0 (wq_completion)bond84 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond84 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond84 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond84 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond76#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond76#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 
(wq_completion)bond76#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond76#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond72#2 irq_context: 0 (wq_completion)bond72#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond72#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond72#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond72#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond85 irq_context: 0 (wq_completion)bond85 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond85 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond85 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond85 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond85 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock &rcu_state.gp_wq irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond86 irq_context: 0 (wq_completion)bond86 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond86 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond86 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond86 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond86 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond77#2 irq_context: 0 (wq_completion)bond77#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond77#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond77#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond77#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond87 irq_context: 0 (wq_completion)bond87 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond87 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond87 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond87 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 &mm->mmap_lock rcu_read_lock &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)bond88 irq_context: 0 (wq_completion)bond88 
(work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond88 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond88 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond88 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond88 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond88 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond87 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond87 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond87 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond87 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond87 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond77#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond77#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond77#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond77#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond77#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond76#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond76#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond76#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond76#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond76#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond86 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond86 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond86 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond86 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond86 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond72#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond72#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond72#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond72#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond72#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond88 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond88 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 
(wq_completion)bond88 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond88 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond88 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond85 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond85 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond85 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond85 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond85 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond78#2 irq_context: 0 (wq_completion)bond78#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond78#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond78#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond78#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 &sig->cred_guard_mutex tomoyo_ss rcu_read_lock &rcu_state.gp_wq irq_context: 0 &sig->cred_guard_mutex tomoyo_ss rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 &sig->cred_guard_mutex tomoyo_ss rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 &sig->cred_guard_mutex tomoyo_ss rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond78#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond78#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond78#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond78#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond78#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond79#2 irq_context: 0 (wq_completion)bond79#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond79#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond79#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond79#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond79#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond79#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond79#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond79#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond79#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond79#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond80#3 irq_context: 0 (wq_completion)bond80#3 &rq->__lock irq_context: 0 (wq_completion)bond80#3 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond80#3 
(work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond80#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond80#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond80#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond80#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond80#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond80#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond80#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond80#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond80#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond80#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond75 irq_context: 0 (wq_completion)bond75 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond75 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond75 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond75 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#9 &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)bond75 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond75 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond75 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond75 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond75 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)events (work_completion)(&pool->work) &meta->lock irq_context: 0 (wq_completion)events (work_completion)(&pool->work) kfence_freelist_lock irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)wg-crypt-wg1#11 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_node_0 irq_context: 0 (wq_completion)wg-crypt-wg1#11 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &rcu_state.expedited_wq irq_context: 0 (wq_completion)wg-crypt-wg1#11 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + 
(((__per_cpu_offset[(cpu)])))); }); })->work) &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)wg-crypt-wg1#11 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)wg-crypt-wg1#11 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond59 irq_context: 0 (wq_completion)bond59 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond59 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond59 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond59 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond59 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond59 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond59 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond59 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond59 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond59 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond59 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond91 irq_context: 0 (wq_completion)bond91 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond91 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond91 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond91 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond91 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond91 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond91 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond91 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond91 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond78#3 irq_context: 0 (wq_completion)bond78#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond78#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond78#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond78#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond78#3 (work_completion)(&(&bond->mcast_work)->work) 
irq_context: 0 (wq_completion)bond78#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond78#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond78#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&umem->work) per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 (wq_completion)bond85#2 irq_context: 0 (wq_completion)bond59 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond59 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond59 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond59 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond59 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond59 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond59 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond59 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond59 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond59 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond59 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond59 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond59 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond59 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond78#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond78#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond78#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond78#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond78#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond59 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond59 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond59 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond59 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond59 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond85#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond85#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond85#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond85#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: softirq 
(&peer->timer_zero_key_material) irq_context: softirq (&peer->timer_zero_key_material) rcu_read_lock_bh rcu_read_lock &pool->lock/1 irq_context: softirq (&peer->timer_zero_key_material) rcu_read_lock_bh rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: softirq (&peer->timer_zero_key_material) rcu_read_lock_bh rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: softirq (&peer->timer_zero_key_material) rcu_read_lock_bh rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: softirq (&peer->timer_zero_key_material) rcu_read_lock_bh rcu_read_lock &pool->lock/1 &p->pi_lock &cfs_rq->removed.lock irq_context: softirq (&peer->timer_zero_key_material) rcu_read_lock_bh rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: softirq (&peer->timer_zero_key_material) rcu_read_lock_bh rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-kex-wg2#17 (work_completion)(&peer->clear_peer_work) irq_context: 0 (wq_completion)wg-kex-wg2#17 (work_completion)(&peer->clear_peer_work) &handshake->lock irq_context: 0 (wq_completion)wg-kex-wg2#17 (work_completion)(&peer->clear_peer_work) &handshake->lock &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg2#17 (work_completion)(&peer->clear_peer_work) &peer->keypairs.keypair_update_lock irq_context: 0 (wq_completion)wg-kex-wg2#17 (work_completion)(&peer->clear_peer_work) &peer->keypairs.keypair_update_lock &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg2#17 (work_completion)(&peer->clear_peer_work) &peer->keypairs.keypair_update_lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg2#17 (work_completion)(&peer->clear_peer_work) &peer->keypairs.keypair_update_lock pool_lock#2 irq_context: 0 (wq_completion)wg-kex-wg1#17 (work_completion)(&peer->clear_peer_work) irq_context: 0 (wq_completion)wg-kex-wg1#17 (work_completion)(&peer->clear_peer_work) &handshake->lock irq_context: 0 (wq_completion)wg-kex-wg1#17 (work_completion)(&peer->clear_peer_work) &handshake->lock &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg1#17 (work_completion)(&peer->clear_peer_work) &peer->keypairs.keypair_update_lock irq_context: 0 (wq_completion)wg-kex-wg1#17 (work_completion)(&peer->clear_peer_work) &peer->keypairs.keypair_update_lock &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg1#17 (work_completion)(&peer->clear_peer_work) &peer->keypairs.keypair_update_lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg0#17 (work_completion)(&peer->clear_peer_work) irq_context: 0 (wq_completion)wg-kex-wg0#17 (work_completion)(&peer->clear_peer_work) &handshake->lock irq_context: 0 (wq_completion)wg-kex-wg0#17 (work_completion)(&peer->clear_peer_work) &handshake->lock &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg0#17 (work_completion)(&peer->clear_peer_work) &peer->keypairs.keypair_update_lock irq_context: 0 (wq_completion)wg-kex-wg0#17 (work_completion)(&peer->clear_peer_work) &peer->keypairs.keypair_update_lock &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg0#17 (work_completion)(&peer->clear_peer_work) &peer->keypairs.keypair_update_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond85#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond85#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond85#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond85#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 
softirq &(&ipvs->defense_work)->timer rcu_read_lock &pool->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond92 irq_context: 0 (wq_completion)bond92 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond92 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond92 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond92 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond92 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond92 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond92 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond92 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond79#3 irq_context: 0 (wq_completion)bond79#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond79#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond79#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond79#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond92 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond92 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond92 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond92 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond92 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond85#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond85#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond85#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond85#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond85#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond91 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond91 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond91 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex batched_entropy_u8.lock irq_context: 0 (wq_completion)bond91 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex kfence_freelist_lock irq_context: 0 (wq_completion)bond91 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond91 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond91 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond91 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond91 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond91 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond91 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond91 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &meta->lock irq_context: 0 (wq_completion)bond91 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond91 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond91 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond91 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond91 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond91 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond91 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond91 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond91 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond91 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond91 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond91 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond91 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond91 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond91 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond91 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond91 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond91 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond79#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond79#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond79#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond79#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond79#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond79#3 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond79#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond79#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond79#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond79#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond79#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond79#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond79#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond79#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond79#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond79#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond79#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond79#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond79#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond79#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond60 irq_context: 0 (wq_completion)bond60 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond60 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond60 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond60 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond60 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond60 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond60 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond60 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond60 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond60 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 lock#3 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex pcpu_alloc_mutex pcpu_alloc_mutex.wait_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex pcpu_alloc_mutex &pool->lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex pcpu_alloc_mutex &rq->__lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex pcpu_alloc_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock 
rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond86#2 irq_context: 0 (wq_completion)bond86#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond86#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond86#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond86#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond86#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond86#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond86#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond86#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond93 irq_context: 0 (wq_completion)bond93 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond93 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond93 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond93 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond93 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond93 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond93 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond93 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex uevent_sock_mutex batched_entropy_u8.lock crngs.lock irq_context: 0 (wq_completion)bond80#4 irq_context: 0 (wq_completion)bond80#4 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond80#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond80#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond80#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond80#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond80#4 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond80#4 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond80#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond80#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond80#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond80#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond80#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond80#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond80#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond80#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond80#4 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond80#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond80#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond80#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond80#4 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond84#2 irq_context: 0 (wq_completion)bond84#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond84#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond84#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond84#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond84#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond84#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond84#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond84#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex &br->hash_lock &meta->lock irq_context: 0 (wq_completion)bond61 irq_context: 0 (wq_completion)bond61 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond61 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond61 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond61 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond61 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond61 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond61 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond61 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond61 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond61 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond62 irq_context: 0 (wq_completion)bond62 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond62 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond62 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond62 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond62 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond62 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond62 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond62 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond84#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond62 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond62 
(work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond86#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond63 irq_context: 0 (wq_completion)bond63 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond60 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond63 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond63 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond63 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond63 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond63 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond63 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond63 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex lweventlist_lock &____s->seqcount#2 irq_context: 0 rtnl_mutex remove_cache_srcu pgd_lock irq_context: 0 rtnl_mutex remove_cache_srcu stock_lock irq_context: 0 rtnl_mutex remove_cache_srcu key irq_context: 0 rtnl_mutex remove_cache_srcu pcpu_lock irq_context: 0 rtnl_mutex remove_cache_srcu percpu_counters_lock irq_context: 0 rtnl_mutex remove_cache_srcu pcpu_lock stock_lock irq_context: 0 (wq_completion)bond61 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond61 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond61 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond61 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond61 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond61 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond61 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond61 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond61 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond61 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond61 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond61 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond61 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond61 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond61 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond61 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond61 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock 
rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond61 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond61 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond61 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond61 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond61 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond61 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond61 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond61 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond62 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond62 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond62 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond62 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond62 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond60 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond60 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond60 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond60 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond60 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond60 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond60 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond60 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond60 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond60 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond60 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond60 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond60 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond60 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond60 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond60 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock irq_context: 0 (wq_completion)bond60 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond60 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond60 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond60 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond60 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond60 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond60 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond60 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond60 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond60 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond60 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond60 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond86#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond86#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond86#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond86#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond86#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond63 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond63 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond63 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond63 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond63 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond84#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex 
fs_reclaim irq_context: 0 (wq_completion)bond84#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond84#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond84#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond84#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond84#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond84#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond84#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond84#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond84#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond84#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond84#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond84#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond84#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond84#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock irq_context: 0 (wq_completion)bond84#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond84#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond84#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond84#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond84#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond84#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond84#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond84#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond84#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond84#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond84#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond84#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond84#2 (work_completion)(&(&bond->mcast_work)->work) 
rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond84#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond84#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond84#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond84#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond84#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond93 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond93 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond93 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond93 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond93 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond93 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond93 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond93 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond93 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond93 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond93 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond93 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond93 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond93 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond93 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond93 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond93 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond93 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond93 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond93 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock 
&br->multicast_lock irq_context: 0 (wq_completion)bond93 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond93 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond93 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond93 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond93 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond93 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond85#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond85#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond85#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond85#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond85#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond85#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond85#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond85#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond85#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond85#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond85#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond85#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond85#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond85#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond85#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond85#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond85#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond85#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock irq_context: 0 (wq_completion)bond85#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond85#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 
(wq_completion)bond85#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond85#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond85#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond85#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond85#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond85#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond85#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond85#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond85#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond85#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond91 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond91 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond91 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond91 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond91 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond91 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond91 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond91 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond91 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond91 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond60 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond60 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond60 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond60 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond60 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond62 (work_completion)(&(&bond->mcast_work)->work) 
rtnl_mutex rcu_node_0 irq_context: 0 (wq_completion)bond62 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond62 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond62 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond62 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond62 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond62 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond62 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond62 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond62 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond62 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond62 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond62 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond62 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond62 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond62 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond62 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond62 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond62 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock irq_context: 0 (wq_completion)bond62 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond62 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond62 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond78#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond78#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond78#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond78#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond78#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond78#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond78#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond78#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond78#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond78#3 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond78#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond78#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond78#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond78#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond78#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond78#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond78#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond78#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond78#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond78#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond78#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond78#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond78#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond78#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond78#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond78#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond78#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond78#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond79#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond79#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond79#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond79#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond79#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 
(wq_completion)bond92 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond92 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond92 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond92 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond92 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond92 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond92 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond92 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond92 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond92 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond92 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond92 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond92 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond92 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond92 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond92 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond92 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond92 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond92 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond92 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond92 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond92 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond92 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond92 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond92 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 
(wq_completion)bond92 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond61 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond61 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond61 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond61 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond61 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond87#2 irq_context: 0 (wq_completion)bond87#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond87#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond87#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond87#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond87#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond88#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex quarantine_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)bond87#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond87#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond87#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond87#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond87#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond87#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond87#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond87#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond87#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond87#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond87#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond87#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond87#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond87#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond87#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond87#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond87#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &rq->__lock irq_context: 0 (wq_completion)bond87#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex 
net_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond87#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond87#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond87#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond87#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond86#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond86#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond86#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 (wq_completion)bond86#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond86#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond86#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond86#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond86#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond86#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond86#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond86#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond86#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond86#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond86#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock &rq->__lock irq_context: 0 (wq_completion)bond86#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond86#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond86#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond86#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond86#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond86#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond86#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond86#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond86#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond86#2 (work_completion)(&(&bond->mcast_work)->work) 
rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond86#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond86#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond86#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond86#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond86#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond86#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond86#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond86#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond86#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond86#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond86#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond86#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond86#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond86#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond86#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond86#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond86#2 (work_completion)(&(&bond->mcast_work)->work) rcu_node_0 irq_context: 0 (wq_completion)bond86#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond88#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond88#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond88#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond88#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond88#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond88#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond88#2 
(work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond88#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock fill_pool_map-wait-type-override pool_lock irq_context: 0 rtnl_mutex stock_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 (wq_completion)bond85#3 irq_context: 0 (wq_completion)bond85#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond85#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond85#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond85#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond85#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond85#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond85#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond85#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 rlock-AF_INET6 irq_context: softirq &(&bond->mcast_work)->timer rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex rcu_read_lock &bat_priv->forw_bat_list_lock fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex rcu_read_lock &bat_priv->forw_bat_list_lock fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)bond84#3 irq_context: 0 (wq_completion)bond84#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond84#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond84#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond84#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond84#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond84#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond84#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond84#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET6 &f->f_lock irq_context: 0 sk_lock-AF_INET6 &f->f_lock fasync_lock irq_context: 0 sk_lock-AF_INET6 &f->f_lock fasync_lock &new->fa_lock irq_context: 0 sk_lock-AF_INET6 &f->f_lock fasync_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond89 irq_context: 0 (wq_completion)bond89 &rq->__lock irq_context: 0 (wq_completion)bond89 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond89 
(work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond89 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond89 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond89 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond89 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond89 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond89 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond89 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond89 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond7#3 irq_context: 0 (wq_completion)bond7#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond7#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond7#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond7#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond7#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond7#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond7#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond7#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond86#3 irq_context: 0 (wq_completion)bond86#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond86#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond86#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond86#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond86#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond86#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond86#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond86#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond86#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond86#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond96 irq_context: 0 (wq_completion)bond96 &rq->__lock irq_context: 0 (wq_completion)bond96 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond96 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond96 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond96 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond96 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond96 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 
(wq_completion)bond96 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond96 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond96 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond96 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond85#4 irq_context: 0 (wq_completion)bond85#4 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond85#4 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond85#4 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond85#4 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond85#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond85#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond85#4 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond85#4 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond85#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond85#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond8#2 irq_context: 0 (wq_completion)bond8#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond8#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond8#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond8#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond8#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond8#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond8#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond8#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond87#3 irq_context: 0 (wq_completion)bond87#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond87#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond87#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond87#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond87#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond87#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond87#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond87#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond97 irq_context: 0 (wq_completion)bond97 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond97 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond97 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond97 (work_completion)(&(&slave->notify_work)->work) 
&base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond97 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond97 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond97 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond97 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond97 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond97 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond97 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond86#4 irq_context: 0 (wq_completion)bond86#4 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond86#4 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond86#4 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond86#4 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond86#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond86#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond86#4 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond86#4 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond86#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond86#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond91#2 irq_context: 0 (wq_completion)bond91#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond91#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond91#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond91#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond91#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond91#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond91#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond91#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond85#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond85#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond85#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 (wq_completion)bond85#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond85#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond85#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 
(wq_completion)bond85#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond85#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond85#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond85#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond85#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond85#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond85#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond85#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock irq_context: 0 (wq_completion)bond85#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond85#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond85#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond85#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond85#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond85#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond85#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond85#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond85#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond85#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond85#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond85#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond85#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond85#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond85#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond85#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex 
&idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond85#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond85#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond85#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond85#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond85#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond85#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond9#2 irq_context: 0 (wq_completion)bond9#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond9#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond9#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond9#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond9#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond9#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond9#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond9#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond9#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond9#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond88#3 irq_context: 0 (wq_completion)bond88#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond88#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond88#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond88#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock &____s->seqcount#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock &____s->seqcount irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)bond88#3 
(work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond88#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond88#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond88#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 remove_cache_srcu pgd_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 remove_cache_srcu stock_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 remove_cache_srcu key irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 remove_cache_srcu pcpu_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 remove_cache_srcu percpu_counters_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 remove_cache_srcu pcpu_lock stock_lock irq_context: 0 (wq_completion)bond98 irq_context: 0 (wq_completion)bond98 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond98 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond98 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond98 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond98 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond98 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond98 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond98 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond87#4 irq_context: 0 (wq_completion)bond87#4 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond87#4 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond87#4 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond87#4 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond87#4 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond87#4 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond87#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond87#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond92#2 irq_context: 0 (wq_completion)bond92#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond92#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond92#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond92#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond92#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond92#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond92#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond92#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond92#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond92#2 
(work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond92#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex fill_pool_map-wait-type-override &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond10#3 irq_context: 0 (wq_completion)bond10#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond10#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond10#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond10#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond10#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond10#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond10#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond10#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond10#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond89#2 irq_context: 0 (wq_completion)bond89#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond89#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond89#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond89#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond89#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond89#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond89#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond89#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond85#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond85#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex fs_reclaim &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond99 irq_context: 0 (wq_completion)bond99 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond99 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond99 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond99 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond99 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond99 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond99 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond99 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond99 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond99 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond86#4 (work_completion)(&(&bond->mcast_work)->work) 
&rq->__lock irq_context: 0 (wq_completion)bond86#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle key#28 irq_context: 0 (wq_completion)bond88#4 irq_context: 0 (wq_completion)bond88#4 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond88#4 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond88#4 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond88#4 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond88#4 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond88#4 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond88#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond88#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond88#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond88#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond93#2 irq_context: 0 (wq_completion)bond93#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond93#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond93#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond93#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond88#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond88#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond93#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond93#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond93#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond93#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond93#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond93#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond11#2 irq_context: 0 (wq_completion)bond11#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond11#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond11#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond11#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond11#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond11#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond11#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond11#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond11#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock 
irq_context: 0 (wq_completion)bond99 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond90 irq_context: 0 (wq_completion)bond90 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond90 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond90 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond90 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond99 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond99 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond99 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond99 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond99 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond89 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond89 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond89 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond89 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond8#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond89 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond8#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond8#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond8#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond8#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond9#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond9#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond9#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond9#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond9#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond89#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond89#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond89#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond89#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond89#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond89#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond89#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock 
irq_context: 0 (wq_completion)bond89#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond89#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond89#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond89#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond89#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond89#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond89#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond90 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond90 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond90 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond90 (work_completion)(&(&bond->mcast_work)->work) &base->lock fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)bond90 (work_completion)(&(&bond->mcast_work)->work) &base->lock fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 (wq_completion)bond90 (work_completion)(&(&bond->mcast_work)->work) &base->lock fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)bond90 (work_completion)(&(&bond->mcast_work)->work) &base->lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)bond90 (work_completion)(&(&bond->mcast_work)->work) &base->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond90 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond8#2 &rq->__lock irq_context: 0 (wq_completion)bond8#2 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex uevent_sock_mutex remove_cache_srcu &rq->__lock irq_context: 0 rtnl_mutex uevent_sock_mutex remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond100 irq_context: 0 (wq_completion)bond100 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond100 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond100 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond100 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond100 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond100 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond100 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond100 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond89 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond89 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond89#3 irq_context: 0 (wq_completion)bond89#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond89#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond89#3 
(work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond89#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond89#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond89#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond89#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond89#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond10#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond63 (work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)bond63 (work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond94 irq_context: 0 (wq_completion)bond94 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond94 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond94 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond94 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond94 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond94 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond94 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond94 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond94 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond12#3 irq_context: 0 (wq_completion)bond12#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond12#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond12#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond12#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond12#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond12#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond12#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond12#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond91#3 irq_context: 0 (wq_completion)bond91#3 &rq->__lock irq_context: 0 (wq_completion)bond91#3 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond91#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond91#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond91#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond91#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond91#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond91#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 
(wq_completion)bond91#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond91#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond91#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond91#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond91#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond101 irq_context: 0 (wq_completion)bond101 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond101 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond101 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond101 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock rcu_read_lock_bh rcu_read_lock fill_pool_map-wait-type-override pool_lock#2 irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock rcu_read_lock_bh rcu_read_lock fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond101 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond101 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond101 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond101 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond90#2 irq_context: 0 (wq_completion)bond90#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond90#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond90#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond90#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond90#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond90#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond90#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond90#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond95 irq_context: 0 (wq_completion)bond95 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond95 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond95 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond95 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond95 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond95 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond95 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond95 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond13#3 irq_context: 0 (wq_completion)bond13#3 &rq->__lock irq_context: 0 (wq_completion)bond13#3 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond13#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 
(wq_completion)bond13#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond13#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond13#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond13#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond13#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond13#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond13#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond13#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond13#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond92#3 irq_context: 0 (wq_completion)bond92#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond92#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond92#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond92#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond92#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond92#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond92#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond92#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond92#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond92#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond13#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond13#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond102 irq_context: 0 (wq_completion)bond102 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond102 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond102 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond102 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond102 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond102 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond102 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond102 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond91#4 irq_context: 0 (wq_completion)bond91#4 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond91#4 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond91#4 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond91#4 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 
(wq_completion)bond91#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond91#4 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond91#4 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond91#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond91#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond96#2 irq_context: 0 (wq_completion)bond96#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond96#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond96#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond96#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond96#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond96#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond96#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond96#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond96#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond96#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond89#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond89#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond89#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond89#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond89#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond85#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond85#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond85#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond7#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond7#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond85#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond85#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond93#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond93#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond93#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond93#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond93#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)events_long (work_completion)(&(&br->gc_work)->work) fill_pool_map-wait-type-override pool_lock#2 
irq_context: 0 (wq_completion)events_long (work_completion)(&(&br->gc_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond14#3 irq_context: 0 (wq_completion)bond14#3 &rq->__lock irq_context: 0 (wq_completion)bond14#3 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond14#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond14#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond14#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond14#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond14#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond14#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond14#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond14#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond14#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond14#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond93#3 irq_context: 0 (wq_completion)bond93#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond93#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond93#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond93#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond93#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond93#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond93#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond93#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond103 irq_context: 0 (wq_completion)bond103 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond103 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond103 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond103 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond103 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond103 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond103 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond103 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond103 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond103 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: softirq &(&br->gc_work)->timer rcu_read_lock &pool->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 rtnl_mutex fill_pool_map-wait-type-override 
batched_entropy_u8.lock irq_context: 0 rtnl_mutex fill_pool_map-wait-type-override kfence_freelist_lock irq_context: 0 (wq_completion)bond88#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond88#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond92#4 irq_context: 0 (wq_completion)bond92#4 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond92#4 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond92#4 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond92#4 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond92#4 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond92#4 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond92#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond92#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond97#2 irq_context: 0 (wq_completion)bond97#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond97#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond97#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond97#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond97#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond97#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond97#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond97#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond97#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond97#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond15#3 irq_context: 0 (wq_completion)bond15#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond15#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond15#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond15#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond87#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond87#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond87#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond87#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond87#4 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond15#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond15#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond15#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 
(wq_completion)bond15#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond94#2 irq_context: 0 (wq_completion)bond94#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond94#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond94#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond94#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond94#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond94#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond94#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond94#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond103 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond103 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond104 irq_context: 0 (wq_completion)bond104 &rq->__lock irq_context: 0 (wq_completion)bond104 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond104 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond104 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond104 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond104 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond104 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond104 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond104 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond104 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond104 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond104 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond93#4 irq_context: 0 (wq_completion)bond93#4 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond93#4 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond93#4 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond93#4 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond93#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond93#4 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond93#4 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond93#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond93#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock 
fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)bond98#2 irq_context: 0 (wq_completion)bond98#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond98#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond98#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond98#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond98#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond98#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond98#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond98#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond98#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond16#3 irq_context: 0 (wq_completion)bond16#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond16#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond16#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond16#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond16#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond16#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond16#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond16#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond16#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond16#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond95#2 irq_context: 0 (wq_completion)bond95#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond95#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond95#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond95#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond95#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond95#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond95#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond95#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond105 irq_context: 0 (wq_completion)bond105 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond105 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond105 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond105 
(work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond105 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond105 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond105 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_state.exp_mutex rcu_read_lock &stopper->lock irq_context: 0 (wq_completion)bond105 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_state.exp_mutex rcu_read_lock &stop_pi_lock irq_context: 0 (wq_completion)bond105 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_state.exp_mutex rcu_read_lock &stop_pi_lock &rq->__lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_state.exp_mutex rcu_read_lock &stop_pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (linkwatch_work).work &cfs_rq->removed.lock irq_context: 0 (wq_completion)events (linkwatch_work).work &obj_hash[i].lock irq_context: 0 (wq_completion)events (linkwatch_work).work pool_lock#2 irq_context: 0 (wq_completion)bond94#3 irq_context: 0 (wq_completion)bond94#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond94#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond94#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond94#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond94#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond94#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond94#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond94#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond99#2 irq_context: 0 (wq_completion)bond99#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond99#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond99#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond99#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond99#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond99#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond99#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond99#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->mcast.work)->work) rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->mcast.work)->work) rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->mcast.work)->work) rcu_read_lock rcu_read_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->mcast.work)->work) rcu_read_lock rcu_read_lock 
&obj_hash[i].lock irq_context: 0 (wq_completion)bond17#4 irq_context: 0 (wq_completion)bond17#4 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond17#4 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond17#4 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond17#4 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond17#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond17#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond17#4 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond17#4 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond17#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond17#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond96#3 irq_context: 0 (wq_completion)bond96#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond96#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond96#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond96#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond96#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond96#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond96#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond96#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond89#3 (work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond7#3 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)bond7#3 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 (wq_completion)bond7#3 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)bond7#3 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond106 irq_context: 0 (wq_completion)bond106 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond106 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond106 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond106 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond95#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond95#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond95#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond95#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock 
irq_context: 0 (wq_completion)bond95#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond95#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond95#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond95#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond95#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond95#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond95#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond95#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond95#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond95#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond95#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond95#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond95#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond95#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond95#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond95#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond95#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond95#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond95#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond95#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond106 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond106 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond106 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond106 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond95#3 irq_context: 0 (wq_completion)bond95#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond95#3 
(work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond95#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond95#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond94 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond95#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond95#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond95#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond95#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond100#2 irq_context: 0 (wq_completion)bond100#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond100#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond100#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond100#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond100#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond100#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond100#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond100#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond18#4 irq_context: 0 (wq_completion)bond18#4 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond18#4 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond18#4 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond18#4 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex lock#7 &rq->__lock irq_context: 0 (wq_completion)bond18#4 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond18#4 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond18#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond18#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond89#3 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)bond89#3 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 (wq_completion)bond89#3 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)bond89#3 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond18#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond18#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond18#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond18#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock 
irq_context: 0 (wq_completion)bond18#4 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond95 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond95 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond95 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond95 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond95 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond95 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond95 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond95 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond95 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond95 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond95 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond95 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond95 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond95 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond95 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond95 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond95 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond95 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond95 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond95 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond95 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond95 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond95 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond95 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock 
irq_context: 0 (wq_completion)bond95 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond105 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond105 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond105 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond105 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond105 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond105 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond105 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond105 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond105 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &rq->__lock irq_context: 0 (wq_completion)bond105 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond105 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond105 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond105 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond105 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond105 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond105 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond105 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond105 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond105 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond105 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond105 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond105 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond105 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond105 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond105 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond105 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh 
rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond105 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond105 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond105 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond105 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond105 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond105 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond105 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond105 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond105 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond105 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond12#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond12#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond86#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond86#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond86#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond86#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond86#4 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond87#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond87#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond88#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond88#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond88#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond88#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond88#4 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond87#3 (work_completion)(&(&slave->notify_work)->work) &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond97#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond97#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond97#2 
(work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond97#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond97#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond100#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond100#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond97#3 irq_context: 0 (wq_completion)bond97#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond97#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond97#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond97#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond107 irq_context: 0 (wq_completion)bond107 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond107 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond107 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond107 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond97#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond97#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond97#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond97#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond97#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond97#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond107 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond107 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond107 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond107 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond97 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond97 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond97 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond97 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond97 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond107 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond107 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond107 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond107 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond107 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock 
&ndev->lock irq_context: 0 (wq_completion)bond107 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond107 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond107 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond107 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond107 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond107 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond107 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond107 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond107 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond16#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond16#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond16#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond16#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond16#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond16#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond16#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond16#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond16#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond16#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond16#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond16#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond16#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond16#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond16#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond16#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond16#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond16#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond16#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond16#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond16#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 
irq_context: 0 (wq_completion)bond16#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond16#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond16#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond16#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond16#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond16#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond16#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond16#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond16#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond16#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond16#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond16#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond16#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond16#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond96#4 irq_context: 0 (wq_completion)bond96#4 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond96#4 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond96#4 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond96#4 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond96#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond96#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond91#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond96#4 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond96#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond96#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 
(wq_completion)bond96#4 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond96#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond96#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond93#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond93#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond101#2 irq_context: 0 (wq_completion)bond101#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond101#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond101#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond101#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond101#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond101#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond101#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond101#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond90#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond90#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond19#5 irq_context: 0 (wq_completion)bond19#5 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond19#5 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond19#5 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond19#5 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond19#5 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond19#5 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond19#5 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond19#5 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond19#5 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond19#5 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex fs_reclaim &rq->__lock irq_context: 0 (wq_completion)bond100#2 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond98#3 irq_context: 0 (wq_completion)bond98#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond98#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond98#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond98#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond98#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond98#3 (work_completion)(&(&bond->mcast_work)->work) 
&obj_hash[i].lock irq_context: 0 (wq_completion)bond98#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond98#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond98#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond108 irq_context: 0 (wq_completion)bond108 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond108 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond108 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond108 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond108 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond108 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond108 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond108 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond96#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond96#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond97#4 irq_context: 0 (wq_completion)bond97#4 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond97#4 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond97#4 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond97#4 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond97#4 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond97#4 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond97#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond97#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond102#2 irq_context: 0 (wq_completion)bond102#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond102#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond102#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond102#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond102#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond102#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond102#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond102#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond102#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond102#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond103 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond20#5 irq_context: 0 (wq_completion)bond20#5 
(work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond20#5 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond20#5 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond20#5 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond20#5 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond20#5 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond20#5 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond20#5 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond20#5 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond20#5 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond20#5 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond20#5 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond98#2 (work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond99#3 irq_context: 0 (wq_completion)bond99#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond99#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond99#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond99#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond99#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond99#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond98#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond99#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond99#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond99#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond99#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond99#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond99#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond103 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond103 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond103 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond103 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond103 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond103 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 
(wq_completion)bond103 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond103 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond103 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond103 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond103 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond103 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond103 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond103 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond103 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond103 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond103 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond103 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond103 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond103 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond103 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond103 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond103 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond103 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond10#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond103 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond10#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond10#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond10#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond10#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond10#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 
irq_context: 0 (wq_completion)bond10#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond10#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond10#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond10#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond10#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond10#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond10#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond10#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond10#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock irq_context: 0 (wq_completion)bond10#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond10#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond10#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond10#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond10#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock irq_context: 0 (wq_completion)bond10#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond10#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond10#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond10#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond10#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond10#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond10#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond10#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond10#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond10#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 
0 (wq_completion)bond10#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond10#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond109 irq_context: 0 (wq_completion)bond109 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond109 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond109 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond109 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond109 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond109 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond109 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond109 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond109 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &____s->seqcount#2 irq_context: 0 (wq_completion)bond109 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &____s->seqcount irq_context: 0 (wq_completion)bond109 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex pool_lock#2 irq_context: 0 (wq_completion)bond109 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond109 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond109 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond109 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond109 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond109 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock &p->pi_lock irq_context: 0 (wq_completion)bond109 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock &p->pi_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond109 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond109 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond109 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond109 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond109 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond109 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond109 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond109 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond99#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond99#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond99#3 (work_completion)(&(&bond->mcast_work)->work) 
rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond99#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond99#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond99#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond99#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond99#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond99#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond99#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond99#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock &p->pi_lock irq_context: 0 (wq_completion)bond99#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond99#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond99#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond99#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond99#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond99#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond99#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond99#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond100#2 (work_completion)(&(&slave->notify_work)->work) &base->lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)bond100#2 (work_completion)(&(&slave->notify_work)->work) &base->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond98#4 irq_context: 0 (wq_completion)bond98#4 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond98#4 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond98#4 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond98#4 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond98#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond98#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond98#4 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond98#4 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond98#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond98#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond103#2 irq_context: 0 (wq_completion)bond103#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond103#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 
(wq_completion)bond103#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond103#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond103#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond103#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond93#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond103#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond103#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond103#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond103#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond21#5 irq_context: 0 (wq_completion)bond21#5 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond21#5 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond21#5 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond21#5 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond21#5 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond21#5 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond21#5 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond21#5 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond21#5 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond21#5 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond96#2 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond11#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond100#3 irq_context: 0 (wq_completion)bond100#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond100#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond100#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond100#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond108 &rq->__lock irq_context: 0 (wq_completion)bond108 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond100#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond100#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond100#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond100#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond93#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond110 irq_context: 0 (wq_completion)bond110 (work_completion)(&(&slave->notify_work)->work) 
irq_context: 0 (wq_completion)bond110 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond110 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond110 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond110 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond110 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond110 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond110 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 (wq_completion)bond110 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond110 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond110 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond110 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond110 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond110 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond110 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond110 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond110 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond110 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond110 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond110 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock irq_context: 0 (wq_completion)bond110 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond110 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond110 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond95#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond95#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim &rq->__lock irq_context: 0 (wq_completion)bond95#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond95#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond95#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond95#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond95#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond95#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock 
irq_context: 0 (wq_completion)bond95#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond95#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond95#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &rq->__lock irq_context: 0 (wq_completion)bond95#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond95#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond95#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond95#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond95#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond95#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond95#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond95#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &____s->seqcount#2 irq_context: 0 (wq_completion)bond95#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &____s->seqcount irq_context: 0 (wq_completion)bond95#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock pool_lock#2 irq_context: 0 (wq_completion)bond95#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond95#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond95#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond95#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond95#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond95#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond95#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond95#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond95#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond95#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond95#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond95#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex 
&idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond95#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond95#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond95#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond95#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond95#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond95#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond95#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond95#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond95#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond95#3 &rq->__lock irq_context: 0 (wq_completion)bond95#3 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond101 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond101 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond99#4 irq_context: 0 (wq_completion)bond99#4 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond99#4 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond99#4 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond99#4 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond99#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond99#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond110 &rq->__lock irq_context: 0 (wq_completion)bond110 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond99#4 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond99#4 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond99#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond99#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond99#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond99#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond102#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond102#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond104 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond104 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &rq->__lock 
irq_context: 0 (wq_completion)bond104 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond104 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond104 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock &rq->__lock irq_context: 0 (wq_completion)bond104 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond104 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond104 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond104 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond104 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond85#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond85#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &rq->__lock irq_context: 0 (wq_completion)bond85#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond85#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond85#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock &rq->__lock irq_context: 0 (wq_completion)bond85#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond85#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond85#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond87#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond85#4 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond87#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond104 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond87#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond87#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond87#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond87#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond87#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond87#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond87#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond87#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond87#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond87#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond87#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock 
fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond87#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond87#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond87#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond87#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond87#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond87#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond87#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond87#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond87#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond87#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond87#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond87#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond87#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond87#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond87#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond87#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond87#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond87#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond87#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond87#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond104#2 irq_context: 0 (wq_completion)bond104#2 &rq->__lock irq_context: 0 (wq_completion)bond104#2 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond104#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond104#2 
(work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond104#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond104#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond104#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond104#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond104#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond104#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond104#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond104#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond104#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond104#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond93#3 &rq->__lock irq_context: 0 (wq_completion)bond93#3 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond22#4 irq_context: 0 (wq_completion)bond22#4 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond22#4 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond22#4 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond22#4 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond22#4 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond22#4 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond22#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond22#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond22#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond22#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond101#3 irq_context: 0 (wq_completion)bond101#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond101#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond101#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond101#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond101#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond96#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond96#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond96#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond96#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond96#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 
(wq_completion)bond96#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond96#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond96#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond96#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond96#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond96#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond96#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond96#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond96#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond96#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond96#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &____s->seqcount irq_context: 0 (wq_completion)bond96#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)bond96#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond96#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &n->list_lock irq_context: 0 (wq_completion)bond96#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond96#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond96#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond96#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond96#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond96#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond96#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond96#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond96#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond96#3 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond96#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond101#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond101#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond101#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond101#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond101#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond101#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond96#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond96#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond96#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond96#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond96#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond96#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond96#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond101#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond101#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond111 irq_context: 0 (wq_completion)bond111 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond111 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond111 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond111 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond111 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond111 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond20#5 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond20#5 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond20#5 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond20#5 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond20#5 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond111 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond111 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond111 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond111 
(work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond111 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond100#4 irq_context: 0 (wq_completion)bond100#4 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond100#4 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond100#4 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond100#4 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond100#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond100#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond85#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond85#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond100#4 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond100#4 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond100#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond100#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond105#2 irq_context: 0 (wq_completion)bond105#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond105#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond105#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond105#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond105#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond105#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond105#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond105#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex remove_cache_srcu rcu_read_lock &cfs_rq->removed.lock irq_context: 0 rtnl_mutex remove_cache_srcu rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond23#5 irq_context: 0 (wq_completion)bond23#5 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond23#5 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond23#5 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond23#5 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond23#5 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond23#5 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond23#5 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond23#5 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond23#5 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond23#5 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock 
&per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond93#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond100#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond100#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond100#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond100#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond100#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond100#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond100#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond100#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond100#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond100#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond100#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond100#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond100#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond100#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond100#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond100#4 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond111 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond111 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond111 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond111 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond111 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond98#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond98#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond98#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond98#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond98#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond19#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond19#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond19#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond19#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex 
rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond19#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond19#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond19#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond19#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond19#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond19#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond19#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond19#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond19#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond19#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond19#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond19#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock irq_context: 0 (wq_completion)bond19#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond19#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock irq_context: 0 (wq_completion)bond91#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond91#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond19#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond19#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock batched_entropy_u8.lock irq_context: 0 (wq_completion)bond19#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond19#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond19#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond19#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock kfence_freelist_lock irq_context: 0 (wq_completion)bond19#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond19#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond19#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond19#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond19#5 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond19#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond19#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond19#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond19#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond19#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond19#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond19#5 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond19#5 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond19#5 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond19#5 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond19#5 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond102#3 irq_context: 0 (wq_completion)bond102#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond102#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond102#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond102#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond102#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond102#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond102#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond102#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond112 irq_context: 0 (wq_completion)bond112 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond112 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond112 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond112 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond112 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock &pcp->lock &zone->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 
(wq_completion)bond112 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond112 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond112 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond112 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond112 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond101#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond101#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond101#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond101#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond101#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond101#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond101#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond101#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond101#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond101#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond101#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond101#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond101#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond101#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &rq->__lock irq_context: 0 (wq_completion)bond101#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond101#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond101#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond101#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond101#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond101#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim &rq->__lock irq_context: 0 (wq_completion)bond101#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond101#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond101#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond101#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond101#2 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond101#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond101#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond101#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond101#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond101#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond101#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond101#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond101#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond101#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond101#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond101#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond101#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond101#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond101#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond101#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond101#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &rq->__lock irq_context: 0 (wq_completion)bond101#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond101#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond101#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond101#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond101#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond101#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond89#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock 
irq_context: 0 (wq_completion)bond89#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond101#4 irq_context: 0 (wq_completion)bond101#4 &rq->__lock irq_context: 0 (wq_completion)bond101#4 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond101#4 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond101#4 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond101#4 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond101#4 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond101#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond101#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq &(&slave->notify_work)->timer rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: softirq &(&slave->notify_work)->timer rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)bond101#4 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond101#4 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond101#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond101#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond24#5 irq_context: 0 (wq_completion)bond24#5 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond24#5 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond24#5 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond24#5 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond24#5 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond24#5 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond24#5 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond24#5 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond24#5 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond24#5 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond106#2 irq_context: 0 (wq_completion)bond106#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond106#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond106#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond106#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond106#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond106#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond106#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond106#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock 
&obj_hash[i].lock irq_context: 0 (wq_completion)bond88#3 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond103#3 irq_context: 0 (wq_completion)bond103#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond103#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond103#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond103#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond103#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond103#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond103#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond103#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond103#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond103#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond113 irq_context: 0 (wq_completion)bond113 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond113 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond113 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond113 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond109 &rq->__lock irq_context: 0 (wq_completion)bond109 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond113 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond113 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond113 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond113 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->bla.work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond102#4 irq_context: 0 (wq_completion)bond102#4 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond102#4 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond102#4 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond102#4 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond102#4 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond102#4 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond102#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond102#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond98#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond25#3 irq_context: 0 (wq_completion)bond25#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond25#3 (work_completion)(&(&slave->notify_work)->work) 
&obj_hash[i].lock irq_context: 0 (wq_completion)bond25#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond25#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond25#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond106#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond106#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond98#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond98#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond95 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond95 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond25#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond25#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond25#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond25#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond107#2 irq_context: 0 (wq_completion)bond107#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond107#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond107#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond107#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond107#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond107#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond107#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond107#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond107#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond107#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond104#3 irq_context: 0 (wq_completion)bond104#3 &rq->__lock irq_context: 0 (wq_completion)bond104#3 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond104#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond104#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond104#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond104#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond104#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond104#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond104#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond104#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 
(wq_completion)bond104#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond104#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond104#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond104#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond114 irq_context: 0 (wq_completion)bond114 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond114 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond114 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond114 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond114 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond114 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond114 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond114 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: softirq (&peer->timer_retransmit_handshake) &list->lock#17 quarantine_lock irq_context: softirq (&peer->timer_retransmit_handshake) rcu_read_lock_bh &obj_hash[i].lock irq_context: softirq (&peer->timer_retransmit_handshake) rcu_read_lock_bh &base->lock irq_context: softirq (&peer->timer_retransmit_handshake) rcu_read_lock_bh &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond103#4 irq_context: 0 (wq_completion)bond103#4 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond103#4 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond103#4 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond103#4 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond103#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond105#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond105#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond103#4 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond103#4 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond103#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond103#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond91#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond91#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond91#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond91#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond91#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond91#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex 
&obj_hash[i].lock irq_context: 0 (wq_completion)bond91#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex quarantine_lock irq_context: 0 (wq_completion)bond91#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond91#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond91#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond91#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond91#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond91#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond91#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond91#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock remove_cache_srcu irq_context: 0 (wq_completion)bond91#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock remove_cache_srcu quarantine_lock irq_context: 0 (wq_completion)bond91#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock remove_cache_srcu &c->lock irq_context: 0 (wq_completion)bond91#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock remove_cache_srcu &n->list_lock irq_context: 0 (wq_completion)bond91#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock remove_cache_srcu &obj_hash[i].lock irq_context: 0 (wq_completion)bond91#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock remove_cache_srcu pool_lock#2 irq_context: 0 (wq_completion)bond91#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)bond91#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond91#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond91#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond91#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond91#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond91#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond91#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond91#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond91#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh 
rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond91#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond91#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond91#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond91#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond90 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond90 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond90 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond90 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond90 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond90 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond90 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond90 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond90 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond90 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond90 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond90 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond90 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond90 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond90 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond90 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond90 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond90 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond90 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond90 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond90 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh 
rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond90 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond90 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond90 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond90 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond90 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond90 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond90 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond90 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond90 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond90 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond90 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond90#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond90#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond26#2 irq_context: 0 (wq_completion)bond26#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond26#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond26#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond26#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond26#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond26#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond26#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond26#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond26#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond96#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond108 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond108 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond108#2 irq_context: 0 (wq_completion)bond108#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond108#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond108#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 
(wq_completion)bond108#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond108#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond108#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond108#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond108#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond108#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond108#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond108#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond108#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond22#4 &rq->__lock irq_context: 0 (wq_completion)bond22#4 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond105#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond105#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond106 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond106 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond12#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond105#3 irq_context: 0 (wq_completion)bond105#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond105#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond105#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond105#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond105#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond105#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond105#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond105#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond105#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond105#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond20#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond20#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond20#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond20#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond20#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond20#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond20#5 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond20#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond20#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond20#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond20#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond20#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond20#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond20#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond20#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond20#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond20#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond20#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond20#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond20#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond20#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond20#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &____s->seqcount#2 irq_context: 0 (wq_completion)bond20#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &____s->seqcount irq_context: 0 (wq_completion)bond20#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)bond20#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond20#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond20#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond20#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond20#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond20#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond20#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock 
rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond20#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond20#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond20#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond20#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond20#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond20#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond20#5 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond20#5 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond20#5 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond84#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond84#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond115 irq_context: 0 (wq_completion)bond115 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond115 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond115 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond115 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond100#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond100#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond115 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond115 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond115 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond115 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex &idev->mc_lock &bridge_netdev_addr_lock_key krc.lock &obj_hash[i].lock irq_context: 0 rtnl_mutex &idev->mc_lock &bridge_netdev_addr_lock_key krc.lock &base->lock irq_context: 0 rtnl_mutex &idev->mc_lock &bridge_netdev_addr_lock_key krc.lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond93#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond93#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond104#4 irq_context: 0 (wq_completion)bond104#4 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond104#4 
(work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond104#4 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond104#4 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond106#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond106#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond94#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond94#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond98 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond98 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond104#4 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond104#4 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond104#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond104#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond114 (work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)bond114 (work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 (wq_completion)bond114 (work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)bond114 (work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)bond114 (work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond27#2 irq_context: 0 (wq_completion)bond27#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond27#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond27#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond27#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond27#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond27#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond93#4 &rq->__lock irq_context: 0 (wq_completion)bond27#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond93#4 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond27#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond27#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond27#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond88#2 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond109#2 irq_context: 0 (wq_completion)bond109#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond109#2 
(work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond109#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond109#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond109#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond109#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond115 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond115 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond115 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond115 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond115 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond104#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond104#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim &rq->__lock irq_context: 0 (wq_completion)bond104#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond104#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond104#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond104#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond104#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond104#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond104#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond104#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond104#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond104#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond104#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond104#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &rq->__lock irq_context: 0 (wq_completion)bond104#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond104#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond104#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond104#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond104#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond104#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock 
&c->lock irq_context: 0 (wq_completion)bond104#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock irq_context: 0 (wq_completion)bond104#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond104#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock irq_context: 0 (wq_completion)bond104#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond104#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond104#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond104#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond104#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond104#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond104#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond104#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond104#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond104#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond104#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond104#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond104#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond104#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond109#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond109#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond109#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond109#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond109#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond109#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond7#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond7#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock 
&per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond103#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond103#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond102#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond102#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond102#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond102#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond102#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond102#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond102#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond102#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond102#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond102#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond102#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond102#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond102#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond102#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond102#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond102#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond102#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &____s->seqcount#2 irq_context: 0 (wq_completion)bond102#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &____s->seqcount irq_context: 0 (wq_completion)bond102#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond102#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond102#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond102#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond102#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond102#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock 
rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond102#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond102#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond102#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond102#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond102#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond14#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond14#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond14#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond14#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond14#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond7#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond7#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond7#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond7#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond7#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond102#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond102#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond102#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond102#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond102#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond102#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond102#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond102#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond102#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond102#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond102#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond102#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond102#4 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond102#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond102#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond102#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond102#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond102#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond102#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond102#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond102#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond102#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond102#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond102#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond102#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond102#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond102#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond102#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond102#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond102#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond102#4 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond102#4 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond102#4 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond102#4 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 
(wq_completion)bond102#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond102#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond18#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond18#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond94 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond94 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond102#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond102#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond102#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond102#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond102#4 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond102#4 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond102#4 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond102#4 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond102#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond102#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond16#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond16#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond16#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond16#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond16#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond97#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond97#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond97#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond97#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond97#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond97#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond97#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond97#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond97#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond97#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem 
&list->lock#2 irq_context: 0 (wq_completion)bond97#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond97#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond97#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond97#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond97#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond97#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond97#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &____s->seqcount irq_context: 0 (wq_completion)bond97#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond97#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond97#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond97#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond97#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond97#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond97#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond97#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond97#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond97#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond97#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond95#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond95#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond95#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond95#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond95#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond102#2 
(work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond102#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond102#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond102#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond102#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond95 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond95 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond95 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond95 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond95 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond93#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond93#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond93#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond93#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond93#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond93#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond93#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond93#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond93#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond93#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond93#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond93#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond93#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond93#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond93#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock irq_context: 0 (wq_completion)bond93#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond93#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond93#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond93#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond93#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 
(wq_completion)bond93#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond93#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond93#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond93#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond93#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond93#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond93#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond93#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond93#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond93#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond93#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond93#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond93#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond93#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq irq_context: 0 (wq_completion)bond93#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 (wq_completion)bond93#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond93#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond93#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond10#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond10#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond93#2 (work_completion)(&(&bond->mcast_work)->work) 
rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond93#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond93#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond93#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond105#4 irq_context: 0 (wq_completion)bond105#4 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond105#4 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond105#4 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond105#4 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond105#4 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond105#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond105#4 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond105#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond105#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond105#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond103#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond28#2 irq_context: 0 (wq_completion)bond28#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond28#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond28#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond28#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond26#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond28#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond26#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond28#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond28#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond28#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond28#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond28#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)bond110#2 irq_context: 0 (wq_completion)bond110#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond110#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond110#2 
(work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond110#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond110#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond110#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond110#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond110#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond107#3 irq_context: 0 (wq_completion)bond107#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond107#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond107#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond107#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond107#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond107#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond107#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond107#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond88#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond88#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond88#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond88#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond88#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond108 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond108 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond108 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond108 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond108 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond102 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond102 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond102 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond102 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond102 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond98 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond98 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond98 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond98 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond98 
(work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond21#5 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond21#5 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond21#5 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond21#5 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond21#5 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond105#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond105#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond105#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond105#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond105#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond105#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond105#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond105#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond105#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond105#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond105#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond105#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond105#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond105#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond105#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond105#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond105#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond105#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond105#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond105#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond105#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock 
&br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond105#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond105#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond105#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond105#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond105#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond105#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond105#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond105#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond105#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond105#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond89#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond89#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond89#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond89#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond89#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond103#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond103#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond103#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond103#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &____s->seqcount#2 irq_context: 0 (wq_completion)bond103#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond103#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond103#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &____s->seqcount irq_context: 0 (wq_completion)bond103#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond103#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond103#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond103#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond103#2 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond103#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond103#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond103#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond103#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock &rq->__lock irq_context: 0 (wq_completion)bond103#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond103#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond103#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond103#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond103#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond103#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond103#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond103#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond103#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond103#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond103#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond103#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond103#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond103#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond103#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond103#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond103#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond103#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 
(wq_completion)bond103#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond103#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond103#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond103#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond103#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond94#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond94#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond106#3 irq_context: 0 (wq_completion)bond106#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond106#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond106#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond106#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond106#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond106#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond106#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond106#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond106#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond106#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond29#2 irq_context: 0 (wq_completion)bond29#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond29#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond29#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond29#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock fill_pool_map-wait-type-override pool_lock#2 irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond29#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond29#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond29#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond29#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond98 (work_completion)(&(&bond->mcast_work)->work) &base->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond111#2 irq_context: 0 (wq_completion)bond111#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond111#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond111#2 (work_completion)(&(&slave->notify_work)->work) 
&base->lock irq_context: 0 (wq_completion)bond111#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond111#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond111#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond111#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond111#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond111#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond111#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond88#3 (work_completion)(&(&slave->notify_work)->work) &base->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond102#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond102#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond23#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond23#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond23#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond23#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond23#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond23#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond23#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond23#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond23#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond23#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond23#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond23#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond23#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond23#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond23#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond23#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond23#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond23#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond23#5 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond23#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond23#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond23#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond23#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond23#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond23#5 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond15#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond15#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond15#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond15#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond105 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond15#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond105 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond105 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond105 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond104 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond105 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond105 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond104 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond104 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond104 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond104 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond104 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond104 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond104 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond104 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond104 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond104 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond104 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond104 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond104 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond104 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &____s->seqcount#2 irq_context: 0 (wq_completion)bond104 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &____s->seqcount irq_context: 0 (wq_completion)bond104 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond104 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond104 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond104 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond104 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond104 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond104 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond104 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond104 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond104 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond104 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond105 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond105 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond105 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond108#3 irq_context: 0 (wq_completion)bond108#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond108#3 
(work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond108#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond108#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond108#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond108#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond111 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond108#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond111 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond111 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond111 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond111 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond111 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond111 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond111 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond111 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond111 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond111 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond111 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond111 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond111 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond111 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond111 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &____s->seqcount#2 irq_context: 0 (wq_completion)bond111 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &____s->seqcount irq_context: 0 (wq_completion)bond111 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond111 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond111 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &____s->seqcount#2 irq_context: 0 (wq_completion)bond111 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &____s->seqcount irq_context: 0 (wq_completion)bond111 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond111 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond111 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex 
&idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond111 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond111 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond111 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond111 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond111 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond111 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond111 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond111 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond108#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond108#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond108#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond107#4 irq_context: 0 (wq_completion)bond107#4 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond107#4 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond107#4 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond107#4 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond107#4 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond107#4 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond107#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond107#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond26#2 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond30#2 irq_context: 0 (wq_completion)bond30#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond30#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond30#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond30#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond30#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond30#2 
(work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond30#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond30#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond30#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond112#2 irq_context: 0 (wq_completion)bond112#2 &rq->__lock irq_context: 0 (wq_completion)bond112#2 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond112#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond112#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond112#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond112#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond112#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond112#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond112#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond112#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond109#3 irq_context: 0 (wq_completion)bond109#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond109#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond109#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond109#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock quarantine_lock irq_context: 0 (wq_completion)bond109#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond109#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond109#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond109#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond107#2 &rq->__lock irq_context: 0 (wq_completion)bond107#2 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq (&peer->timer_persistent_keepalive) rcu_read_lock_bh rcu_read_lock &pool->lock/1 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond108#4 irq_context: 0 (wq_completion)bond108#4 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond108#4 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond108#4 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond108#4 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond108#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond108#4 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond108#4 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond108#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond108#4 
(work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond110 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond110 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond31#2 irq_context: 0 (wq_completion)bond31#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond31#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond31#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond31#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond31#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond31#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond31#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond31#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond104#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond104#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond113#2 irq_context: 0 (wq_completion)bond113#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond113#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond113#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond113#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond113#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond113#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond113#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond113#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond110#3 irq_context: 0 (wq_completion)bond110#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond110#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond110#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond110#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond110#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond110#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond110#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond110#3 
(work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond110#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond110#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond109#4 irq_context: 0 (wq_completion)bond109#4 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond109#4 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond109#4 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond109#4 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond109#4 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond109#4 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond109#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond109#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond32#3 irq_context: 0 (wq_completion)bond32#3 &rq->__lock irq_context: 0 (wq_completion)bond32#3 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond32#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond32#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond32#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond32#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond32#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond32#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &pcp->lock &zone->lock &____s->seqcount irq_context: 0 (wq_completion)bond32#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond32#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond32#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond32#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond106#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond106#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond114#2 irq_context: 0 (wq_completion)bond114#2 &rq->__lock irq_context: 0 (wq_completion)bond114#2 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond114#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond114#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond114#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond114#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond114#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond114#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock 
&per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond114#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond114#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond114#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond114#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond96 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond111#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond111#3 irq_context: 0 (wq_completion)bond111#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond111#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond111#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond111#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond111#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond111#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond111#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond111#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond97#3 (work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)bond97#3 (work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 (wq_completion)bond97#3 (work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)bond97#3 (work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond31#2 (work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)bond31#2 (work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 (wq_completion)bond31#2 (work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)bond31#2 (work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond93#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond93#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond93#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 (wq_completion)bond93#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond93#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond93#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond93#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 
(wq_completion)bond93#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond93#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond93#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond93#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond93#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond93#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond93#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond93#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock irq_context: 0 (wq_completion)bond93#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond93#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond93#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond93#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond93#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond93#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond93#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond93#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond93#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond93#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond93#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond93#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond93#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond93#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond93#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 
irq_context: 0 (wq_completion)bond93#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond93#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond93#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond93#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond93#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond110#4 irq_context: 0 (wq_completion)bond110#4 &rq->__lock irq_context: 0 (wq_completion)bond110#4 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond110#4 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond110#4 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond110#4 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond110#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond110#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond110#4 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond110#4 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond110#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond110#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond33#3 irq_context: 0 (wq_completion)bond33#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond33#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond33#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond33#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond33#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond33#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond33#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond33#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond91#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond91#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond91#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond91#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond91#4 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: softirq &(&group->avgs_work)->timer rcu_read_lock &pool->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond115#2 irq_context: 0 (wq_completion)bond115#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond115#2 (work_completion)(&(&slave->notify_work)->work) 
&obj_hash[i].lock irq_context: 0 (wq_completion)bond115#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond115#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond110#2 &rq->__lock irq_context: 0 (wq_completion)bond110#2 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond112#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond112#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond115#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond115#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond115#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond115#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond115#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond115#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond98#3 &rq->__lock irq_context: 0 (wq_completion)bond98#3 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond112#3 irq_context: 0 (wq_completion)bond112#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond112#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond112#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond112#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond112#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond112#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond112#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond112#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond109#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond109#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond111#2 &rq->__lock irq_context: 0 (wq_completion)bond111#2 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond94#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond26#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond26#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond26#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond26#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond26#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond26#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond26#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock 
irq_context: 0 (wq_completion)bond26#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond26#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond26#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond26#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond26#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond26#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond26#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond26#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond26#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond26#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond26#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond26#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond26#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond26#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond26#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond26#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond26#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond26#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond26#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond26#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond26#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond26#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond111#4 irq_context: 0 (wq_completion)bond111#4 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond111#4 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond111#4 
(work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond111#4 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond99#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond99#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond111#4 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond111#4 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond111#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond111#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond34#4 irq_context: 0 (wq_completion)bond34#4 &rq->__lock irq_context: 0 (wq_completion)bond34#4 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond34#4 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond34#4 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond34#4 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond34#4 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond34#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond34#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond96#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond96#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond96#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond96#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond96#4 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond34#4 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond34#4 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond34#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond34#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond116 irq_context: 0 (wq_completion)bond116 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond116 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond116 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond116 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond116 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond116 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond116 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond116 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond106 (work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 
0 (wq_completion)bond13#3 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)bond13#3 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 (wq_completion)bond13#3 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)bond13#3 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond113#3 irq_context: 0 (wq_completion)bond113#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond113#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond113#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond113#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond113#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond113#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond113#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond113#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond113#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond113#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond113#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond113#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond100 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond100 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond112#4 irq_context: 0 (wq_completion)bond112#4 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond112#4 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond112#4 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond112#4 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond112#4 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond112#4 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond112#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond112#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock quarantine_lock irq_context: 0 (wq_completion)bond89 (work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond24#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond24#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond24#5 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond24#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond24#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond24#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond24#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond24#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond24#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond24#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond24#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond24#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond24#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond24#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond24#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond24#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond24#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond24#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond24#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond24#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond24#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond24#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond24#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond24#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond24#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond24#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh 
rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond24#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond24#5 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond93#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond93#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond93#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond93#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond93#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond93#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond93#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond93#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond93#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond93#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond93#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond93#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond93#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond93#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond93#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond93#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond93#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond93#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond93#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond93#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond93#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond93#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond93#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock 
&obj_hash[i].lock irq_context: 0 (wq_completion)bond93#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond93#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond93#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond93#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond8#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond8#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond93#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond93#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond93#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond93#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond93#4 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond93#4 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond93#4 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond35#5 irq_context: 0 (wq_completion)bond35#5 &rq->__lock irq_context: 0 (wq_completion)bond35#5 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond35#5 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond35#5 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond35#5 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond35#5 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond35#5 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond35#5 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond35#5 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond35#5 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond35#5 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond35#5 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond117 irq_context: 0 (wq_completion)bond117 &rq->__lock irq_context: 0 (wq_completion)bond117 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond117 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond117 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond117 (work_completion)(&(&slave->notify_work)->work) 
&base->lock irq_context: 0 (wq_completion)bond117 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond117 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond117 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond117 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond117 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond117 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond117 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond117 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond117 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond112 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond112 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond112 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond112 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond112 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond113#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond113#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond113#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond113#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond113#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond113#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond113#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond113#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond113#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond113#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond113#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond113#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond113#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond113#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond113#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond113#2 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond113#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond113#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond113#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond113#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond113#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond113#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond113#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond113#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond113#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond113#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond113#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond113#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq irq_context: 0 (wq_completion)bond113#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 (wq_completion)bond113#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond113#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond113#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond113#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond113#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond113#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond113#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 
(wq_completion)bond113#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond113#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond113#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond113#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond113#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond113#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond113#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond114 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond114 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond112#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond15#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond15#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond112#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond14#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond103#4 &rq->__lock irq_context: 0 (wq_completion)bond11#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond125 irq_context: 0 (wq_completion)bond125 &rq->__lock irq_context: 0 (wq_completion)bond125 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond125 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond125 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond125 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond125 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond125 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond97#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond97#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond107#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond107#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond105#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond105#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond125 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond125 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond125 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond125 (work_completion)(&(&bond->mcast_work)->work) &base->lock 
&obj_hash[i].lock irq_context: 0 (wq_completion)bond114#3 irq_context: 0 (wq_completion)bond114#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond114#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond114#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond114#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond87#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond87#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond114#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond114#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond114#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond114#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond111#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond111#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond111#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond111#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond111#4 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond10#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond10#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond10#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond10#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond10#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond111#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond111#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond111#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond111#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond111#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond111#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond111#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond111#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond111#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond111#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond111#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond111#2 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond111#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond111#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond111#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond111#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond111#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond111#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond111#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond111#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond111#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond111#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond111#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond111#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond111#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond111#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond111#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond111#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond111#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond111#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond111#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond111#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond111#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond111#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond111#2 
(work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond111#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond113#4 irq_context: 0 (wq_completion)bond113#4 &rq->__lock irq_context: 0 (wq_completion)bond113#4 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond113#4 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond113#4 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond113#4 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond113#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond113#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond113#4 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond113#4 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond113#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond113#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond113#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond113#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond125 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond125 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond36#5 irq_context: 0 (wq_completion)bond36#5 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond36#5 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond36#5 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond36#5 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond36#5 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond36#5 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond36#5 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond36#5 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond36#5 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond36#5 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 br_ioctl_mutex rtnl_mutex &root->kernfs_rwsem &obj_hash[i].lock irq_context: 0 br_ioctl_mutex rtnl_mutex &root->kernfs_rwsem pool_lock#2 irq_context: 0 br_ioctl_mutex rtnl_mutex &root->kernfs_rwsem kernfs_idr_lock irq_context: 0 br_ioctl_mutex rtnl_mutex (console_sem).lock irq_context: 0 br_ioctl_mutex rtnl_mutex console_lock console_srcu console_owner_lock irq_context: 0 br_ioctl_mutex rtnl_mutex console_lock console_srcu console_owner irq_context: 0 br_ioctl_mutex rtnl_mutex console_lock console_srcu console_owner &port_lock_key irq_context: 0 br_ioctl_mutex rtnl_mutex console_lock console_srcu console_owner 
console_owner_lock irq_context: 0 (wq_completion)bond17#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond17#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 br_ioctl_mutex rtnl_mutex _xmit_ETHER irq_context: 0 br_ioctl_mutex rtnl_mutex quarantine_lock irq_context: 0 br_ioctl_mutex rtnl_mutex &br->lock irq_context: 0 br_ioctl_mutex rtnl_mutex &br->lock pool_lock#2 irq_context: 0 br_ioctl_mutex rtnl_mutex &br->lock &dir->lock#2 irq_context: 0 br_ioctl_mutex rtnl_mutex &br->lock deferred_lock irq_context: 0 br_ioctl_mutex rtnl_mutex &br->lock rcu_read_lock &pool->lock irq_context: 0 br_ioctl_mutex rtnl_mutex &br->lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 br_ioctl_mutex rtnl_mutex &br->lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 br_ioctl_mutex rtnl_mutex &br->lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 br_ioctl_mutex rtnl_mutex &br->lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 br_ioctl_mutex rtnl_mutex &br->lock (console_sem).lock irq_context: 0 br_ioctl_mutex rtnl_mutex &br->lock console_lock console_srcu console_owner_lock irq_context: 0 br_ioctl_mutex rtnl_mutex &br->lock console_lock console_srcu console_owner irq_context: 0 br_ioctl_mutex rtnl_mutex &br->lock console_lock console_srcu console_owner &port_lock_key irq_context: 0 br_ioctl_mutex rtnl_mutex &br->lock console_lock console_srcu console_owner console_owner_lock irq_context: 0 br_ioctl_mutex rtnl_mutex &br->lock &c->lock irq_context: 0 br_ioctl_mutex rtnl_mutex &br->lock &n->list_lock irq_context: 0 br_ioctl_mutex rtnl_mutex &br->lock &n->list_lock &c->lock irq_context: 0 br_ioctl_mutex rtnl_mutex &br->lock nl_table_lock irq_context: 0 br_ioctl_mutex rtnl_mutex &br->lock &obj_hash[i].lock irq_context: 0 br_ioctl_mutex rtnl_mutex &br->lock nl_table_wait.lock irq_context: 0 br_ioctl_mutex rtnl_mutex &br->lock &br->hash_lock irq_context: 0 br_ioctl_mutex rtnl_mutex &br->lock &br->multicast_lock irq_context: 0 br_ioctl_mutex rtnl_mutex &br->lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 br_ioctl_mutex rtnl_mutex (&pmctx->ip6_mc_router_timer) irq_context: 0 br_ioctl_mutex rtnl_mutex &base->lock irq_context: 0 br_ioctl_mutex rtnl_mutex (&pmctx->ip4_mc_router_timer) irq_context: 0 br_ioctl_mutex rtnl_mutex rcu_state.exp_mutex rcu_node_0 irq_context: 0 br_ioctl_mutex rtnl_mutex rcu_state.exp_mutex &obj_hash[i].lock irq_context: 0 br_ioctl_mutex rtnl_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock irq_context: 0 br_ioctl_mutex rtnl_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 br_ioctl_mutex rtnl_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 br_ioctl_mutex rtnl_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 br_ioctl_mutex rtnl_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 br_ioctl_mutex rtnl_mutex rcu_state.exp_mutex &rq->__lock irq_context: 0 br_ioctl_mutex rtnl_mutex rcu_state.exp_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 br_ioctl_mutex rtnl_mutex (work_completion)(&ht->run_work) irq_context: 0 br_ioctl_mutex rtnl_mutex &ht->mutex irq_context: 0 br_ioctl_mutex rtnl_mutex &ht->mutex &obj_hash[i].lock irq_context: 0 br_ioctl_mutex rtnl_mutex &ht->mutex pool_lock#2 irq_context: 0 
br_ioctl_mutex rtnl_mutex deferred_lock irq_context: 0 br_ioctl_mutex rtnl_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 br_ioctl_mutex rtnl_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 br_ioctl_mutex rtnl_mutex rcu_state.exp_mutex &rnp->exp_wq[1] irq_context: 0 br_ioctl_mutex rtnl_mutex rcu_state.exp_mutex rcu_read_lock &rq->__lock irq_context: 0 br_ioctl_mutex rtnl_mutex rcu_state.exp_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 br_ioctl_mutex rtnl_mutex &br->multicast_lock irq_context: 0 br_ioctl_mutex rtnl_mutex pcpu_lock irq_context: 0 br_ioctl_mutex rtnl_mutex kernfs_idr_lock irq_context: 0 br_ioctl_mutex rtnl_mutex rcu_state.exp_mutex &rnp->exp_wq[2] irq_context: 0 br_ioctl_mutex rtnl_mutex &br->hash_lock rcu_read_lock rcu_read_lock &pool->lock irq_context: 0 br_ioctl_mutex rtnl_mutex &br->hash_lock rcu_read_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 br_ioctl_mutex rtnl_mutex &br->hash_lock rcu_read_lock rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 br_ioctl_mutex rtnl_mutex &br->hash_lock rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 br_ioctl_mutex rtnl_mutex &br->hash_lock rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 br_ioctl_mutex rtnl_mutex &br->hash_lock rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 br_ioctl_mutex rtnl_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 br_ioctl_mutex rtnl_mutex (work_completion)(&(&br->gc_work)->work) irq_context: 0 br_ioctl_mutex rtnl_mutex (work_completion)(&(&br->gc_work)->work) &rq->__lock irq_context: 0 br_ioctl_mutex rtnl_mutex (work_completion)(&(&br->gc_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 br_ioctl_mutex rtnl_mutex rcu_state.exp_mutex &rnp->exp_wq[0] irq_context: 0 br_ioctl_mutex rtnl_mutex cpu_hotplug_lock irq_context: 0 br_ioctl_mutex rtnl_mutex cpu_hotplug_lock &list->lock#5 irq_context: 0 br_ioctl_mutex rtnl_mutex bpf_devs_lock irq_context: 0 br_ioctl_mutex rtnl_mutex &hwstats->hwsdev_list_lock irq_context: 0 br_ioctl_mutex rtnl_mutex &in_dev->mc_tomb_lock irq_context: 0 br_ioctl_mutex rtnl_mutex &pmc->lock irq_context: 0 br_ioctl_mutex rtnl_mutex (inetaddr_chain).rwsem irq_context: 0 br_ioctl_mutex rtnl_mutex (inetaddr_chain).rwsem &c->lock irq_context: 0 br_ioctl_mutex rtnl_mutex (inetaddr_chain).rwsem pool_lock#2 irq_context: 0 br_ioctl_mutex rtnl_mutex (inetaddr_chain).rwsem &obj_hash[i].lock irq_context: 0 br_ioctl_mutex rtnl_mutex (inetaddr_chain).rwsem rcu_read_lock &pool->lock/1 irq_context: 0 br_ioctl_mutex rtnl_mutex (inetaddr_chain).rwsem rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 br_ioctl_mutex rtnl_mutex (inetaddr_chain).rwsem rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 br_ioctl_mutex rtnl_mutex (inetaddr_chain).rwsem rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 br_ioctl_mutex rtnl_mutex (inetaddr_chain).rwsem rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 br_ioctl_mutex rtnl_mutex (inetaddr_chain).rwsem rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 br_ioctl_mutex rtnl_mutex (inetaddr_chain).rwsem &rq->__lock irq_context: 0 br_ioctl_mutex rtnl_mutex (inetaddr_chain).rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 
0 br_ioctl_mutex rtnl_mutex (inetaddr_chain).rwsem rcu_read_lock pool_lock#2 irq_context: 0 br_ioctl_mutex rtnl_mutex (inetaddr_chain).rwsem rcu_read_lock &data->fib_event_queue_lock irq_context: 0 br_ioctl_mutex rtnl_mutex (inetaddr_chain).rwsem rcu_read_lock rcu_read_lock &pool->lock irq_context: 0 br_ioctl_mutex rtnl_mutex (inetaddr_chain).rwsem rcu_read_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 br_ioctl_mutex rtnl_mutex (inetaddr_chain).rwsem rcu_read_lock rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 br_ioctl_mutex rtnl_mutex (inetaddr_chain).rwsem rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 br_ioctl_mutex rtnl_mutex (inetaddr_chain).rwsem rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 br_ioctl_mutex rtnl_mutex (inetaddr_chain).rwsem rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 br_ioctl_mutex rtnl_mutex (inetaddr_chain).rwsem rcu_read_lock rcu_node_0 irq_context: 0 br_ioctl_mutex rtnl_mutex (inetaddr_chain).rwsem rcu_read_lock &rq->__lock irq_context: 0 br_ioctl_mutex rtnl_mutex (inetaddr_chain).rwsem rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 br_ioctl_mutex rtnl_mutex (inetaddr_chain).rwsem fs_reclaim irq_context: 0 br_ioctl_mutex rtnl_mutex (inetaddr_chain).rwsem fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 br_ioctl_mutex rtnl_mutex (inetaddr_chain).rwsem nl_table_lock irq_context: 0 br_ioctl_mutex rtnl_mutex (inetaddr_chain).rwsem nl_table_wait.lock irq_context: 0 br_ioctl_mutex rtnl_mutex (inetaddr_chain).rwsem fib_info_lock irq_context: 0 br_ioctl_mutex rtnl_mutex (inetaddr_chain).rwsem fib_info_lock &obj_hash[i].lock irq_context: 0 br_ioctl_mutex rtnl_mutex (inetaddr_chain).rwsem fib_info_lock pool_lock#2 irq_context: 0 br_ioctl_mutex rtnl_mutex (inetaddr_chain).rwsem &tbl->lock irq_context: 0 br_ioctl_mutex rtnl_mutex (inetaddr_chain).rwsem class irq_context: 0 br_ioctl_mutex rtnl_mutex (inetaddr_chain).rwsem (&tbl->proxy_timer) irq_context: 0 br_ioctl_mutex rtnl_mutex (inetaddr_chain).rwsem &base->lock irq_context: 0 br_ioctl_mutex rtnl_mutex (inetaddr_chain).rwsem &net->sctp.local_addr_lock irq_context: 0 br_ioctl_mutex rtnl_mutex (inetaddr_chain).rwsem &net->sctp.local_addr_lock &net->sctp.addr_wq_lock irq_context: 0 br_ioctl_mutex rtnl_mutex (inetaddr_chain).rwsem &net->sctp.local_addr_lock &net->sctp.addr_wq_lock &c->lock irq_context: 0 br_ioctl_mutex rtnl_mutex (inetaddr_chain).rwsem &net->sctp.local_addr_lock &net->sctp.addr_wq_lock &n->list_lock irq_context: 0 br_ioctl_mutex rtnl_mutex (inetaddr_chain).rwsem &net->sctp.local_addr_lock &net->sctp.addr_wq_lock &n->list_lock &c->lock irq_context: 0 br_ioctl_mutex rtnl_mutex (inetaddr_chain).rwsem &net->sctp.local_addr_lock &net->sctp.addr_wq_lock pool_lock#2 irq_context: 0 br_ioctl_mutex rtnl_mutex (inetaddr_chain).rwsem &net->sctp.local_addr_lock &net->sctp.addr_wq_lock &obj_hash[i].lock irq_context: 0 br_ioctl_mutex rtnl_mutex (inetaddr_chain).rwsem &net->sctp.local_addr_lock &net->sctp.addr_wq_lock &base->lock irq_context: 0 br_ioctl_mutex rtnl_mutex (inetaddr_chain).rwsem &net->sctp.local_addr_lock &net->sctp.addr_wq_lock &base->lock &obj_hash[i].lock irq_context: 0 br_ioctl_mutex rtnl_mutex (inetaddr_chain).rwsem krc.lock irq_context: 0 br_ioctl_mutex rtnl_mutex sysctl_lock &obj_hash[i].lock irq_context: 0 br_ioctl_mutex rtnl_mutex sysctl_lock pool_lock#2 irq_context: 0 br_ioctl_mutex rtnl_mutex sysctl_lock krc.lock 
irq_context: 0 br_ioctl_mutex rtnl_mutex class irq_context: 0 br_ioctl_mutex rtnl_mutex (&tbl->proxy_timer) irq_context: 0 br_ioctl_mutex rtnl_mutex &ul->lock irq_context: 0 br_ioctl_mutex rtnl_mutex &net->xdp.lock irq_context: 0 br_ioctl_mutex rtnl_mutex mirred_list_lock irq_context: 0 br_ioctl_mutex rtnl_mutex &nft_net->commit_mutex irq_context: 0 br_ioctl_mutex rtnl_mutex rcu_read_lock &tb->tb6_lock irq_context: 0 br_ioctl_mutex rtnl_mutex rcu_read_lock &tb->tb6_lock &net->ipv6.fib6_walker_lock irq_context: 0 br_ioctl_mutex rtnl_mutex &ul->lock#2 irq_context: 0 br_ioctl_mutex rtnl_mutex &ent->pde_unload_lock irq_context: 0 br_ioctl_mutex rtnl_mutex &net->ipv6.addrconf_hash_lock irq_context: 0 br_ioctl_mutex rtnl_mutex &ndev->lock irq_context: 0 br_ioctl_mutex rtnl_mutex &ndev->lock &obj_hash[i].lock irq_context: 0 br_ioctl_mutex rtnl_mutex &idev->mc_query_lock irq_context: 0 br_ioctl_mutex rtnl_mutex &idev->mc_query_lock &obj_hash[i].lock irq_context: 0 br_ioctl_mutex rtnl_mutex (work_completion)(&(&idev->mc_report_work)->work) irq_context: 0 br_ioctl_mutex rtnl_mutex &idev->mc_lock krc.lock irq_context: 0 br_ioctl_mutex rtnl_mutex &idev->mc_report_lock irq_context: 0 br_ioctl_mutex rtnl_mutex (&brmctx->ip4_mc_router_timer) irq_context: 0 br_ioctl_mutex rtnl_mutex (&brmctx->ip4_other_query.timer) irq_context: 0 br_ioctl_mutex rtnl_mutex (&brmctx->ip4_own_query.timer) irq_context: 0 br_ioctl_mutex rtnl_mutex (&brmctx->ip6_mc_router_timer) irq_context: 0 br_ioctl_mutex rtnl_mutex (&brmctx->ip6_other_query.timer) irq_context: 0 br_ioctl_mutex rtnl_mutex (&brmctx->ip6_own_query.timer) irq_context: 0 br_ioctl_mutex rtnl_mutex &pnn->pndevs.lock irq_context: 0 br_ioctl_mutex rtnl_mutex &pnn->routes.lock irq_context: 0 br_ioctl_mutex rtnl_mutex target_list_lock irq_context: 0 br_ioctl_mutex rtnl_mutex &bridge_netdev_addr_lock_key/1 irq_context: 0 br_ioctl_mutex rtnl_mutex &rq->__lock &cfs_rq->removed.lock irq_context: 0 br_ioctl_mutex rtnl_mutex (work_completion)(&br->mcast_gc_work) irq_context: 0 br_ioctl_mutex rtnl_mutex rcu_read_lock &pool->lock irq_context: 0 br_ioctl_mutex rtnl_mutex rcu_state.barrier_mutex irq_context: 0 br_ioctl_mutex rtnl_mutex rcu_state.barrier_mutex rcu_state.barrier_lock irq_context: 0 br_ioctl_mutex rtnl_mutex rcu_state.barrier_mutex rcu_state.barrier_lock &obj_hash[i].lock irq_context: 0 br_ioctl_mutex rtnl_mutex rcu_state.barrier_mutex &x->wait#24 irq_context: 0 br_ioctl_mutex rtnl_mutex rcu_state.barrier_mutex &rq->__lock irq_context: 0 br_ioctl_mutex rtnl_mutex rcu_state.barrier_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 br_ioctl_mutex rtnl_mutex rcu_state.barrier_mutex &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond92#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond28#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond28#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 br_ioctl_mutex rtnl_mutex rcu_state.exp_mutex &rnp->exp_wq[3] irq_context: 0 br_ioctl_mutex rtnl_mutex dev_pm_qos_sysfs_mtx irq_context: 0 br_ioctl_mutex rtnl_mutex dev_pm_qos_sysfs_mtx &rq->__lock irq_context: 0 br_ioctl_mutex rtnl_mutex dev_pm_qos_sysfs_mtx &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 br_ioctl_mutex rtnl_mutex dev_pm_qos_sysfs_mtx &root->kernfs_rwsem irq_context: 0 br_ioctl_mutex rtnl_mutex dev_pm_qos_sysfs_mtx 
&root->kernfs_rwsem irq_context: 0 br_ioctl_mutex rtnl_mutex dev_pm_qos_sysfs_mtx dev_pm_qos_mtx irq_context: 0 br_ioctl_mutex rtnl_mutex &root->kernfs_rwsem &____s->seqcount irq_context: 0 br_ioctl_mutex rtnl_mutex subsys mutex#17 &k->k_lock klist_remove_lock irq_context: 0 br_ioctl_mutex rtnl_mutex &root->kernfs_rwsem kernfs_idr_lock &obj_hash[i].lock irq_context: 0 br_ioctl_mutex rtnl_mutex &root->kernfs_rwsem kernfs_idr_lock pool_lock#2 irq_context: 0 br_ioctl_mutex rtnl_mutex deferred_probe_mutex irq_context: 0 br_ioctl_mutex rtnl_mutex device_links_lock irq_context: 0 br_ioctl_mutex rtnl_mutex mmu_notifier_invalidate_range_start irq_context: 0 br_ioctl_mutex rtnl_mutex batched_entropy_u8.lock irq_context: 0 br_ioctl_mutex rtnl_mutex kfence_freelist_lock irq_context: 0 br_ioctl_mutex rtnl_mutex uevent_sock_mutex mmu_notifier_invalidate_range_start irq_context: 0 br_ioctl_mutex rtnl_mutex uevent_sock_mutex mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 br_ioctl_mutex rtnl_mutex uevent_sock_mutex mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 br_ioctl_mutex rtnl_mutex &meta->lock irq_context: 0 br_ioctl_mutex rtnl_mutex cpu_hotplug_lock xps_map_mutex irq_context: 0 br_ioctl_mutex rcu_state.barrier_mutex irq_context: 0 br_ioctl_mutex rcu_state.barrier_mutex rcu_state.barrier_lock irq_context: 0 br_ioctl_mutex rcu_state.barrier_mutex rcu_state.barrier_lock &obj_hash[i].lock irq_context: 0 br_ioctl_mutex rcu_state.barrier_mutex &x->wait#24 irq_context: 0 br_ioctl_mutex rcu_state.barrier_mutex &rq->__lock irq_context: 0 br_ioctl_mutex rcu_state.barrier_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond118 irq_context: 0 (wq_completion)bond118 &rq->__lock irq_context: 0 (wq_completion)bond118 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 br_ioctl_mutex dev_base_lock irq_context: 0 br_ioctl_mutex lweventlist_lock irq_context: 0 br_ioctl_mutex &obj_hash[i].lock irq_context: 0 br_ioctl_mutex pool_lock#2 irq_context: 0 br_ioctl_mutex krc.lock irq_context: 0 br_ioctl_mutex &dir->lock#2 irq_context: 0 br_ioctl_mutex &dir->lock#2 &obj_hash[i].lock irq_context: 0 br_ioctl_mutex &dir->lock#2 pool_lock#2 irq_context: 0 br_ioctl_mutex netdev_unregistering_wq.lock irq_context: 0 br_ioctl_mutex &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)bond118 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond118 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond118 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond118 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond118 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond118 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq &(&bond->mcast_work)->timer rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override &c->lock irq_context: softirq &(&bond->mcast_work)->timer rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: softirq &(&bond->mcast_work)->timer rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)bond118 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond118 (work_completion)(&(&bond->mcast_work)->work) 
&obj_hash[i].lock irq_context: 0 (wq_completion)bond118 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond118 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond126 irq_context: 0 (wq_completion)bond126 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond126 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond126 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond126 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond126 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond126 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond126 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond126 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim &rq->__lock irq_context: 0 (wq_completion)bond126 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond126 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond126 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond126 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond126 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond126 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond126 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond126 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond126 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond126 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &rq->__lock irq_context: 0 (wq_completion)bond126 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond126 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond126 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond126 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond126 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond126 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond126 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond126 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond126 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond126 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond112#2 (work_completion)(&(&slave->notify_work)->work) 
&rq->__lock irq_context: 0 (wq_completion)bond115#3 irq_context: 0 (wq_completion)bond115#3 &rq->__lock irq_context: 0 (wq_completion)bond115#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond115#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond115#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond115#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond115#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond115#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond115#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond115#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond115#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond35#5 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond35#5 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond114#4 irq_context: 0 (wq_completion)bond114#4 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond114#4 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond114#4 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond114#4 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond114#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond114#4 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond114#4 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond114#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond114#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond106#3 &rq->__lock irq_context: 0 (wq_completion)bond106#3 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond110#3 (work_completion)(&(&slave->notify_work)->work) &base->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond88#2 &rq->__lock irq_context: 0 (wq_completion)bond88#2 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond37#5 irq_context: 0 (wq_completion)bond37#5 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond37#5 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond37#5 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond37#5 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond37#5 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond37#5 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond100#2 &rq->__lock irq_context: 0 (wq_completion)bond100#2 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond90 &rq->__lock irq_context: 0 (wq_completion)bond90 &rq->__lock 
&per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond90 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond90 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond93 &rq->__lock irq_context: 0 (wq_completion)bond93 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond37#5 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond37#5 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond37#5 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond37#5 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond96#3 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)bond96#3 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 (wq_completion)bond96#3 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)bond96#3 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond119 irq_context: 0 (wq_completion)bond119 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond119 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond119 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond119 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond119 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond119 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond119 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond119 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond119 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond98#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond98#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond22#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond22#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond127 irq_context: 0 (wq_completion)bond127 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond127 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond127 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond127 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond127 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond127 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond100#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond100#3 
(work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond110#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond110#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond84#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond127 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond127 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond127 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond127 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond97 (work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond116#2 irq_context: 0 (wq_completion)bond116#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond116#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond116#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond116#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond92#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond90 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond90 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond90 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond90 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond90 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond111#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond111#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond111#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond111#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond111#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond112 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond112 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond112 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond112 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond112 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond112 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond112 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond112 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex 
net_rwsem irq_context: 0 (wq_completion)bond112 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond112 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond112 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond112 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond112 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond112 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond112 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond112 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond112 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond112 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond112 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond112 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond112 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond112 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond112 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond112 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond112 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond112 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond112 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond112 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond112 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond112 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 
(wq_completion)bond112 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond112 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond112 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond112 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond116#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond116#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond116#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond116#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond116#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond116#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond100 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond100 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock quarantine_lock irq_context: 0 (wq_completion)bond31#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond31#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond38#4 irq_context: 0 (wq_completion)bond38#4 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond38#4 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond38#4 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond38#4 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond38#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond38#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond38#4 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond38#4 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond38#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond38#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond115#4 irq_context: 0 (wq_completion)bond115#4 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond115#4 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond115#4 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond115#4 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond115#4 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond115#4 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond115#4 
(work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond115#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond120 irq_context: 0 (wq_completion)bond120 &rq->__lock irq_context: 0 (wq_completion)bond120 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond120 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond120 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond120 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond120 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond120 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond120 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond120 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond120 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond120 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond120 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond90#2 (work_completion)(&(&slave->notify_work)->work) &base->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond15#3 &rq->__lock irq_context: 0 (wq_completion)bond15#3 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond101#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond101#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond128 irq_context: 0 (wq_completion)bond128 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond128 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond128 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond128 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond128 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond128 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond128 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond128 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond128 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond117#2 irq_context: 0 (wq_completion)bond117#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond117#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond117#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond117#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond117#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond117#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond117#2 (work_completion)(&(&bond->mcast_work)->work) 
&obj_hash[i].lock irq_context: 0 (wq_completion)bond117#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond117#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond32#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond32#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond39#5 irq_context: 0 (wq_completion)bond39#5 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond39#5 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond39#5 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond39#5 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond39#5 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond39#5 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond39#5 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond39#5 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond109 (work_completion)(&(&slave->notify_work)->work) &base->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond116#3 irq_context: 0 (wq_completion)bond116#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond116#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond116#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond116#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond116#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond116#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond116#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond116#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond116#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond116#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond9#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond9#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond115#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond115#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond121 irq_context: 0 (wq_completion)bond121 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond121 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond121 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond121 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond103#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock 
irq_context: 0 (wq_completion)bond103#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond121 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond121 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond121 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond121 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond129 irq_context: 0 (wq_completion)bond129 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond129 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond129 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond129 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond129 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond129 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond129 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond129 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond129 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond129 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond129 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond114#4 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond118#2 irq_context: 0 (wq_completion)bond118#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond118#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond118#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond118#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond118#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond118#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond118#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond118#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond114#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond114#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond114#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond114#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond114#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond114#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond114#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 
(wq_completion)bond114#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond114#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond114#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond114#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond114#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond114#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond114#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond114#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond114#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond114#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond114#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &____s->seqcount#2 irq_context: 0 (wq_completion)bond114#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &____s->seqcount irq_context: 0 (wq_completion)bond114#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond114#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond114#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond114#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond114#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond114#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond114#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond114#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond114#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond114#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond114#4 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 
(wq_completion)bond31#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond31#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond31#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond31#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond31#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond110#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond110#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond110#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond110#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond110#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond110#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond110#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond110#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond110#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond110#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond110#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond110#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond110#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim &rq->__lock irq_context: 0 (wq_completion)bond110#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond110#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond110#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 (wq_completion)bond110#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond110#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock irq_context: 0 (wq_completion)bond110#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond110#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock irq_context: 0 (wq_completion)bond110#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond110#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond110#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex 
&idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond110#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond110#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond110#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond110#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond110#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond110#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond110#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond110#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond110#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond110#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond110#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond110#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond110#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond110#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond110#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond110#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond110#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond110#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond40#5 irq_context: 0 (wq_completion)bond40#5 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond40#5 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond40#5 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond40#5 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 
(wq_completion)bond40#5 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond40#5 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond40#5 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond40#5 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond40#5 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond40#5 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond113#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond113#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond21#5 &rq->__lock irq_context: 0 (wq_completion)bond21#5 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond108#2 &rq->__lock irq_context: 0 (wq_completion)bond108#2 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond101#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond117#3 irq_context: 0 (wq_completion)bond117#3 &rq->__lock irq_context: 0 (wq_completion)bond117#3 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond117#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond117#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond117#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond117#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond117#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond117#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond117#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond117#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond117#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond117#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg1#18 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg1#18 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)wg-kex-wg1#18 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : 
"=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &n->lock irq_context: 0 (wq_completion)wg-kex-wg1#18 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond30#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond122 irq_context: 0 (wq_completion)bond122 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond122 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond122 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond122 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond122 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond122 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond122 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond122 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond122 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond122 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond115#2 &rq->__lock irq_context: 0 (wq_completion)bond115#2 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond122 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond122 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond122 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond122 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond122 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond30#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond30#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond30#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond30#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond30#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond30#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond30#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond30#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond30#2 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond30#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond30#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond30#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond30#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond30#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond30#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond30#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond30#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond30#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond30#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond30#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond30#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond30#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond30#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond30#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond30#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond30#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond30#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond30#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond30#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond30#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond30#2 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond30#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond30#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond30#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond30#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond30#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond130 irq_context: 0 (wq_completion)bond130 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond130 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond130 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond130 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond115 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond115 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond39#5 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond39#5 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond97#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond97#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond108 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond108 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond130 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond130 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond130 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond130 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond119#2 irq_context: 0 (wq_completion)bond119#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond119#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond119#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond119#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond33#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond33#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond110#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond110#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond119#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond119#2 
(work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond119#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond119#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond41#5 irq_context: 0 (wq_completion)bond41#5 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond41#5 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond41#5 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond41#5 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond91#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond91#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond91#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond91#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond91#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond41#5 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond41#5 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond41#5 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond41#5 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond38#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond38#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond38#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond38#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond38#4 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond101 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond101 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond101 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond101 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond101 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond96 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond96 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond96 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond96 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond96 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond96 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond96 (work_completion)(&(&bond->mcast_work)->work) 
rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond96 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond96 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond96 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond96 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond96 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond96 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond96 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond96 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond96 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond96 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond96 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond96 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond96 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond29#2 &rq->__lock irq_context: 0 (wq_completion)bond96 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond29#2 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond96 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond96 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond96 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond96 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond96 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond96 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond96 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 
irq_context: 0 (wq_completion)bond96 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond96 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond107#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond107#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond96 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond96 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq irq_context: 0 (wq_completion)bond96 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 (wq_completion)bond96 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond96 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond96 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond96 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond96 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond96 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond96 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond96 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond96 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond112#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond91#4 &rq->__lock irq_context: 0 (wq_completion)bond91#4 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond112#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond112#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond118#3 irq_context: 0 (wq_completion)bond118#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond118#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond118#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond118#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond126 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond118#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond118#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond118#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 
(wq_completion)bond118#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond123 irq_context: 0 (wq_completion)bond123 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond123 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond123 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond123 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond123 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond123 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond123 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond123 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond123 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond123 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond123 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond131 irq_context: 0 (wq_completion)bond131 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond131 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond131 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond131 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond131 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond131 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond131 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond131 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond131 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond131 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond107#4 &rq->__lock irq_context: softirq &(&bond->mcast_work)->timer rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override &n->list_lock irq_context: 0 (wq_completion)bond120#2 irq_context: 0 (wq_completion)bond120#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond120#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond120#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond120#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond14#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond14#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond14#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond14#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond14#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex 
rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond14#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond14#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond14#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond14#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond14#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond14#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond14#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond14#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond14#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond14#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond14#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond14#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond14#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond14#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond14#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond14#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond14#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond14#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond14#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond14#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond14#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond14#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond120#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond120#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock 
irq_context: 0 (wq_completion)bond120#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond120#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond22#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond22#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond22#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond22#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond22#4 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond107#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond107#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond107#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond107#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond107#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond107#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond107#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond107#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond107#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond107#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond107#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond107#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond107#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond107#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond107#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond107#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond107#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond107#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock irq_context: 0 (wq_completion)bond107#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond107#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock irq_context: 0 (wq_completion)bond107#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond107#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond107#2 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond107#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond107#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond107#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond107#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond107#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond107#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond107#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond107#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond107#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond107#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond107#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond107#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond107#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond107#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond107#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond107#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond107#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond110#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond110#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond122 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond122 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond101#3 &rq->__lock irq_context: 0 (wq_completion)bond101#3 
&rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond42#5 irq_context: 0 (wq_completion)bond42#5 &rq->__lock irq_context: 0 (wq_completion)bond42#5 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond42#5 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond42#5 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond42#5 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond42#5 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond42#5 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond42#5 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond42#5 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond42#5 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond42#5 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond42#5 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond120#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond120#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond124 irq_context: 0 (wq_completion)bond124 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond124 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond124 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond124 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond124 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond124 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond124 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond124 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond124 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond124 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond119#3 irq_context: 0 (wq_completion)bond119#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond119#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond119#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond119#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond119#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond119#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond119#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond119#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond119#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 
(wq_completion)bond96#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond112#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond132 irq_context: 0 (wq_completion)bond132 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond132 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond132 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond132 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond132 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond132 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond132 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond132 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond112#4 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond121#2 irq_context: 0 (wq_completion)bond121#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond121#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond121#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond121#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond121#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond121#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock krc.lock &obj_hash[i].lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock krc.lock &base->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock krc.lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond127 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond127 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond121#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond121#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond121#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond121#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond43#4 irq_context: 0 (wq_completion)bond43#4 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond43#4 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond43#4 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond43#4 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond43#4 
(work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond43#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond43#4 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond43#4 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond43#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond43#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond43#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond43#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond125#2 irq_context: 0 (wq_completion)bond125#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond125#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond125#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond125#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond125#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond125#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond125#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond125#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond40#5 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond40#5 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond40#5 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond40#5 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond40#5 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond96#2 &rq->__lock irq_context: 0 (wq_completion)bond96#2 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond96 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond96 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond96 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond96 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond96 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond98 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond98 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond98 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond98 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond98 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond98 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond98 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond98 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond98 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond98 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond98 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond98 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond98 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond98 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond98 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond98 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond98 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond98 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond98 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond98 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond98 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond98 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond98 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock batched_entropy_u8.lock irq_context: 0 (wq_completion)bond98 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock kfence_freelist_lock irq_context: 0 (wq_completion)bond98 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond98 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond98 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond98 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond98 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond98 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond98 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond98 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock 
rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond98 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond98 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond98 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond98 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond98 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond98 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond98 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond98 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond120#3 irq_context: 0 (wq_completion)bond120#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond120#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond120#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond120#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond120#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond120#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond120#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond120#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond94#3 &rq->__lock irq_context: 0 (wq_completion)bond94#3 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond96#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond96#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond96#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 (wq_completion)bond96#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond96#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond96#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond96#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock 
&ndev->lock irq_context: 0 (wq_completion)bond96#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond96#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond96#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond96#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond96#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond96#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond96#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond96#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond96#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond96#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond96#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond96#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond96#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock irq_context: 0 (wq_completion)bond96#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond96#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock irq_context: 0 (wq_completion)bond96#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond96#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond96#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond96#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond96#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond96#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond96#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond96#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond96#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond96#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond96#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond96#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex 
&idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond96#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond96#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond96#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond96#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond96#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond96#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond96#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond96#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond96#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond96#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond96#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond130 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond130 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond130 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 (wq_completion)bond130 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond130 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond130 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond130 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond130 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond130 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond130 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond130 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex 
&obj_hash[i].lock irq_context: 0 (wq_completion)bond130 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond130 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond130 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond130 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond130 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond130 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock irq_context: 0 (wq_completion)bond130 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond130 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond130 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond130 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond130 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond130 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond130 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond130 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond130 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond130 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond130 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond130 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond130 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond130 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond130 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond130 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond130 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 
0 (wq_completion)bond130 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond112#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond112#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond112#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 (wq_completion)bond112#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond112#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond112#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond112#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond112#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond112#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond112#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond112#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond112#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond112#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond112#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock &rq->__lock irq_context: 0 (wq_completion)bond112#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond112#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond112#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond112#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond112#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond112#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond112#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond112#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond112#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond112#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond112#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock 
irq_context: 0 (wq_completion)bond112#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond112#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond112#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond112#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond112#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond112#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond112#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond112#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond112#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond112#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond112#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond112#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond112#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond112#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond112#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond133 irq_context: 0 (wq_completion)bond133 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond133 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond133 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond133 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond133 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond133 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond133 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond133 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond133 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond133 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond122#2 irq_context: 0 (wq_completion)bond122#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond122#2 
(work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond122#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond122#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond117#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond132 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond132 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond99#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond99#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond99#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond99#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond99#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond122#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond122#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond122#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond122#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond122#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond122#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond122#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond122#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond122#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond122#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond122#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond122#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond122#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond122#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond122#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond115#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond115#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond115#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond115#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond115#4 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond27#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond27#2 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond27#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond27#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond27#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond27#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond27#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond27#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond27#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond27#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond27#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond27#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond27#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond27#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond27#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond27#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond27#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond27#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond27#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond27#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond27#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond27#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond27#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond27#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond27#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock 
rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond27#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond27#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond106 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond106 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond119 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond119 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond44#4 irq_context: 0 (wq_completion)bond44#4 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond44#4 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond44#4 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond44#4 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond44#4 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond44#4 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond44#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond44#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond126#2 irq_context: 0 (wq_completion)bond126#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond126#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond126#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond126#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond126#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond126#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond126#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond126#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond126#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond126#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond126#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond86#4 &rq->__lock irq_context: 0 (wq_completion)bond86#4 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond124 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond124 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond39#5 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond39#5 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond116 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond116 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock 
&per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_read_lock fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_read_lock fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_read_lock fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)bond114 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond114 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond119#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond119#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond121#3 irq_context: 0 (wq_completion)bond121#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond121#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond121#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond121#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond120 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond121#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond121#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond121#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond121#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond95#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond95#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond94#2 &rq->__lock irq_context: 0 (wq_completion)bond94#2 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond63 &rq->__lock irq_context: 0 (wq_completion)bond63 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond34#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond34#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond23#5 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond23#5 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond114 &rq->__lock irq_context: 0 (wq_completion)bond114 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond115#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond115#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond134 irq_context: 0 (wq_completion)bond134 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond134 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond134 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond134 
(work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond134 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond134 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond134 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond134 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond134 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond134 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond27#2 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond123#2 irq_context: 0 (wq_completion)bond123#2 &rq->__lock irq_context: 0 (wq_completion)bond123#2 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond123#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond123#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond123#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond123#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond123#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond123#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond29#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond29#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond123#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond123#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond123#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond123#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond113 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond113 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond121#2 (work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond18#4 (work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)bond18#4 (work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 (wq_completion)bond18#4 (work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)bond18#4 (work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond45#3 irq_context: 0 (wq_completion)bond45#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond45#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond45#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 
(wq_completion)bond45#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond45#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond129 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond45#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond45#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond45#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond45#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond114#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond127#2 irq_context: 0 (wq_completion)bond127#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond127#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond127#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond127#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond127#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond127#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond127#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond127#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond119 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond27#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond27#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond122#3 irq_context: 0 (wq_completion)bond122#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond122#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond122#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond122#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond122#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond122#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond122#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond122#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond109#3 (work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond100#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond100#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond100#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock 
irq_context: 0 (wq_completion)bond100#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond100#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond113 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond113 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond113 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond113 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond113 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond113 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond113 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond113 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond113 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond113 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond113 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond113 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond113 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond113 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond113 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond113 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond113 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond113 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond113 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond113 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond113 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond113 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond113 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond113 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond113 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond113 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond113 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond113 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond120#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond120#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond125#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond125#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond113 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond113 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq irq_context: 0 (wq_completion)bond113 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 (wq_completion)bond113 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond113 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond113 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond113 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond113 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond113 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond113 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond113 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond113 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond113 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond113 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond113 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond113 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 
(wq_completion)bond113 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond113 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond113 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond116#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond116#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond135 irq_context: 0 (wq_completion)bond135 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond135 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond135 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond135 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond135 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond135 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond135 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond135 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond135 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: softirq &(&bond->mcast_work)->timer rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override &pcp->lock &zone->lock irq_context: softirq &(&bond->mcast_work)->timer rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override &pcp->lock &zone->lock &____s->seqcount irq_context: 0 (wq_completion)bond119#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond116 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond87#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond87#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond87#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond87#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond87#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond87#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond87#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond87#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond87#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond87#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond87#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond87#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond87#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex 
&idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond87#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond87#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond87#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond87#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond87#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond87#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond87#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond87#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond87#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond87#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond87#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond87#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond87#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond87#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond87#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond87#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond87#4 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond129 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond129 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond129 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &rq->__lock irq_context: 0 (wq_completion)bond129 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond129 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex 
&tn->lock irq_context: 0 (wq_completion)bond129 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond129 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond124#2 irq_context: 0 (wq_completion)bond124#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond124#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond124#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond124#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond124#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond124#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond124#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond124#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond124#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond124#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond97#4 &rq->__lock irq_context: 0 (wq_completion)bond97#4 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond89#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond89#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond116 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond116 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond116 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond116 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond116 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond116 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond116 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond116 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond116 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond116 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond116 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond116 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond116 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond116 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond116 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond116 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond116 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond116 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond116 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond116 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond116 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond116 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond116 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond116 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond116 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond116 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond116 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond116 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond116 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond116 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond116 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond28#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond28#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond28#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond28#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond28#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond28#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond28#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond28#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond28#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond28#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex 
&tn->lock irq_context: 0 (wq_completion)bond28#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond28#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond28#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond28#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond28#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond28#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond28#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond28#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond28#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond28#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond28#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond28#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond28#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond28#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond28#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond28#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond28#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond28#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond28#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond29#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond29#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond46#2 irq_context: 0 (wq_completion)bond46#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond46#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond46#2 
(work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond46#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond46#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond46#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond46#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond46#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond46#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond46#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond46#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond38#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond107#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond107#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond128#2 irq_context: 0 (wq_completion)bond128#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond128#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond128#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond128#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond128#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond97#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond97#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond98#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond114#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond114#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond114#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond114#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond114#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond114#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond114#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond114#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond114#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond114#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond114#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond114#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 
(wq_completion)bond114#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond114#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond114#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond114#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond114#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond114#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond114#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond114#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond114#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond114#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond114#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond114#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond114#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond114#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond128#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond128#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond128#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond128#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond33#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond33#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond33#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond33#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond33#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond33#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond33#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock 
irq_context: 0 (wq_completion)bond33#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond33#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond33#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond33#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond33#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond33#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond33#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond33#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond33#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond33#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond33#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond33#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond33#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond33#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond33#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond33#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond33#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond33#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond125#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond125#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond125#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond125#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond125#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond84#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond84#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 
(wq_completion)bond84#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond84#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond84#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond120#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond120#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond120#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond120#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond120#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond120#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond120#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond120#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond120#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond120#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond120#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond120#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond120#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond120#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim &rq->__lock irq_context: 0 (wq_completion)bond120#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond120#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond120#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond120#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond120#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond120#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond120#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond120#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond120#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond120#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock 
rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond120#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond120#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond120#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond120#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond120#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond120#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond120#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond120#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond120#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond120#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond120#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond123#3 irq_context: 0 (wq_completion)bond123#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond123#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond123#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond123#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond123#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond123#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond123#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond123#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond123#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond123#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond136 irq_context: 0 (wq_completion)bond136 &rq->__lock irq_context: 0 (wq_completion)bond136 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond136 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond136 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond136 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond136 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond136 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, 
cpu)->seq irq_context: 0 (wq_completion)bond127#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond127#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond136 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond136 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond136 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond136 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond136 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond125#3 irq_context: 0 (wq_completion)bond125#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond125#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond125#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond125#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond125#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond125#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond125#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond125#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond25#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond47 irq_context: 0 (wq_completion)bond47 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond47 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond47 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond47 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond114 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond114 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond114 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond114 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond114 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond114 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond114 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond114 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond114 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond114 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond114 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond114 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 
irq_context: 0 (wq_completion)bond114 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond114 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond114 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond114 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond114 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond114 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock irq_context: 0 (wq_completion)bond114 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond114 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock irq_context: 0 (wq_completion)bond94 (work_completion)(&(&bond->mcast_work)->work) &base->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond47 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond47 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond47 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond47 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond114 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond114 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond114 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond114 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond114 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond114 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond114 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond114 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond114 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond114 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond114 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex 
&idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond114 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond114 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond114 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond114 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond114 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond114 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond114 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond114 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond114 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond114 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond114 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond129#2 irq_context: 0 (wq_completion)bond129#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond129#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond129#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond129#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond129#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond129#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond129#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond129#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond124#3 irq_context: 0 (wq_completion)bond124#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond124#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond124#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond124#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond124#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond124#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond111#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond111#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond128 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond124#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond124#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock 
irq_context: 0 (wq_completion)bond124#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond124#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond45#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond137 irq_context: 0 (wq_completion)bond137 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond137 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond137 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond137 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond137 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond137 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond137 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond137 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond47 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond47 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond126#3 irq_context: 0 (wq_completion)bond126#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond126#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond126#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond126#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond126#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond94#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond94#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond94#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond94#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond94#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond94#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond94#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond94#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond94#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond94#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond94#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond94#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond94#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 
(wq_completion)bond94#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond94#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond94#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond94#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond94#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond94#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond94#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond94#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond94#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond94#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond94#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond94#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond123#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond123#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond123#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond123#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond123#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond111#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond111#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond111#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond111#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond111#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond108#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond108#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond108#2 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond108#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond108#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond108#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond108#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond108#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond108#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond108#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond108#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond108#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond108#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond108#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond108#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond108#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond108#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond108#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond108#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond108#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond108#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond108#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond108#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond108#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond108#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond108#2 
(work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond113#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond113#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &rq->__lock irq_context: 0 (wq_completion)bond126#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond126#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond126#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond126#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond113#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond113#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond113#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond113#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond113#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond113#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond97#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond97#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond97#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond97#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond97#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond97#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond97#4 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond13#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond13#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &rq->__lock irq_context: 0 (wq_completion)bond13#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond13#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond13#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond13#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond13#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond13#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond13#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond13#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond48 irq_context: 0 (wq_completion)bond48 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond48 
(work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond48 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond48 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond136 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond136 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond136 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond136 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond136 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond116#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond116#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond116#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond116#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond116#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond116#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond116#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond116#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond116#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond116#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond116#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond116#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond116#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond116#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond116#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock irq_context: 0 (wq_completion)bond116#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond116#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond116#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond116#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond116#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond116#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond116#2 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond116#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond116#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond116#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond116#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond116#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond116#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond116#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond116#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond116#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond116#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond116#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond116#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond48 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond48 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond48 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond48 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond92#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond45#3 &rq->__lock irq_context: 0 (wq_completion)bond45#3 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond130#2 irq_context: 0 (wq_completion)bond130#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond130#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond130#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond130#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond130#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond130#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock 
irq_context: 0 (wq_completion)bond130#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond130#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond84#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond84#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond38#4 &rq->__lock irq_context: 0 (wq_completion)bond38#4 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond33#3 (work_completion)(&(&slave->notify_work)->work) &base->lock fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)bond33#3 (work_completion)(&(&slave->notify_work)->work) &base->lock fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 (wq_completion)bond33#3 (work_completion)(&(&slave->notify_work)->work) &base->lock fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)bond33#3 (work_completion)(&(&slave->notify_work)->work) &base->lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)bond33#3 (work_completion)(&(&slave->notify_work)->work) &base->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)wg-kex-wg0#17 (work_completion)(&peer->clear_peer_work) &peer->keypairs.keypair_update_lock &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)wg-kex-wg0#17 (work_completion)(&peer->clear_peer_work) &rq->__lock irq_context: 0 (wq_completion)bond125#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond125#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-crypt-wg1#4 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &tbl->lock pool_lock#2 irq_context: 0 (wq_completion)wg-crypt-wg1#4 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &tbl->lock batched_entropy_u32.lock irq_context: 0 (wq_completion)wg-crypt-wg1#4 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &tbl->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond125#4 irq_context: 0 (wq_completion)bond125#4 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond125#4 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond125#4 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond125#4 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond125#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond125#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond125#4 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond125#4 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond125#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond125#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond37#5 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond37#5 
(work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond129#2 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond138 irq_context: 0 (wq_completion)bond138 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond138 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond138 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond138 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond138 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond109#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond109#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond138 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond138 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond138 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond138 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond127 &rq->__lock irq_context: 0 (wq_completion)bond127 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond121#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond121#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond35#5 (work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond127#3 irq_context: 0 (wq_completion)bond127#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond127#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond127#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond127#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond127#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond127#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond127#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond127#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond127#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond127#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond121#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond121#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond48 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond48 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond49 irq_context: 0 (wq_completion)bond49 
(work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond49 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond49 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond49 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond49 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond49 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond121 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond121 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond49 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond49 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond49 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond49 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond131#2 irq_context: 0 (wq_completion)bond131#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond131#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond131#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond131#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond131#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond131#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond101#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond101#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond101#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond101#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond101#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond101#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond101#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond101#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond101#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond101#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond101#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond101#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond101#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond101#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim 
mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond101#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond101#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond101#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond101#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond101#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond101#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond101#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond101#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond101#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond101#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond101#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond101#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond101#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond101#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond101#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond101#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond101#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond101#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond131#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond131#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond131#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond131#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond21#5 (work_completion)(&(&bond->mcast_work)->work) 
&rq->__lock irq_context: 0 (wq_completion)bond21#5 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond139 irq_context: 0 (wq_completion)bond139 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond139 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond139 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond139 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond139 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond139 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond139 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond139 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond105#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond105#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond105#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond105#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond105#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond40#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond40#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond40#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond40#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond40#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond40#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond40#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond40#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond40#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond40#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond40#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond40#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock irq_context: 0 (wq_completion)bond40#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond40#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond40#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond40#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond40#5 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond40#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond40#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond40#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond40#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond40#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond40#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond40#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond40#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond40#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond40#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond40#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond40#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond40#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond40#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond40#5 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond40#5 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond40#5 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-kex-wg1#17 (work_completion)(&peer->transmit_handshake_work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond50 irq_context: 0 (wq_completion)bond50 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond50 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond50 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond50 (work_completion)(&(&slave->notify_work)->work) &base->lock 
&obj_hash[i].lock irq_context: 0 (wq_completion)bond102 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond102 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond50 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond50 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond50 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond50 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond135 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond132#2 irq_context: 0 (wq_completion)bond132#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond132#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond132#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond132#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond132#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond132#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond132#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond132#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond132#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond132#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond132#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond132#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond119#2 &rq->__lock irq_context: 0 (wq_completion)bond119#2 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex uevent_sock_mutex rcu_read_lock &cfs_rq->removed.lock irq_context: 0 rtnl_mutex uevent_sock_mutex rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond127#4 irq_context: 0 (wq_completion)bond127#4 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond127#4 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond127#4 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond127#4 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond127#4 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond127#4 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond127#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond127#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond127#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond127#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond119#2 
(work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond119#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond137 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond137 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond138 &rq->__lock irq_context: 0 (wq_completion)bond138 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond115#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond91#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond91#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond140 irq_context: 0 (wq_completion)bond140 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond140 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond140 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond140 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond140 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond140 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond140 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond140 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond97#4 (work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)bond97#4 (work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 (wq_completion)bond97#4 (work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)bond97#4 (work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond51 irq_context: 0 (wq_completion)bond51 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond51 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond51 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond51 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond51 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond51 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond51 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond51 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond51 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond51 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond114#3 
(work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond114#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond124#2 &rq->__lock irq_context: 0 (wq_completion)bond127#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond127#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond127#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond127#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond127#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond127#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond127#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond117 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond117 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond117 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &rq->__lock irq_context: 0 (wq_completion)bond117 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond117 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond132 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond117 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond132 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond132 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond132 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond121 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond132 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond121 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond121 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond121 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond107#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond121 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond107#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond107#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond107#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond107#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond107#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond107#3 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond107#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond107#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond107#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond107#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond107#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond107#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond107#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond107#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond107#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond107#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond107#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond107#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond107#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond107#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond107#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond107#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond107#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond107#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond107#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond107#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond127 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond107#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond127 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 
irq_context: 0 (wq_completion)bond127 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond127 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond127 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond127 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond127 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim &rq->__lock irq_context: 0 (wq_completion)bond127 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond127 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond127 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond127 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex batched_entropy_u8.lock irq_context: 0 (wq_completion)bond127 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond127 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex kfence_freelist_lock irq_context: 0 (wq_completion)bond127 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond127 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond127 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond127 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond127 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rcu_state.gp_wq irq_context: 0 (wq_completion)bond127 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 (wq_completion)bond127 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond127 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond127 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond127 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond127 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &meta->lock irq_context: 0 (wq_completion)bond127 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond127 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond127 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond127 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &rq->__lock irq_context: 0 (wq_completion)bond127 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond120#2 
(work_completion)(&(&slave->notify_work)->work) &base->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond127 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond127 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond127 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock irq_context: 0 (wq_completion)bond127 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond127 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond127 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond127 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond127 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond127 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond127 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond127 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond127 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond127 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond127 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond127 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond127 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond127 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond127 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond127 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond127 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond127 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 
(wq_completion)bond127 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond127 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond127 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond30#2 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)bond30#2 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 (wq_completion)bond30#2 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)bond30#2 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond133#2 irq_context: 0 (wq_completion)bond133#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond133#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond133#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond133#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond133#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond133#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond133#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond133#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond128#3 irq_context: 0 (wq_completion)bond128#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond128#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond128#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond128#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond128#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond128#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond128#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond128#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond128#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond128#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond128#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond128#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond97#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond97#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond97#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond97#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond97#3 
(work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond97#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond97#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond97#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond128#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond128#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond128#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond128#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond128#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond128#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond128#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond128#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond128#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond128#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond128#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond128#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond128#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond128#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond128#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond128#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond128#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond128#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond128#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond128#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond128#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond128#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond128#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond128#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond128#2 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond128#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond128#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond128#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond128#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond128#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond128#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond128#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond128#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond128#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond128#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond128#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond128#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond128#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond41#5 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond41#5 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond141 irq_context: 0 (wq_completion)bond141 &rq->__lock irq_context: 0 (wq_completion)bond141 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond141 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond141 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond141 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond141 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond141 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond141 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond141 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond141 (work_completion)(&(&bond->mcast_work)->work) 
&obj_hash[i].lock irq_context: 0 (wq_completion)bond141 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond141 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond126#2 &rq->__lock irq_context: 0 (wq_completion)bond126#2 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond52 irq_context: 0 (wq_completion)bond52 &rq->__lock irq_context: 0 (wq_completion)bond52 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond52 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond52 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond52 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond32#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond32#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond32#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond32#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond32#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond32#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond32#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond52 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond52 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond52 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond52 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond36#5 &rq->__lock irq_context: 0 (wq_completion)bond49 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond49 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond44#4 &rq->__lock irq_context: 0 (wq_completion)bond44#4 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond27#2 &rq->__lock irq_context: 0 (wq_completion)bond27#2 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond123#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond123#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond136 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond37#5 &rq->__lock irq_context: 0 (wq_completion)bond37#5 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond88#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond88#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex wq_pool_mutex &wq->mutex &rq->__lock irq_context: 0 rtnl_mutex wq_pool_mutex &wq->mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond129#2 
(work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond129#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond129#3 irq_context: 0 (wq_completion)bond129#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond129#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond129#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond129#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond129#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond129#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond129#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond129#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond110#4 (work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)bond110#4 (work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 (wq_completion)bond110#4 (work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)bond110#4 (work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond123#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond123#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond118 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond118 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond41#5 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond41#5 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond142 irq_context: 0 (wq_completion)bond142 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond142 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond142 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond142 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond142 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond142 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond142 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond142 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond140 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond118#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond118#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond53 irq_context: 0 (wq_completion)bond53 
(work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond53 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond53 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond53 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)mm_percpu_wq (work_completion)(&(({ do { const void *__vpp_verify = (typeof((&vmstat_work) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((&vmstat_work))) *)((&vmstat_work)))); (typeof((typeof(*((&vmstat_work))) *)((&vmstat_work)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); }))->work) fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)mm_percpu_wq (work_completion)(&(({ do { const void *__vpp_verify = (typeof((&vmstat_work) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((&vmstat_work))) *)((&vmstat_work)))); (typeof((typeof(*((&vmstat_work))) *)((&vmstat_work)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); }))->work) fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 (wq_completion)mm_percpu_wq (work_completion)(&(({ do { const void *__vpp_verify = (typeof((&vmstat_work) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((&vmstat_work))) *)((&vmstat_work)))); (typeof((typeof(*((&vmstat_work))) *)((&vmstat_work)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); }))->work) fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)bond52 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond53 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond53 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond53 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond53 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond101#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond101#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond101#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond101#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond101#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond111#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond111#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond111#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond111#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond111#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond111#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond111#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 
(wq_completion)bond111#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond111#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond111#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond111#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond111#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond111#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond111#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond111#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond111#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond111#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond111#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond111#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond111#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond111#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond111#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond111#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond111#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond111#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond111#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond111#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond111#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond111#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 
(wq_completion)bond111#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond111#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond111#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond130#3 irq_context: 0 (wq_completion)bond130#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond130#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond130#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond130#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond46#2 &rq->__lock irq_context: 0 (wq_completion)bond130#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond130#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond130#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond130#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &tb->tb6_lock batched_entropy_u8.lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &tb->tb6_lock kfence_freelist_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &tb->tb6_lock &meta->lock irq_context: 0 (wq_completion)bond113#4 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond39#5 (work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)bond39#5 (work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 (wq_completion)bond39#5 (work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)bond39#5 (work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond127#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond127#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond124#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond124#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond143 irq_context: 0 (wq_completion)bond143 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond143 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond143 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond143 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond143 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond133#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond133#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond143 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 
(wq_completion)bond143 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond143 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond143 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond141 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond141 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond54 irq_context: 0 (wq_completion)bond54 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond54 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond54 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond54 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond54 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond54 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond91#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond91#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond120#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond54 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond54 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond54 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond54 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond54 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond54 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq (&ndev->rs_timer) &zone->lock irq_context: 0 (wq_completion)bond50 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond50 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond134 &rq->__lock irq_context: 0 (wq_completion)bond134 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock quarantine_lock irq_context: 0 (wq_completion)bond127#4 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond131#3 irq_context: 0 (wq_completion)bond131#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond131#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond131#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond131#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond142 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond142 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond142 
(work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond142 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond142 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond131#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond131#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond131#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond131#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond131#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond131#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond131#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond131#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond131#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond131#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond131#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond131#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond131#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond131#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond131#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond126#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond126#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond126#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond126#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond126#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond54 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond54 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond54 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond54 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond54 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond54 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond54 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond54 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond54 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem 
irq_context: 0 (wq_completion)bond54 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond54 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond54 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond54 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond54 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond125 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond125 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond125 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond125 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond125 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond109#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond109#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond109#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond109#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond109#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond109#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond109#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond109#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond109#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond109#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond109#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond109#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond109#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond109#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &rq->__lock irq_context: 0 (wq_completion)bond109#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond109#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond109#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond109#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond109#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond109#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond109#2 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond109#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond109#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond109#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond109#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond109#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond109#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond109#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond109#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond109#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond109#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond109#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond109#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond109#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond109#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond109#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond109#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond109#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond109#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond109#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond109#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond109#2 
(work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond109#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond109#2 (work_completion)(&(&bond->mcast_work)->work) &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond109#2 (work_completion)(&(&bond->mcast_work)->work) pool_lock#2 irq_context: 0 (wq_completion)bond125#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond125#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond25#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond25#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond143 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond143 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim &rq->__lock irq_context: 0 (wq_completion)bond143 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond143 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond143 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond143 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond143 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond143 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond143 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond143 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond143 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond143 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond143 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond143 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond143 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond143 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock irq_context: 0 (wq_completion)bond143 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond143 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond143 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond143 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond143 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond143 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond143 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock 
&per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond108#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond108#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond108#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &rq->__lock irq_context: 0 (wq_completion)bond108#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond108#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond108#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond130 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond108#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond130 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond130 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond130 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond130 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond13#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond13#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond13#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond13#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond13#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond13#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond13#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond13#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond13#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond13#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond13#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond13#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond13#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond13#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond13#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond13#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond13#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond13#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock 
&ndev->lock irq_context: 0 (wq_completion)bond13#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond13#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond13#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond13#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond13#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond13#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond13#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond13#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond13#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond13#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond13#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond13#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond13#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond13#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond13#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond13#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond13#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond13#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond13#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond13#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond13#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 
(wq_completion)bond13#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond13#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond13#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond13#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond144 irq_context: 0 (wq_completion)bond144 &rq->__lock irq_context: 0 (wq_completion)bond144 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond144 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond144 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond144 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond144 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond144 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond144 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond144 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond144 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond123 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond55 irq_context: 0 (wq_completion)bond55 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond55 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond55 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond55 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond55 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond55 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond55 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond55 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond126#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond117#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond12#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond39#5 &rq->__lock irq_context: 0 (wq_completion)bond39#5 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond138 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond128#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) &meta->lock irq_context: 0 (wq_completion)bond106 &rq->__lock irq_context: 0 (wq_completion)bond106 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond132#3 irq_context: 0 (wq_completion)bond132#3 
(work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond132#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond132#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond132#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond132#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond132#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond132#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond132#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond132#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond132#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond116#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond116#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond145 irq_context: 0 (wq_completion)bond145 &rq->__lock irq_context: 0 (wq_completion)bond145 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond145 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond145 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond145 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond145 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond55 &rq->__lock irq_context: 0 (wq_completion)bond55 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond145 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond145 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond145 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond145 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond37#5 (work_completion)(&(&bond->mcast_work)->work) &base->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond109#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond109#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond56 irq_context: 0 (wq_completion)bond56 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond56 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond56 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond88#3 (work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)bond88#3 (work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 (wq_completion)bond88#3 (work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)bond88#3 
(work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond56 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond56 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond56 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond85#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond85#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond85#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond85#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond85#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond85#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond85#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond85#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond85#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond85#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond85#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond85#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond85#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond85#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond85#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond85#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond85#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond85#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock irq_context: 0 (wq_completion)bond85#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond85#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock irq_context: 0 (wq_completion)bond85#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond85#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond85#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond85#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &____s->seqcount#2 irq_context: 0 (wq_completion)bond85#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &____s->seqcount irq_context: 0 
(wq_completion)bond85#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond85#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond85#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond85#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond85#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond85#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond85#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond85#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond85#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond56 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond56 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond56 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond56 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond85#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond85#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond85#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond85#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond85#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond85#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond85#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond85#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond85#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock 
&per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond85#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond85#4 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond85#4 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond85#4 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond98#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond98#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond98#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond98#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond98#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond121#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond121#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond121#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond121#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond121#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond121#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond121#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond121#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond121#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond121#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond121#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond121#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond121#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond121#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond121#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &rq->__lock irq_context: 0 (wq_completion)bond121#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond121#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond121#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock &rq->__lock irq_context: 0 (wq_completion)bond121#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond121#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond121#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock 
fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond121#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 (wq_completion)bond121#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond121#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond121#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond121#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond121#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &n->list_lock irq_context: 0 (wq_completion)bond121#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond121#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond121#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond121#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond121#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond121#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond121#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond121#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond121#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond121#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond121#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond121#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond121#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond121#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock 
&rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond121#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond121#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond121#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond121#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond121#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond121#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond115#3 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond125#3 &rq->__lock irq_context: 0 (wq_completion)bond125#3 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond100#4 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond110#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond125#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond131#3 &rq->__lock irq_context: 0 (wq_completion)bond144 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond88#4 &rq->__lock irq_context: 0 (wq_completion)bond88#4 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond133#3 irq_context: 0 (wq_completion)bond133#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond133#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond133#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond133#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond133#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond133#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond133#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond133#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond92#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond92#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond146 irq_context: 0 (wq_completion)bond146 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond146 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond146 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond146 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond111#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond111#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond146 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond146 
(work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond146 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond146 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond110#4 (work_completion)(&(&slave->notify_work)->work) &base->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond57 irq_context: 0 (wq_completion)bond57 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond57 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond57 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond57 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond57 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond57 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond57 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond57 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond115#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond115#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond133#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond133#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond137#2 irq_context: 0 (wq_completion)bond137#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond137#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond137#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond137#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond137#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond137#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond137#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond137#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond137#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond137#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond137#2 (work_completion)(&(&bond->mcast_work)->work) &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &ei->xattr_sem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond134#2 irq_context: 0 (wq_completion)bond134#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond134#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond134#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond134#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond134#2 
(work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond134#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond119 &rq->__lock irq_context: 0 (wq_completion)bond119 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond124#3 &rq->__lock irq_context: 0 (wq_completion)bond124#3 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond101 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond101 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond134#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond134#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond134#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond134#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->mcast.work)->work) fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->mcast.work)->work) fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)bond109 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond147 irq_context: 0 (wq_completion)bond147 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond147 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond147 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond147 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond108#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond147 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond147 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond147 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond147 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond58 irq_context: 0 (wq_completion)bond58 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond58 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond58 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond58 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond58 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond58 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond58 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond58 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond121 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_read_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond121 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond121 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond121 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond121 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond121 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond121 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond121 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond121 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond121 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond121 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond121 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond121 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond121 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond121 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond121 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim &rq->__lock irq_context: 0 (wq_completion)bond121 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond121 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond121 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 (wq_completion)bond121 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond121 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond121 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock irq_context: 0 (wq_completion)bond121 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond121 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond121 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond121 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond121 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond121 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 
(wq_completion)bond121 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond121 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond121 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond121 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond121 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond121 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond121 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond121 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond121 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond121 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond121 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond121 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond121 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond121 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond121 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond121 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond121 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond115#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond115#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond115#3 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&(&hwstats->traffic_dw)->work) &base->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond138#2 irq_context: 0 (wq_completion)bond138#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond138#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond138#2 
(work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond138#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond138#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond138#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond143 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond143 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond143 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond143 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond143 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond138#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond138#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond138#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond138#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond138#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond138#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond56 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond55 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond55 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond135#2 irq_context: 0 (wq_completion)bond135#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond135#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond135#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond135#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond135#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond135#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond130#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond130#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond122#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond122#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond135#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond135#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond135#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond135#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond108#3 
(work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond148 irq_context: 0 (wq_completion)bond148 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond148 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond148 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond148 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond148 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond148 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond148 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond148 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond134 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond134 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond127#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond127#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond118#2 &rq->__lock irq_context: 0 (wq_completion)bond129#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond129#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond59#2 irq_context: 0 (wq_completion)bond59#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond59#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond59#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond59#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond59#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond118#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond118#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond137 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond137 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond59#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond59#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond59#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond59#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond46#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond46#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond46#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond46#2 (work_completion)(&(&bond->mcast_work)->work) 
rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond46#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond46#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond46#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond46#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond46#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond46#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond46#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond46#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond46#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond46#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond46#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond46#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond46#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond46#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond46#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond46#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond46#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond46#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond46#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond46#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond46#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond46#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond46#2 (work_completion)(&(&bond->mcast_work)->work) 
rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond46#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond46#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond46#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond46#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond46#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond46#2 (work_completion)(&(&bond->mcast_work)->work) &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond139#2 irq_context: 0 (wq_completion)bond139#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond139#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond139#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond139#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 (wq_completion)bond58 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond58 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond59#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond59#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond139#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond139#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond139#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond139#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond48 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond48 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond97#3 &rq->__lock irq_context: 0 (wq_completion)bond97#3 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond52 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond52 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond129#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond129#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond148 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond148 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex bus_type_sem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond139#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond139#2 (work_completion)(&(&bond->mcast_work)->work) 
&rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond136#2 irq_context: 0 (wq_completion)bond136#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond136#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond136#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond136#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond136#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond136#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond112#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond112#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond136#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond136#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond136#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond136#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond110#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond135#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond47 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond47 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond47 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond47 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond47 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond112#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond112#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond112#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond112#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond112#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond112#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond112#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond112#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond112#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond112#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond112#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond112#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 
0 (wq_completion)bond112#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond112#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond112#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond112#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond112#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond112#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond112#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond112#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond112#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond112#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond112#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond112#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond112#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond112#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond112#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond112#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond112#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond112#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond133#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond133#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond133#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond133#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock 
irq_context: 0 (wq_completion)bond133#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond133#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond133#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond133#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond133#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond133#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond133#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond133#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond133#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond133#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond133#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock irq_context: 0 (wq_completion)bond133#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond133#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond133#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond133#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond133#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond133#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond133#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond133#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond133#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond133#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond133#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond133#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond133#3 (work_completion)(&(&bond->mcast_work)->work) 
rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond98#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond133#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond133#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond133#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond133#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq irq_context: 0 (wq_completion)bond133#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 (wq_completion)bond133#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond133#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond133#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond133#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond133#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond133#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond133#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond133#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond133#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond137#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond129#3 (work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond133#3 (work_completion)(&(&bond->mcast_work)->work) &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond137#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond124#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond136#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond136#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond136#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond136#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond136#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond125#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond125#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 
(wq_completion)bond125#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond125#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond125#4 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond102#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond102#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond102#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond102#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond102#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond39#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond39#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond39#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond39#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond39#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond39#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond39#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond39#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond39#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond39#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond39#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond39#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond39#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond39#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond39#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond39#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond39#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond39#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond39#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond39#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond39#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond39#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock 
&____s->seqcount#10 irq_context: 0 (wq_completion)bond39#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond39#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond39#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond39#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond39#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond39#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond39#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond39#5 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond39#5 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond39#5 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond39#5 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond124#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond60#2 irq_context: 0 (wq_completion)bond60#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond60#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond60#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond60#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond60#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond60#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond60#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond60#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond60#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond60#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond88#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond88#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond137#3 irq_context: 0 (wq_completion)bond137#3 (work_completion)(&(&slave->notify_work)->work) 
irq_context: 0 (wq_completion)bond137#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond137#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond137#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond137#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond137#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond44#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond44#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond148 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond148 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond137#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond137#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond137#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond137#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond137#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond137#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond104#4 &rq->__lock irq_context: 0 (wq_completion)bond104#4 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&gc_work->dwork)->work) rcu_read_lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&gc_work->dwork)->work) rcu_read_lock fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&gc_work->dwork)->work) rcu_read_lock fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&gc_work->dwork)->work) rcu_read_lock fill_pool_map-wait-type-override rcu_node_0 irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&gc_work->dwork)->work) rcu_read_lock fill_pool_map-wait-type-override &rq->__lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&gc_work->dwork)->work) rcu_read_lock fill_pool_map-wait-type-override &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&gc_work->dwork)->work) rcu_read_lock fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&gc_work->dwork)->work) rcu_read_lock fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond141#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond141#2 irq_context: 0 (wq_completion)bond141#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond141#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond141#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond141#2 
(work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond141#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond141#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond141#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond141#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond141#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond125#4 (work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond124 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond124 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &rq->__lock irq_context: 0 (wq_completion)bond124 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond124 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond124 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond124 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond124 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond124 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond118#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond118#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond118#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond118#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond118#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond90#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond90#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond90#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond90#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond90#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond133 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond133 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond133 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond133 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond133 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond104#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond104#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim 
mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond104#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond104#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond104#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond104#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond104#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond104#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond104#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond104#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond104#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond104#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond104#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond104#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond104#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond104#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond104#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond104#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond104#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond104#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond104#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond104#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond104#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond104#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond104#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond104#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond104#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock 
rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond104#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond104#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond104#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond104#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond104#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond104#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond104#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond104#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond104#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond104#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond104#4 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond104#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond104#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond104#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &rq->__lock irq_context: 0 (wq_completion)bond104#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond104#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond104#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond104#4 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond106#2 (work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond61#2 irq_context: 0 (wq_completion)bond61#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond61#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond61#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond61#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond36#5 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond61#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond61#2 
(work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond61#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond61#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond61#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond61#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond145 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond23#5 &rq->__lock irq_context: 0 (wq_completion)bond23#5 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond133#2 &rq->__lock irq_context: 0 (wq_completion)bond133#2 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond94 &rq->__lock irq_context: 0 (wq_completion)bond94 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond140#2 irq_context: 0 (wq_completion)bond140#2 &rq->__lock irq_context: 0 (wq_completion)bond140#2 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond140#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond140#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond140#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond140#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond140#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond140#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond140#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond140#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond140#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond140#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond140#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond140#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond140#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond140#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond140#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond140#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond140#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond140#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond140#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond140#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond140#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock 
irq_context: 0 (wq_completion)bond140#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond140#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond121#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond121#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &rq->__lock irq_context: 0 (wq_completion)bond121#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond121#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond121#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond121#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond121#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond121#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond133 &rq->__lock irq_context: 0 (wq_completion)bond121#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond107#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond107#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond61#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond61#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond89 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond124#2 (work_completion)(&(&slave->notify_work)->work) &base->lock fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)bond124#2 (work_completion)(&(&slave->notify_work)->work) &base->lock fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 (wq_completion)bond124#2 (work_completion)(&(&slave->notify_work)->work) &base->lock fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)bond124#2 (work_completion)(&(&slave->notify_work)->work) &base->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond138#3 irq_context: 0 (wq_completion)bond138#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond138#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond138#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond138#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond138#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond138#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond138#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond138#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond142#2 irq_context: 0 (wq_completion)bond142#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond142#2 
(work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond142#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond142#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond142#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond142#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond142#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond142#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond142#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond142#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond149 irq_context: 0 (wq_completion)bond149 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond149 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond149 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond149 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond149 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond149 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond122#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond122#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond122#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 (wq_completion)bond122#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond122#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond122#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond122#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond122#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond122#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond122#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond122#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &rq->__lock irq_context: 0 (wq_completion)bond122#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond122#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond122#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond122#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond122#3 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond149 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond122#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock irq_context: 0 (wq_completion)bond149 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond122#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond149 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond149 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond122#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond122#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond122#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond122#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond122#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond122#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond122#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond122#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond122#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond122#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond122#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond122#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond122#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond122#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond122#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond122#3 
(work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond122#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond122#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond122#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond122#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond62#2 irq_context: 0 (wq_completion)bond62#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond62#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond62#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond62#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond62#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond62#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&net->ipv6.addr_chk_work)->work) rtnl_mutex rcu_read_lock_bh fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&net->ipv6.addr_chk_work)->work) rtnl_mutex rcu_read_lock_bh fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&net->ipv6.addr_chk_work)->work) rtnl_mutex rcu_read_lock_bh fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&net->ipv6.addr_chk_work)->work) rtnl_mutex rcu_read_lock_bh fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&net->ipv6.addr_chk_work)->work) rtnl_mutex rcu_read_lock_bh fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond62#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond62#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond62#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond62#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond62#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&net->ipv6.addr_chk_work)->work) &rq->__lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&net->ipv6.addr_chk_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock &pcp->lock &zone->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 (wq_completion)bond141#3 irq_context: 0 (wq_completion)bond141#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond141#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond141#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond141#3 (work_completion)(&(&slave->notify_work)->work) &base->lock 
&obj_hash[i].lock irq_context: 0 (wq_completion)bond141#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond141#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond141#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond141#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond141#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond141#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond141#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond141#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond117#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond117#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond117#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond117#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond117#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond88#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond88#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond88#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond88#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond88#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond106 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond106 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond106 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex remove_cache_srcu irq_context: 0 (wq_completion)bond106 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex remove_cache_srcu quarantine_lock irq_context: 0 (wq_completion)bond106 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex remove_cache_srcu &c->lock irq_context: 0 (wq_completion)bond106 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex remove_cache_srcu &n->list_lock irq_context: 0 (wq_completion)bond106 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)bond106 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex remove_cache_srcu &obj_hash[i].lock irq_context: 0 (wq_completion)bond106 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond106 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond106 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond106 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond106 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex 
nl_table_lock irq_context: 0 (wq_completion)bond106 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond106 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond106 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond106 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond106 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond106 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond106 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond106 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond106 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond106 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond106 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond106 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond106 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond106 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond106 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond106 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond106 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond106 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond106 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond106 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond106 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond106 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond106 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock 
rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond106 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond94 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond106 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond94 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond94 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond94 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond94 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond94 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond94 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond94 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond94 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond94 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond94 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond94 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond94 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond94 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond94 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond94 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond94 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond94 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &n->list_lock irq_context: 0 (wq_completion)bond94 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond94 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond94 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond94 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond94 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond94 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond94 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond94 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond94 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond94 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond94 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond94 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond94 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond94 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond94 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond94 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond94 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond94 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond94 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond94 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond94 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond139#3 irq_context: 0 (wq_completion)bond139#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond139#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond139#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond139#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond139#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond139#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond116#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond116#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond116#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond116#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex 
fs_reclaim irq_context: 0 (wq_completion)bond116#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond116#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond116#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond116#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond116#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond116#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond116#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond116#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond116#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond116#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond116#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock &rq->__lock irq_context: 0 (wq_completion)bond116#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond116#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond116#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock irq_context: 0 (wq_completion)bond116#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond139#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond139#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond139#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond139#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond139#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond139#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond116#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond116#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond116#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond116#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond116#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond116#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond116#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond116#3 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond116#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond116#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond116#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond116#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond116#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond116#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond116#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond116#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond116#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond116#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond116#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond116#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond116#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond116#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond116#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond119#2 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 rtnl_mutex rcu_state.barrier_mutex rcu_read_lock &stop_pi_lock &rq->__lock irq_context: 0 rtnl_mutex rcu_state.barrier_mutex rcu_read_lock &stop_pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond125#2 &rq->__lock irq_context: 0 (wq_completion)bond130#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond125#2 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond36#5 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond36#5 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq 
irq_context: 0 (wq_completion)bond109 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond150 irq_context: 0 (wq_completion)bond150 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond150 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond150 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond150 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond150 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond125#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond38#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond42#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond42#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond42#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond42#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond42#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond42#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond42#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond42#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond42#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond42#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond42#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond42#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond42#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond42#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond42#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond42#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond42#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond42#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond42#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 
(wq_completion)bond42#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond42#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond42#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond42#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond42#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond42#5 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond118 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond118 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond118 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond118 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond118 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond150 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond150 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond150 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond150 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond63#2 irq_context: 0 (wq_completion)bond63#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond63#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond63#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond63#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond63#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond63#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond63#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond63#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond63#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond119 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)bond119 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 (wq_completion)bond119 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)bond119 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond138 
(work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond60#2 &rq->__lock irq_context: 0 (wq_completion)bond138 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond60#2 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond142#3 irq_context: 0 (wq_completion)bond142#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond142#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond142#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond142#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond142#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond142#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond142#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond142#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond142#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond142#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond140#3 irq_context: 0 (wq_completion)bond140#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond140#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond140#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond140#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond140#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond140#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond140#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond140#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond120 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond138#3 &rq->__lock irq_context: 0 (wq_completion)bond138#3 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond93 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond93 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond137#3 &rq->__lock irq_context: 0 (wq_completion)bond137#3 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond134 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond134 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond134 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &rq->__lock irq_context: 0 (wq_completion)bond134 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond134 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 
(wq_completion)bond134 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond149 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond134 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond149 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond149 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond144 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond149 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond149 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond50 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond50 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond50 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond50 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond50 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond50 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond50 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond50 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond50 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond50 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond50 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond50 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond50 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond50 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond50 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock batched_entropy_u8.lock irq_context: 0 (wq_completion)bond50 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock kfence_freelist_lock irq_context: 0 (wq_completion)bond50 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond50 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond50 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond50 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond50 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond50 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock 
&per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond50 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond50 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond50 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond50 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond50 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond50 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond50 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond50 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &meta->lock irq_context: 0 (wq_completion)bond50 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock kfence_freelist_lock irq_context: 0 (wq_completion)bond50 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond50 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond50 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond50 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond50 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond50 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond143#2 irq_context: 0 (wq_completion)bond143#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond143#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond143#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond143#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond143#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond143#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond146 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock 
irq_context: 0 (wq_completion)bond143#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond143#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond143#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond143#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond143#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond143#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond55 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond55 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond151 irq_context: 0 (wq_completion)bond151 &rq->__lock irq_context: 0 (wq_completion)bond151 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond151 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond151 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond151 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond151 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond151 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond151 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond151 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond151 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond151 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond151 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond131 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond131 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond64 irq_context: 0 (wq_completion)bond64 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond64 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond64 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond64 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond114 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond114 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond114 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond114 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond114 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond114 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond114 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond114 
(work_completion)(&(&slave->notify_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond94#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond150 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond150 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond150 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond150 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond150 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 (wq_completion)bond150 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond150 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond150 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond150 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond150 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond150 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond150 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond150 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond150 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond150 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond150 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond150 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond150 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond150 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond150 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock irq_context: 0 (wq_completion)bond150 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond150 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond150 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond150 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond150 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond150 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond150 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 
(wq_completion)bond64 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond64 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond64 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond64 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond64 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond64 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond145 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond145 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_long (work_completion)(&(&br->gc_work)->work) rcu_read_lock &br->hash_lock &c->lock irq_context: 0 (wq_completion)bond145 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond143#3 irq_context: 0 (wq_completion)bond143#3 &rq->__lock irq_context: 0 (wq_completion)bond143#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond143#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond143#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond143#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond143#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond143#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond123#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond123#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond123#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond123#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond123#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond123#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond143#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond143#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond143#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond143#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond123#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex rcu_read_lock &rcu_state.gp_wq irq_context: 0 (wq_completion)bond123#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 (wq_completion)bond123#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond123#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock 
&rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond123#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond123#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond123#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond123#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond123#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond123#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond123#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond123#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond123#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond123#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond123#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond123#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond123#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond123#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond123#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond123#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond123#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &rq->__lock irq_context: 0 (wq_completion)bond123#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond123#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond123#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond123#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond123#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond123#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond123#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond123#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond123#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond123#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond123#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 
irq_context: 0 (wq_completion)bond123#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond123#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond123#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond123#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond123#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond123#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond123#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond123#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond123#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond123#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond123#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond123#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond123#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond123#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond123#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond123#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq (&app->join_timer) &app->lock &base->lock fill_pool_map-wait-type-override pool_lock#2 irq_context: softirq (&app->join_timer) &app->lock &base->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond141#4 irq_context: 0 (wq_completion)bond141#4 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond141#4 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond141#4 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond141#4 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond107 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 
(wq_completion)bond107 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond141#4 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond141#4 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond141#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond141#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond141#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond141#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond126#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond126#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond135 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond135 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond135 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond135 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &____s->seqcount#2 irq_context: 0 (wq_completion)bond135 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &____s->seqcount irq_context: 0 (wq_completion)bond135 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond135 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond135 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond135 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond135 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond135 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond135 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond135 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond135 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond135 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond135 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond135 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond135 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond135 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond135 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond135 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock 
rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond135 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond135 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond135 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond135 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond135 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond135 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond135 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond135 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond135 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond135 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond135 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock &rq->__lock irq_context: 0 (wq_completion)bond135 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond135 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond135 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond135 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond135 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond146 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond144#2 irq_context: 0 (wq_completion)bond144#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond144#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond144#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond144#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond144#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond144#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 nfnl_subsys_ctnetlink fs_reclaim irq_context: 0 nfnl_subsys_ctnetlink fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond134#2 (work_completion)(&(&bond->mcast_work)->work) 
&rq->__lock irq_context: 0 (wq_completion)bond134#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond146 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond146 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond86#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond86#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond133 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond144#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond144#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond144#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond144#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond144#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond144#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond63#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond63#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond132#3 (work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond152 irq_context: 0 (wq_completion)bond152 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond152 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond152 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond152 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond84#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond84#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond84#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond84#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond84#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond11#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond11#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond11#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond11#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond11#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond92#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond92#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond92#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex 
&tn->lock irq_context: 0 (wq_completion)bond92#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond92#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond110#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond110#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond110#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond110#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond110#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond45#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond45#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond45#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond45#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond45#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond45#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond45#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond45#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond45#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond45#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond45#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond45#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond45#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond45#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond45#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond45#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock irq_context: 0 (wq_completion)bond45#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond45#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond45#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond45#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond45#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond45#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, 
cpu)->seq irq_context: 0 (wq_completion)bond152 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond152 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond45#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond152 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond45#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond152 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond45#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond152 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond45#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond152 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond45#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond45#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond45#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond45#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond45#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond45#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond45#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond45#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond45#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond45#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond45#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond98#4 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond65 irq_context: 0 (wq_completion)bond65 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond65 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock 
irq_context: 0 (wq_completion)bond65 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond65 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 &mm->mmap_lock &anon_vma->rwsem rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond65 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond65 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond65 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond65 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 &mm->mmap_lock &anon_vma->rwsem rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond65 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 &mm->mmap_lock &anon_vma->rwsem rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond65 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond52 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond52 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond52 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond52 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond131#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond52 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond131#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond131#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond131#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond131#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond131#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond131#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond131#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond131#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond131#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond131#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond131#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond131#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock irq_context: 0 (wq_completion)bond131#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond131#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond131#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim 
mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond131#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond131#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond131#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond131#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond131#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond131#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond131#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond131#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond131#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond131#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond131#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond131#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond131#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond131#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond131#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond131#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond131#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond131#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond131#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond131#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond131#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond131#2 (work_completion)(&(&bond->mcast_work)->work) 
&cfs_rq->removed.lock irq_context: 0 &mm->mmap_lock &anon_vma->rwsem rcu_read_lock &rcu_state.gp_wq irq_context: 0 &mm->mmap_lock &anon_vma->rwsem rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 &mm->mmap_lock &anon_vma->rwsem rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 &mm->mmap_lock &anon_vma->rwsem rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem &rq->__lock irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond50 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond50 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond62#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond62#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &rq->__lock irq_context: 0 (wq_completion)bond62#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond62#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond62#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond62#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond62#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond62#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond141#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond141#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond51 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond51 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond153 irq_context: 0 (wq_completion)bond153 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond153 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond153 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond153 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond153 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond153 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond109#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond109#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond153 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond153 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond153 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond153 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond153 
(work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond153 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond145#2 irq_context: 0 (wq_completion)bond145#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond145#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond145#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond145#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond115 &rq->__lock irq_context: 0 (wq_completion)bond115 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond145#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond145#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond145#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond145#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond142#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond142#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond149 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond149 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond94#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond66 irq_context: 0 (wq_completion)bond66 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond66 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond66 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond66 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond116#3 &rq->__lock irq_context: 0 (wq_completion)bond116#3 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond111#4 &rq->__lock irq_context: 0 (wq_completion)bond111#4 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond138#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond138#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond66 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond66 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond66 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond66 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond144#3 irq_context: 0 (wq_completion)bond144#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond144#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond144#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond144#3 (work_completion)(&(&slave->notify_work)->work) 
&base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond144#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond144#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond144#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond144#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond144#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond144#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond140#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond140#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond53 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond130#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond128 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond128 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond128 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond128 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond128 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond142#4 irq_context: 0 (wq_completion)bond142#4 &rq->__lock irq_context: 0 (wq_completion)bond142#4 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond142#4 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond142#4 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond142#4 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond142#4 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond142#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond142#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond142#4 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond142#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond142#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond142#4 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond142#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond142#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond104#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond104#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond104#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond104#3 
(work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond104#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond94 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond94 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond94 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond94 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond94 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond94 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond94 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond107#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond107#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &rq->__lock irq_context: 0 (wq_completion)bond107#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond107#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond107#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond107#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond107#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond99#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond99#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond99#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 (wq_completion)bond99#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond99#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond99#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond99#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond99#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond99#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond99#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond99#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond99#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond99#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond99#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex 
nl_table_wait.lock irq_context: 0 (wq_completion)bond99#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond99#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond99#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &rq->__lock irq_context: 0 (wq_completion)bond99#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond131#2 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond99#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond99#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond99#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond99#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond99#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock irq_context: 0 (wq_completion)bond99#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond99#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond99#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond99#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond99#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond99#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond99#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond99#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond99#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond99#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond99#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond99#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond99#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock 
irq_context: 0 (wq_completion)bond99#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond99#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond99#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond99#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond99#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond99#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond99#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond99#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond99#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond99#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond99#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond154 irq_context: 0 (wq_completion)bond154 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond154 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond154 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond154 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond154 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)wg-crypt-wg1#11 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock batched_entropy_u8.lock irq_context: 0 (wq_completion)wg-crypt-wg1#11 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock kfence_freelist_lock irq_context: 0 (wq_completion)bond154 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond154 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond154 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond154 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond133#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond133#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond146#2 irq_context: 0 (wq_completion)bond146#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond146#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond146#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond146#2 
(work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond146#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond146#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond146#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond146#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond146#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond146#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond110#2 (work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond58 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond145#3 irq_context: 0 (wq_completion)bond145#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond145#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond145#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond145#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond57 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond57 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex net_rwsem &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond145#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond145#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond145#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond145#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond143#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond67 irq_context: 0 (wq_completion)bond67 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond67 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond67 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond67 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond67 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond67 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond67 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond67 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond67 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond67 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond132#3 &rq->__lock irq_context: 0 (wq_completion)bond132#3 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond130#3 &rq->__lock irq_context: 0 (wq_completion)bond65 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 
(wq_completion)bond65 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond29#2 (work_completion)(&(&bond->mcast_work)->work) &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond130#3 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond49 &rq->__lock irq_context: 0 (wq_completion)bond49 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond105#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond105#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond105#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond105#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond105#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond115#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond115#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond115#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond115#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond115#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond115#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond115#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond115#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond115#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond115#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond115#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond115#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond115#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond115#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond115#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond115#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond115#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond115#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond115#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond115#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond115#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex 
&idev->mc_lock rcu_read_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond115#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond115#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond115#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond115#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond115#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond115#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond115#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond115#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond115#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond115#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond115#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond115#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond115#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond121#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond121#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond121#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond121#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond121#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond121#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond121#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond121#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond121#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond121#2 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond121#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond121#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond121#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond121#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond121#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond121#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond121#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond121#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond121#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock irq_context: 0 (wq_completion)bond121#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond121#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond121#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond121#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond121#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond121#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond121#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond121#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond121#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond121#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond121#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond121#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond121#2 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond121#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond121#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond121#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond121#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond121#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond140 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond127#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond127#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond127#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond127#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond127#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond127#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond127#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond127#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond127#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond127#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond127#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond127#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond127#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond127#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond127#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock irq_context: 0 (wq_completion)bond127#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond127#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond127#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond127#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &____s->seqcount#2 irq_context: 0 (wq_completion)bond127#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &____s->seqcount irq_context: 0 (wq_completion)bond127#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 
(wq_completion)bond127#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond127#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond127#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond127#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond127#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond127#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond127#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond127#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond127#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond127#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond127#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond127#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond127#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond138 (work_completion)(&(&slave->notify_work)->work) &base->lock fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)bond138 (work_completion)(&(&slave->notify_work)->work) &base->lock fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 (wq_completion)bond138 (work_completion)(&(&slave->notify_work)->work) &base->lock fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)bond138 (work_completion)(&(&slave->notify_work)->work) &base->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond147#2 irq_context: 0 (wq_completion)bond147#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond147#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond147#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond147#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond147#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond147#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond147#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock 
&per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond147#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond147#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond147#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond139 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond139 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond108#3 &rq->__lock irq_context: 0 (wq_completion)bond108#3 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond100#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond100#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond112#3 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)bond112#3 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 (wq_completion)bond112#3 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)bond112#3 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond146#3 irq_context: 0 (wq_completion)bond146#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond146#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond146#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond146#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond146#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond115#4 &rq->__lock irq_context: 0 (wq_completion)bond115#4 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond146#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond146#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond146#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond146#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond120#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond120#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond120#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond120#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond120#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond120#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond120#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond120#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock 
irq_context: 0 (wq_completion)bond120#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond120#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond120#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond120#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond120#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond120#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond120#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond120#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock irq_context: 0 (wq_completion)bond120#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond120#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond120#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond120#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond120#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond120#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond120#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond120#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond120#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond120#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond120#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond120#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond120#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond139 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond139 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 
(wq_completion)bond139 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond139 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond139 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond139 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond139 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond139 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond139 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond139 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond139 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond139 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond139 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond139 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond139 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond139 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond139 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond139 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond139 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond139 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond139 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond139 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond139 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond139 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond139 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond139 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond139 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond139 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond139 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh 
rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond139 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond139 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond139 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond139 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond139 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond139 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond139 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq irq_context: 0 (wq_completion)bond139 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 (wq_completion)bond139 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond139 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond139 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond139 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond139 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond139 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond139 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond139 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond105#4 (work_completion)(&(&slave->notify_work)->work) &base->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond56 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond56 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond68#2 irq_context: 0 (wq_completion)bond68#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond68#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond68#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond68#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond68#2 
(work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond68#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond68#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond68#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond68#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond68#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond57 &rq->__lock irq_context: 0 (wq_completion)bond57 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-kex-wg1#18 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &n->list_lock irq_context: 0 (wq_completion)wg-kex-wg1#18 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &n->list_lock &c->lock irq_context: 0 (wq_completion)bond152 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond152 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond145#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond145#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond143#4 irq_context: 0 (wq_completion)bond143#4 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond143#4 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond143#4 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond143#4 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond143#4 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond143#4 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond143#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond143#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond17#4 (work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond124#2 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond156 irq_context: 0 (wq_completion)bond156 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond156 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond156 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond156 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond156 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 
(wq_completion)bond156 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond156 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond156 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond88#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond88#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond88#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond88#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond88#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond88#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond88#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond88#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond88#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond88#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond88#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond88#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond88#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond88#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond88#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock irq_context: 0 (wq_completion)bond88#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond88#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond88#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond88#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond88#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond88#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond88#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond88#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond88#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond88#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 
0 (wq_completion)bond88#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond88#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond88#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond88#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond88#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond88#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond88#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond88#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond88#4 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond91#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond91#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond91#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 (wq_completion)bond91#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond91#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond91#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond91#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond91#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond91#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond91#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond91#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond91#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond91#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock &rq->__lock irq_context: 0 (wq_completion)bond91#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond91#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 
(wq_completion)bond91#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond91#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim &rq->__lock irq_context: 0 (wq_completion)bond91#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond91#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond91#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond91#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond91#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond91#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond91#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond91#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond91#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond91#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond91#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond91#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond91#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond91#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond91#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond91#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond91#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond91#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond91#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond91#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock 
&base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond91#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond91#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond91#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond91#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond91#4 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond91#4 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond91#4 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond91#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond148#2 irq_context: 0 (wq_completion)bond148#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond148#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond148#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond148#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond148#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond148#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond148#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond148#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond108#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond108#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond147#3 irq_context: 0 (wq_completion)bond147#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond147#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond147#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond147#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond147#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond147#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond147#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond147#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond98#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond98#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond98#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond98#4 (work_completion)(&(&slave->notify_work)->work) 
rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond98#4 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond64 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond64 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond64 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond64 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond32#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond64 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond32#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond32#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond32#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond32#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond32#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond32#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond32#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond32#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond32#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond32#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond32#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond32#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond32#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond32#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond32#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond32#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond32#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond32#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond32#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond32#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond32#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond32#3 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond32#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond32#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond32#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond32#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond32#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond32#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond32#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond32#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond32#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond32#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond32#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond32#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond136#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond136#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond69#2 irq_context: 0 (wq_completion)bond69#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond69#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond69#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond69#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond69#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond69#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond69#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond69#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond69#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond69#2 
(work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond137#3 (work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)bond137#3 (work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 (wq_completion)bond137#3 (work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)bond137#3 (work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond144#4 irq_context: 0 (wq_completion)bond144#4 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond144#4 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond144#4 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond144#4 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond144#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond144#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond144#4 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond144#4 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond144#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond144#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond157 irq_context: 0 (wq_completion)bond157 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond157 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond157 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond157 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond126 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond126 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond126 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond126 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond126 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond157 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond157 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond157 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond157 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex fs_reclaim pgd_lock irq_context: 0 rtnl_mutex fs_reclaim stock_lock irq_context: 0 rtnl_mutex fs_reclaim key irq_context: 0 rtnl_mutex fs_reclaim pcpu_lock irq_context: 0 rtnl_mutex fs_reclaim percpu_counters_lock irq_context: 0 rtnl_mutex fs_reclaim pcpu_lock stock_lock irq_context: 0 (wq_completion)bond149#2 irq_context: 0 (wq_completion)bond149#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond149#2 
(work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond149#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond149#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond47 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond47 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond149#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond149#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond149#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond149#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond38#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond148#3 irq_context: 0 (wq_completion)bond148#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond148#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond148#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond148#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond139#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond139#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond130#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond130#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond148#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond148#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond148#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond148#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond143#3 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond70#2 irq_context: 0 (wq_completion)bond70#2 &rq->__lock irq_context: 0 (wq_completion)bond70#2 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond70#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond70#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond70#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond70#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond70#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond70#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond70#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond70#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond70#2 
(work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond70#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond151 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond151 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond101 &rq->__lock irq_context: 0 rtnl_mutex bpf_devs_lock rcu_read_lock rcu_node_0 irq_context: 0 rtnl_mutex bpf_devs_lock rcu_read_lock &rq->__lock irq_context: 0 rtnl_mutex bpf_devs_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond101 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond112#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond112#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond112#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond112#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond112#4 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond109#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond109#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond109#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond109#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond109#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond140#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond140#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond140#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond140#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond140#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond140#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond140#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond140#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond140#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond140#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond140#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond140#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond140#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond140#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond140#3 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond140#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond140#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond140#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond140#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond140#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond140#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond140#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond140#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond140#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond140#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond140#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond140#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond140#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond140#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond140#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond138#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond138#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond138#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond138#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond138#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond132#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond132#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond132#2 (work_completion)(&(&bond->mcast_work)->work) 
rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond132#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond132#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond132#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond132#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond132#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond132#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond132#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond132#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond132#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond132#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond132#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond132#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 (wq_completion)bond132#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond132#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond132#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond132#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond132#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond132#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond132#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond132#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond132#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond132#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond132#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock 
rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond132#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond132#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond132#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond132#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond132#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond132#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond132#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond132#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond132#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond132#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond158 irq_context: 0 (wq_completion)bond158 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond158 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond158 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond158 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond158 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond158 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond158 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond158 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond146#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond146#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond144 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond147#2 &rq->__lock irq_context: 0 (wq_completion)bond147#2 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond150#2 irq_context: 0 (wq_completion)bond150#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond150#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond150#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond150#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond150#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond112#3 &rq->__lock irq_context: 0 (wq_completion)bond112#3 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 
(wq_completion)bond92#3 &rq->__lock irq_context: 0 (wq_completion)bond92#3 &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond92#3 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond150#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond150#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond150#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond150#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond149#3 irq_context: 0 (wq_completion)bond149#3 &rq->__lock irq_context: 0 (wq_completion)bond149#3 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond149#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond149#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond149#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond149#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond148 &rq->__lock irq_context: 0 (wq_completion)bond148 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond149#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond149#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond149#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond149#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond131 &rq->__lock irq_context: 0 (wq_completion)bond131#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond131#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond95#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond95#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond95#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond95#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond95#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond116#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond116#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond116#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond116#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond116#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond99#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond99#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond99#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond99#3 (work_completion)(&(&slave->notify_work)->work) 
rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond99#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond120#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond120#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond120#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond120#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond120#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond50 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond50 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond50 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond50 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond50 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond116#2 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond129 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond129 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond129 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond129 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond129 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond129 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond129 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond129 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond129 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond129 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond129 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond129 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond129 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond129 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond129 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock irq_context: 0 (wq_completion)bond129 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond129 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond129 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 
(wq_completion)bond129 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond129 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond129 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond129 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond129 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond129 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond129 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond129 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond129 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond129 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond129 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond129 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond129 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond129 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond129 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond18#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond18#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond18#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond18#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond18#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond18#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond18#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond18#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 
(wq_completion)bond18#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond18#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond18#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond18#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond18#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond18#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond18#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond18#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond18#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond18#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond18#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#2 irq_context: 0 (wq_completion)bond18#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount irq_context: 0 (wq_completion)bond18#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)bond18#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond18#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond18#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond18#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond18#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond18#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond18#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond18#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond18#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond18#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock 
rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond18#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond18#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond18#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond18#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond18#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond18#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond18#4 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond18#4 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond18#4 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond110#4 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond71#3 irq_context: 0 (wq_completion)bond71#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond71#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond71#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond71#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond141#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond141#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond141#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond141#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond141#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond71#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond71#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond71#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond71#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond71#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond71#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond141#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond141#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond141#2 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond141#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond143#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond143#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond141#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond141#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond141#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond141#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond141#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond141#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond141#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &rq->__lock irq_context: 0 (wq_completion)bond141#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond141#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond141#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond141#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond141#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond141#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 (wq_completion)bond141#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond141#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond141#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond141#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond141#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond141#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond141#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond141#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh 
rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond141#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond141#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond141#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond141#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond141#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond141#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond141#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond141#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond141#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond141#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond141#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond68#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond68#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond59#2 (work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond127#2 (work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond145#4 irq_context: 0 (wq_completion)bond145#4 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond145#4 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond145#4 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond145#4 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond114#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond114#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond147#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond147#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond125#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond125#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond125#3 (work_completion)(&(&slave->notify_work)->work) 
rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond125#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond146 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond125#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond146 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond146 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond146 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond133 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond146 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond133 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond133 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond133 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond133 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond133 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond133 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond133 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond133 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond133 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond133 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond133 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond133 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond133 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond133 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond133 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond133 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond133 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond133 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond133 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond133 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond133 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond133 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond133 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond145#4 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond133 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond145#4 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond133 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond145#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond133 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond145#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond133 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond133 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond145#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond145#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond133 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond133 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond133 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond133 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond133 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond133 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond133 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond159 irq_context: 0 (wq_completion)bond159 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond159 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond159 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond159 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock 
irq_context: 0 (wq_completion)bond159 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond159 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond142#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond142#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond159 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond159 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond159 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond159 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond140 (work_completion)(&(&bond->mcast_work)->work) &base->lock fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)bond140 (work_completion)(&(&bond->mcast_work)->work) &base->lock fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 (wq_completion)bond140 (work_completion)(&(&bond->mcast_work)->work) &base->lock fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)bond140 (work_completion)(&(&bond->mcast_work)->work) &base->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond71#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond71#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond118#2 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond150#3 irq_context: 0 (wq_completion)bond150#3 &rq->__lock irq_context: 0 (wq_completion)bond150#3 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond150#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond150#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond150#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond150#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond150#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond150#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond150#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond150#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond150#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond150#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond150#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond150#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond141#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond141#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond141#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock 
&per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond72#3 irq_context: 0 (wq_completion)bond72#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond72#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond72#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond72#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond44#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond44#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond72#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond72#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond72#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond72#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond67 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond67 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond146#4 irq_context: 0 (wq_completion)bond146#4 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond146#4 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond146#4 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond146#4 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond56 &rq->__lock irq_context: 0 (wq_completion)bond56 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond146#4 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond146#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond146#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond146#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond146#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond146#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond146#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond146#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond146#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond146#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond146#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond146#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond146#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond146#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 
(wq_completion)bond146#4 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond101#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond101#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond101#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond101#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond101#4 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond35#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond35#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond35#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond35#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond35#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond35#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond35#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond35#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond35#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond35#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond35#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond35#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond35#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond35#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock irq_context: 0 (wq_completion)bond35#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond35#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond35#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond35#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond35#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond35#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond35#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond35#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond35#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock 
rcu_node_0 irq_context: 0 (wq_completion)bond35#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond35#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond35#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond35#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond35#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond35#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond35#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond35#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond35#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond35#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond35#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond35#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond35#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond35#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond35#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond35#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond35#5 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond35#5 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond35#5 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond130#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond130#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond145#3 
(work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond145#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond157 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond157 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond160 irq_context: 0 (wq_completion)bond160 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond160 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond160 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond160 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond67 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond67 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond67 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond67 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond67 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond67 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond67 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond67 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond67 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond67 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond67 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond160 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond160 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond160 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond160 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond146#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond146#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond146#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond144#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond144#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond144#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond144#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond144#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond8#2 (work_completion)(&(&bond->mcast_work)->work) rcu_node_0 
irq_context: 0 (wq_completion)bond8#2 (work_completion)(&(&bond->mcast_work)->work) &rcu_state.expedited_wq irq_context: 0 (wq_completion)bond8#2 (work_completion)(&(&bond->mcast_work)->work) &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)bond8#2 (work_completion)(&(&bond->mcast_work)->work) &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond8#2 (work_completion)(&(&bond->mcast_work)->work) &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond146#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond146#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond60#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond60#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond60#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond60#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond60#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond138 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond138 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond138 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond138 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond138 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond29#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond29#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond29#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond29#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond29#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond29#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond29#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond146#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond146#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond128 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond128 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond151#2 irq_context: 0 (wq_completion)bond151#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond151#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond151#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond151#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 
(wq_completion)bond151#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond151#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond151#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond151#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond60#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond60#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond60#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond60#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond60#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond60#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond60#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond60#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond60#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond60#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond60#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond60#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond60#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond60#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond60#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond60#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond60#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond60#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond60#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond60#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond60#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond60#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond60#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond60#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex 
&idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond60#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond60#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond60#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond60#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond60#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond60#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond60#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond60#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond60#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond60#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond60#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond73#2 irq_context: 0 (wq_completion)bond73#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond73#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond73#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond73#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond73#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond73#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond115#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond73#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond73#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond73#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond73#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond148#3 &rq->__lock irq_context: 0 (wq_completion)bond148#3 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond73#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock &tbl->lock quarantine_lock irq_context: 0 (wq_completion)bond150#3 (work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override 
pool_lock irq_context: 0 (wq_completion)bond147#4 irq_context: 0 (wq_completion)bond147#4 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond147#4 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond147#4 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond147#4 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond134#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond134#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond134#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond134#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond134#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond134#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond134#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond134#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond134#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond134#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond134#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond134#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond134#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond134#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond134#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond134#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond134#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond134#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond134#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond134#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond134#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond134#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond134#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond134#2 (work_completion)(&(&bond->mcast_work)->work) 
rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond134#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond134#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond134#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond134#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond134#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond134#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond134#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond147#4 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond147#4 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond147#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond147#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond147#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond147#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond161 irq_context: 0 (wq_completion)bond161 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond161 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond161 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond161 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond161 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond161 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond161 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond161 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond161 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond161 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond58 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond132#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond135#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock 
&per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond145#4 &rq->__lock irq_context: 0 (wq_completion)bond145#4 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond152#2 irq_context: 0 (wq_completion)bond152#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond152#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond152#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond152#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond145#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond145#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond152#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond152#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond152#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond152#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond146#3 &rq->__lock irq_context: 0 (wq_completion)bond66 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond149#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond158 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond159 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond140 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond74#3 irq_context: 0 (wq_completion)bond74#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond74#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond74#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond74#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond74#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond74#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond117#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond117#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond74#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond74#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond74#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond74#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond134#2 &rq->__lock irq_context: 0 (wq_completion)bond148#4 irq_context: 0 (wq_completion)bond148#4 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond148#4 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond148#4 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond148#4 
(work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond148#4 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond148#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond148#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond148#4 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond148#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond148#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond148#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond148#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond162 irq_context: 0 (wq_completion)bond162 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond162 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond162 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond162 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond162 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond63#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond63#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond63#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond63#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond63#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond162 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond162 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond162 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond162 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond162 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond162 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond151#3 irq_context: 0 (wq_completion)bond151#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond151#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond151#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond151#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond151#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond151#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond151#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond151#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond75#2 
irq_context: 0 (wq_completion)bond57 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond57 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond69#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond69#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond75#2 &rq->__lock irq_context: 0 (wq_completion)bond75#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond75#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond75#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond75#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond75#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond75#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond75#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond75#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond75#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond75#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond147 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond147 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond147 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond147 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond147 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond147 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond147 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond147 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond147 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond147 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond147 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond147 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond147 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond147 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond147 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &rq->__lock irq_context: 0 (wq_completion)bond147 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond147 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond147 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock &rq->__lock irq_context: 0 (wq_completion)bond147 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond147 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock irq_context: 0 (wq_completion)bond147 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond147 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond147 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim &rq->__lock irq_context: 0 (wq_completion)bond147 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond147 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 (wq_completion)bond147 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond147 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond147 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond147 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond147 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond147 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond147 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond147 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond147 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond147 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond147 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond147 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond147 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond147 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond147 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond147 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock 
rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond147 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond147 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond147 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond144#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond144#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond147 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond147 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond147 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond147 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond147 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond147 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond138#2 (work_completion)(&(&bond->mcast_work)->work) rcu_node_0 irq_context: 0 (wq_completion)bond138#2 (work_completion)(&(&bond->mcast_work)->work) &rcu_state.expedited_wq irq_context: 0 (wq_completion)bond138#2 (work_completion)(&(&bond->mcast_work)->work) &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)bond138#2 (work_completion)(&(&bond->mcast_work)->work) &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond138#2 (work_completion)(&(&bond->mcast_work)->work) &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond140 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond140 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond140 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond140 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond140 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond140 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond92#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond92#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond53 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond53 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond163 irq_context: 0 
(wq_completion)bond163 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond163 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond163 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond163 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond163 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond163 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond163 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond163 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond152#3 irq_context: 0 (wq_completion)bond152#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond152#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond152#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond152#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond152#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond152#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond152#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond152#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond147#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond147#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond147#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond147#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock &rq->__lock irq_context: 0 (wq_completion)bond147#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond147#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond147#4 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond147#4 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond147#4 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond147#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond147#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond153#2 irq_context: 0 (wq_completion)bond153#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond153#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond153#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond153#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond153#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 
(wq_completion)bond153#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond153#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond153#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond153#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond153#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond153#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond153#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond153#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond153#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond153#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond153#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond153#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond153#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond153#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond153#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond141 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond153#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond141 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond141 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond141 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond135#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond141 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond135#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond135#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond135#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond149#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond135#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond149#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond149#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond149#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond149#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock rcu_node_0 irq_context: 0 
(wq_completion)bond149#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond149#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond149#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond149#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond149#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond149#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond149#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond149#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond149#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond149#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond149#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim &rq->__lock irq_context: 0 (wq_completion)bond149#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond149#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond149#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond149#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond149#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond149#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond149#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond149#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond149#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond149#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond149#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond149#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond149#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock 
rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond149#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond149#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond149#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond149#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond149#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond149#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond149#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond149#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond149#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond149#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond149#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond149#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond143#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond143#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond108#2 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond107 (work_completion)(&(&slave->notify_work)->work) &base->lock fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)bond107 (work_completion)(&(&slave->notify_work)->work) &base->lock fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 (wq_completion)bond107 (work_completion)(&(&slave->notify_work)->work) &base->lock fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)bond107 (work_completion)(&(&slave->notify_work)->work) &base->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond76#3 irq_context: 0 (wq_completion)bond76#3 &rq->__lock irq_context: 0 (wq_completion)bond76#3 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond76#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond76#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond76#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond76#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond76#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond76#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond76#3 
(work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond76#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond76#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond76#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond76#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond76#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond62#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond151#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond151#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond148#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond148#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond148#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond148#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond148#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond148#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &____s->seqcount#2 irq_context: 0 (wq_completion)bond148#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &____s->seqcount irq_context: 0 (wq_completion)bond148#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond148#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond148#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond148#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond148#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond148#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond148#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond148#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond148#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond148#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond148#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond148#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond148#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond148#3 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond148#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond148#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond148#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond148#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond148#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond148#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond148#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond148#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond148#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond142#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond148#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond142#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond142#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond142#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond142#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond142#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond92#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond142#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond92#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond92#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond146#4 &rq->__lock irq_context: 0 (wq_completion)bond92#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond146#4 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond92#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 
(wq_completion)bond92#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond92#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond92#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond92#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond92#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond92#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond92#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond92#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond92#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond92#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond92#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond92#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond92#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond92#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond92#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond92#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond92#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond92#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond92#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond92#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond92#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond92#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond92#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond92#2 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond92#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond92#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond8#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond8#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond8#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond8#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond8#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond8#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond8#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond8#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond8#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond8#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond8#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond8#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond8#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond8#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond8#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond8#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock irq_context: 0 (wq_completion)bond8#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond8#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond8#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond8#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond8#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond8#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond8#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond8#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond8#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 
irq_context: 0 (wq_completion)bond8#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond8#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond8#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond8#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond8#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond8#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond8#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond8#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond8#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond8#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond8#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond8#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond8#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond8#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond8#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond8#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond8#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond164 irq_context: 0 (wq_completion)bond164 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond164 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond164 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond164 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond164 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond164 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 
(wq_completion)bond164 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond164 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond164 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond149#4 irq_context: 0 (wq_completion)bond149#4 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond149#4 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond149#4 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond149#4 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond70#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond70#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond141#3 &rq->__lock irq_context: 0 (wq_completion)bond141#3 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond149#4 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond149#4 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond149#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond149#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond72#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond72#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond153#3 irq_context: 0 (wq_completion)bond153#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond153#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond153#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond153#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond37#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond37#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond37#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond37#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond37#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond37#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond37#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond37#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond37#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond37#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond37#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond37#5 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond37#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond37#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond37#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond37#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond37#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond37#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond37#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond37#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond37#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond37#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond37#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond37#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond37#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond37#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond37#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond37#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond37#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond37#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond37#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond37#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond37#5 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond37#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond37#5 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond37#5 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond37#5 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond153#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond153#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond153#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond153#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond89#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond154#2 irq_context: 0 (wq_completion)bond154#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond154#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond154#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond154#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond113#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond113#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond113#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond113#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond113#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond113#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond113#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond113#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond113#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond113#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond113#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond113#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond113#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond113#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond113#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond113#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock 
&dir->lock#2 irq_context: 0 (wq_completion)bond113#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond113#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond113#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond113#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond113#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond113#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond113#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond113#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond113#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond113#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond113#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond113#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond113#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond113#4 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond161 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond161 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond154#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond154#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond154#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond154#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond129#2 &rq->__lock irq_context: 0 (wq_completion)bond154#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond103#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond103#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 
(wq_completion)bond103#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond103#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond103#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond142 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond142 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond142 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond142 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond142 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond142 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond142 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond142 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond142 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond142 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond142 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond142 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond142 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond142 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond142 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond142 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond142 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond142 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond142 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond142 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond142 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond142 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond142 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond142 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond142 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond142 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond142 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond142 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond142 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond142 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond142 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond142 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond142 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond142 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond142 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond142 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond142 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond142 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond142 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond142 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond142 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond142 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond113#2 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)bond113#2 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 (wq_completion)bond113#2 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)bond113#2 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond165 irq_context: 0 (wq_completion)bond165 &rq->__lock irq_context: 0 (wq_completion)bond165 (work_completion)(&(&slave->notify_work)->work) 
irq_context: 0 (wq_completion)bond165 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond165 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond165 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond165 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond165 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond66 &rq->__lock irq_context: 0 (wq_completion)bond66 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond149#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond149#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond165 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond165 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond165 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond165 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond28#2 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond141#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond150#4 irq_context: 0 (wq_completion)bond150#4 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond150#4 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond150#4 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond150#4 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond150#4 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond150#4 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond150#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond150#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond150#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond150#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond150#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond150#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond150#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond150#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond150#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond150#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond150#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond150#4 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond150#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond150#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond150#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond150#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond150#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond150#4 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond25#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond25#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond25#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond25#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond25#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond123 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond154#3 irq_context: 0 (wq_completion)bond154#3 &rq->__lock irq_context: 0 (wq_completion)bond154#3 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond154#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond154#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond154#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond154#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond154#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond154#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond154#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond154#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond154#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond154#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond35#5 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond35#5 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond35#5 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond35#5 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond96#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond35#5 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond96#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond96#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 
(wq_completion)bond96#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond96#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond96#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond96#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond96#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond96#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond96#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond96#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond96#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond96#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond96#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond96#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond96#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond96#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond96#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond96#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond96#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond96#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond96#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond96#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond96#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond96#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond96#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond96#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond96#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 
(wq_completion)bond96#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond96#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond96#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond96#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond96#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond96#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond96#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond96#4 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond74#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond74#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond74#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond74#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond74#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond74#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond74#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond74#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond74#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond74#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond74#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond74#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond74#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond133#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond74#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond133#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond133#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond133#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond99#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond133#3 (work_completion)(&(&slave->notify_work)->work) 
&p->pi_lock irq_context: 0 (wq_completion)bond99#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond99#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond99#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond99#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond99#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond99#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond99#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond99#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond99#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond99#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond99#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond99#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond99#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond99#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock irq_context: 0 (wq_completion)bond99#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond99#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond99#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond99#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond99#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond99#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond99#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond99#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond99#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond99#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond99#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond99#4 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond99#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond99#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond99#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond99#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)bond99#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond99#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond99#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond99#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond99#4 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond155 irq_context: 0 (wq_completion)bond155 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond155 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond155 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond155 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond155 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond155 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond155 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond155 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond155 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond155 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock fs_reclaim &rq->__lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond77#3 irq_context: 0 (wq_completion)bond77#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond77#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond77#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond77#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 
(wq_completion)bond77#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond77#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond77#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond77#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond77#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond77#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond77#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond77#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond166 irq_context: 0 (wq_completion)bond166 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond166 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond166 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond166 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond157 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond157 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond159 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond166 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond166 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond166 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond166 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond151#4 irq_context: 0 (wq_completion)bond151#4 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond151#4 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond151#4 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond151#4 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond76#3 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond151#4 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond151#4 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond151#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond151#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond151#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond155#2 irq_context: 0 (wq_completion)bond155#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond155#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond155#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond155#2 
(work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond66 (work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)bond66 (work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 (wq_completion)bond66 (work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)bond66 (work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)wg-kex-wg2#21 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg2#21 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond162 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-kex-wg1#22 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock rcu_read_lock_bh &peer->keypairs.keypair_update_lock &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg1#22 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock rcu_read_lock_bh &peer->keypairs.keypair_update_lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg1#22 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &n->list_lock irq_context: 0 (wq_completion)wg-kex-wg1#22 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &n->list_lock &c->lock irq_context: 0 (wq_completion)wg-kex-wg1#22 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg2#22 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock rcu_read_lock_bh &peer->keypairs.keypair_update_lock &table->lock#2 irq_context: 0 
(wq_completion)wg-kex-wg2#22 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock rcu_read_lock_bh &peer->keypairs.keypair_update_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond155#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond155#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond155#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond155#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond155#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond155#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond156#2 irq_context: 0 (wq_completion)bond156#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond156#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond156#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond156#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond156#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond156#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond156#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond156#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond144#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond139 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond158 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond78#4 irq_context: 0 (wq_completion)bond78#4 &rq->__lock irq_context: 0 (wq_completion)bond78#4 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond78#4 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond78#4 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond78#4 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond78#4 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond78#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond78#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond159 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond159 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond159 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond159 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock 
irq_context: 0 (wq_completion)bond159 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond61#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond61#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond61#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond61#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond61#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond137#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond137#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond137#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond137#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond137#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond137#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond137#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond137#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond137#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond137#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond137#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond137#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond137#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond137#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond137#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond137#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond137#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond137#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond137#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond137#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond137#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh 
rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond137#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond137#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond137#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond137#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond137#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond163 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond163 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond163 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond163 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond163 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond163 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond163 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond163 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond163 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond163 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond163 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond163 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond163 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond163 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond163 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond163 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock irq_context: 0 (wq_completion)bond163 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond78#4 (work_completion)(&(&slave->notify_work)->work) &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond163 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock irq_context: 0 (wq_completion)bond78#4 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond78#4 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond78#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond78#4 
(work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond78#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond78#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond61#2 &rq->__lock irq_context: 0 (wq_completion)bond61#2 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond163 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond163 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond163 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond163 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &____s->seqcount#2 irq_context: 0 (wq_completion)bond163 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &____s->seqcount irq_context: 0 (wq_completion)bond163 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond163 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond163 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond163 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond163 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond163 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond163 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond163 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond163 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond163 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond163 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond163 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond163 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond163 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock 
&per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond163 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond163 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond163 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond163 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond163 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond163 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond113#4 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond147#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond147#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond145#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond145#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond114#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond114#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &rq->__lock irq_context: 0 (wq_completion)bond114#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond114#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond114#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond114#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond114#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond164 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond164 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond164 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 (wq_completion)bond164 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond164 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond164 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond164 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond164 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond164 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond164 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond164 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 
(wq_completion)bond164 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond164 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond164 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond164 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond164 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond164 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &rq->__lock irq_context: 0 (wq_completion)bond164 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond164 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond164 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond164 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond164 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond164 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond164 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond164 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond164 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond164 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond164 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond164 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond164 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond164 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond164 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond164 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond115#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond115#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim &rq->__lock irq_context: 0 (wq_completion)bond115#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond115#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond115#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond115#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond115#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond115#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond115#2 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond115#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond115#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond115#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond115#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &rq->__lock irq_context: 0 (wq_completion)bond115#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond115#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond115#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond115#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond115#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond115#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 (wq_completion)bond115#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond115#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond115#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond115#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond115#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond115#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond115#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond115#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond115#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond115#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond115#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond115#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond115#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond115#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh 
rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond115#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond115#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond115#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond115#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond115#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond115#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond115#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond115#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond115#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond115#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond67 &rq->__lock irq_context: 0 (wq_completion)bond67 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond160 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond160 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond144#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond167 irq_context: 0 (wq_completion)bond167 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond167 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond167 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond167 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond167 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond167 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond167 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond167 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond138#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond138#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond65 &rq->__lock irq_context: 0 (wq_completion)bond65 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 
remove_cache_srcu rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond153#2 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond7#3 &rq->__lock irq_context: 0 (wq_completion)bond7#3 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond156#3 irq_context: 0 (wq_completion)bond156#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond156#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond156#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond156#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond156#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond156#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond150#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond150#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond156#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond156#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond156#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond156#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond152#4 irq_context: 0 (wq_completion)bond152#4 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond152#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond152#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond152#4 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond152#4 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond152#4 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond145#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond145#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond147#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond147#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond144 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond152#4 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond152#4 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond152#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond152#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond33#3 &rq->__lock irq_context: 0 (wq_completion)bond33#3 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 
(wq_completion)bond119#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond119#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex net_rwsem &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond157#2 irq_context: 0 (wq_completion)bond157#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond157#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond157#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond157#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond110#4 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond157#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond157#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond157#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond157#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond48 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond48 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &rq->__lock irq_context: 0 (wq_completion)bond48 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond48 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond48 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond48 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond48 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond96#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond96#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond96#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond96#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock &rq->__lock irq_context: 0 (wq_completion)bond96#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond96#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond96#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond125#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond125#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond125#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond125#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond125#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond125#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock 
irq_context: 0 (wq_completion)bond125#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond125#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond125#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond125#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond125#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond125#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond125#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond125#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond125#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond125#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock &rq->__lock irq_context: 0 (wq_completion)bond125#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond125#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond125#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond125#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond125#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond125#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond125#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond125#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond125#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond125#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond125#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond125#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond125#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond125#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond125#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond125#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 
0 (wq_completion)bond125#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond125#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond125#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond125#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond125#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond125#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond125#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond125#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq irq_context: 0 (wq_completion)bond125#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 (wq_completion)bond125#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond125#4 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond107#3 &rq->__lock irq_context: 0 (wq_completion)bond107#3 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond168 irq_context: 0 (wq_completion)bond168 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond168 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond168 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond168 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond76#3 (work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)bond76#3 (work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 (wq_completion)bond76#3 (work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)bond76#3 (work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 sk_lock-AF_INET6 lock sctp_assocs_id_lock &____s->seqcount#2 irq_context: 0 sk_lock-AF_INET6 lock sctp_assocs_id_lock &pcp->lock &zone->lock irq_context: 0 sk_lock-AF_INET6 lock sctp_assocs_id_lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 sk_lock-AF_INET6 lock sctp_assocs_id_lock &____s->seqcount irq_context: 0 sk_lock-AF_INET6 lock sctp_assocs_id_lock pool_lock#2 irq_context: 0 (wq_completion)bond168 
(work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond168 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond168 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond168 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond53 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond108#4 &rq->__lock irq_context: 0 (wq_completion)bond108#4 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond157#3 irq_context: 0 (wq_completion)bond157#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond157#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond157#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond157#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond157#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond157#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond157#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond157#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond157#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond157#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond157#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond157#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond153#4 irq_context: 0 (wq_completion)bond153#4 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond153#4 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond153#4 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond153#4 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond153#4 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond153#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond153#4 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond153#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond153#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond120#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond120#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond120#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond120#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond120#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond97#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex 
fs_reclaim irq_context: 0 (wq_completion)bond97#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond97#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond97#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond97#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond97#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond97#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond97#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond97#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond97#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond97#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond97#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond97#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond97#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond97#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond97#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond97#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond97#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond97#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond97#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond97#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond97#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond97#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond97#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond97#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond97#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock 
&base->lock irq_context: 0 (wq_completion)bond97#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond97#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond97#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond97#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond97#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond158#2 irq_context: 0 (wq_completion)bond158#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond158#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond158#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond158#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond158#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond158#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond158#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond158#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond56 (work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)bond56 (work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)bond56 (work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond59#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond59#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim &rq->__lock irq_context: 0 (wq_completion)bond59#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond155 (work_completion)(&(&bond->mcast_work)->work) &base->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond59#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond59#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond59#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond59#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond59#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond59#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond59#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 
(wq_completion)bond59#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond59#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond59#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond59#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond59#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond59#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock irq_context: 0 (wq_completion)bond59#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond59#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond59#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond59#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond59#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond59#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond59#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond59#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond59#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond59#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond59#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond59#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond59#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond59#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond59#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond59#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond59#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock 
rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond59#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond59#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond59#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond59#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond59#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond124 (work_completion)(&(&bond->mcast_work)->work) &base->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond79#4 irq_context: 0 (wq_completion)bond79#4 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond79#4 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond79#4 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond79#4 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond79#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond79#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond79#4 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond79#4 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond79#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond79#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond79#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_read_lock rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 (wq_completion)bond124 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond124 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond124 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond124 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond124 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond124 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond124 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond124 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond124 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond124 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond124 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond124 (work_completion)(&(&bond->mcast_work)->work) 
rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond124 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond124 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond124 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond124 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond124 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond124 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond124 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond124 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond124 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#2 irq_context: 0 (wq_completion)bond124 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond124 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond124 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount irq_context: 0 (wq_completion)bond124 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)bond124 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond124 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond124 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond124 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond124 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond124 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond124 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond124 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond124 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex 
&idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond124 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond124 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond124 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond124 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond124 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond87#3 (work_completion)(&(&slave->notify_work)->work) &base->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond150#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond99 &rq->__lock irq_context: 0 rtnl_mutex &root->kernfs_rwsem &rcu_state.expedited_wq irq_context: 0 rtnl_mutex &root->kernfs_rwsem &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)bond158#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond158#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond154 &rq->__lock irq_context: 0 (wq_completion)bond154 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond148#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond148#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond154#4 irq_context: 0 (wq_completion)bond154#4 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond154#4 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond154#4 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond154#4 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond122#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond154#4 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond154#4 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond154#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond154#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond52 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond52 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond106#3 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)bond106#3 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 (wq_completion)bond106#3 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 
(wq_completion)bond106#3 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond159#2 irq_context: 0 (wq_completion)bond159#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond159#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond159#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond159#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond109#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond109#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond109#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond109#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond109#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond109#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond109#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond109#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond109#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond109#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond109#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond109#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond109#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond109#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond109#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond109#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &____s->seqcount#2 irq_context: 0 (wq_completion)bond109#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &____s->seqcount irq_context: 0 (wq_completion)bond109#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock irq_context: 0 (wq_completion)bond109#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond109#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond109#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond109#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond109#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond109#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock 
rcu_read_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond109#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#2 irq_context: 0 (wq_completion)bond109#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount irq_context: 0 (wq_completion)bond109#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond109#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond109#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond109#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond109#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond109#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond109#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond109#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond109#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond109#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond109#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond109#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond109#4 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond159#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond159#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond159#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond159#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond80#5 irq_context: 0 (wq_completion)bond80#5 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond80#5 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond80#5 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond80#5 (work_completion)(&(&slave->notify_work)->work) &base->lock 
&obj_hash[i].lock irq_context: 0 (wq_completion)bond80#5 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond80#5 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond80#5 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond80#5 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond152#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond152#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond138 (work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond169 irq_context: 0 (wq_completion)bond169 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond169 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond169 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond169 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond169 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond169 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond169 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond169 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond169 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond169 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond158#3 irq_context: 0 (wq_completion)bond158#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond158#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond158#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond158#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond158#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond158#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond158#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond158#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond124#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond124#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond124#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond124#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond124#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond124#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond124#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock 
irq_context: 0 (wq_completion)bond124#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond124#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond124#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond124#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond124#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond124#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond124#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond124#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond124#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond124#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond124#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond124#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#2 irq_context: 0 (wq_completion)bond124#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount irq_context: 0 (wq_completion)bond124#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond124#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond124#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond124#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond124#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond124#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond124#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond124#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond124#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond124#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex 
&idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond124#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond124#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond124#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond124#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond124#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond124#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond124#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond124#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond149#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond149#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond160 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond160 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond155#3 irq_context: 0 (wq_completion)bond155#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond155#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond155#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond155#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond155#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond155#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond155#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond155#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond155#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond155#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond152#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond152#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond129#3 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond143#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond156#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond156#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 
(wq_completion)bond139#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond139#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond139#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond139#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond139#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond139#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond139#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond139#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond139#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond139#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond139#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond139#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond139#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond139#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond139#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond139#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond139#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond139#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 (wq_completion)bond139#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond139#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock irq_context: 0 (wq_completion)bond139#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond139#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond139#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond139#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond139#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond139#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock 
&obj_hash[i].lock irq_context: 0 (wq_completion)bond139#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond139#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond139#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond139#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond139#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond139#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond139#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond139#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond139#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.expedited_wq irq_context: 0 (wq_completion)bond139#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)bond139#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond139#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond139#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond139#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-crypt-wg1#11 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond64 (work_completion)(&(&bond->mcast_work)->work) &base->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond147#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond147#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond147#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond147#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond78#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond147#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 
0 (wq_completion)bond78#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond78#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond78#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond78#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond78#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond78#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond78#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond78#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond78#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond78#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond78#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond78#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond78#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond78#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond78#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond78#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond78#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond78#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond78#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond78#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond78#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond78#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond78#4 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond143#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond143#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex 
fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond143#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond143#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex batched_entropy_u8.lock irq_context: 0 (wq_completion)bond143#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex kfence_freelist_lock irq_context: 0 (wq_completion)bond143#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond143#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond143#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond143#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond143#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond143#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond143#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond143#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond143#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond143#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &meta->lock irq_context: 0 (wq_completion)bond143#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond143#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond143#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond143#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond143#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond143#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond143#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond143#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond143#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond143#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond143#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond143#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond143#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond143#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock 
&obj_hash[i].lock irq_context: 0 (wq_completion)bond143#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond143#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond143#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond143#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond143#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)bond143#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond143#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond143#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond143#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond143#4 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond170 irq_context: 0 (wq_completion)bond170 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond170 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond170 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond170 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond170 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond170 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond170 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond170 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond170 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond170 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond159#3 irq_context: 0 (wq_completion)bond159#3 &rq->__lock irq_context: 0 (wq_completion)bond159#3 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond159#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond159#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond159#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond159#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond159#3 
(work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond159#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond159#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond159#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond159#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond159#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond159#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond159#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond153#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond100#2 (work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)bond100#2 (work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 (wq_completion)bond100#2 (work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)bond100#2 (work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond128 &rq->__lock irq_context: 0 (wq_completion)bond128 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond156#4 irq_context: 0 (wq_completion)bond156#4 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond156#4 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond156#4 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond156#4 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond156#4 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond156#4 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond156#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond156#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond156#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond156#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond158 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond158 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond156 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond49 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond49 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond49 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond49 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond49 
(work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond149#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond118#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond150#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond150#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond157 &rq->__lock irq_context: 0 (wq_completion)bond157 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond155 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond155 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond11#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond118#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond75#2 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond157#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond157#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond157#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond157#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond157#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond157#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond157#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond157#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond157#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond157#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond157#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond157#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond157#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond157#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond94#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond94#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond94#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond94#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond94#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond146#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond146#3 
(work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond146#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond146#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond146#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond129#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond129#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond129#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond129#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond129#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond100 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond100 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond100 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond100 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond100 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond154#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond154#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond154#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond154#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond154#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond154#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond154#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond154#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond154#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond154#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond154#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond154#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond154#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond154#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond154#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond154#4 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond41#5 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond41#5 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem 
&rq->__lock irq_context: 0 (wq_completion)bond41#5 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond41#5 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond41#5 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond41#5 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond41#5 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond41#5 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond41#5 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond171 irq_context: 0 (wq_completion)bond171 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond171 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond171 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond171 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond171 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond171 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond171 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond171 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond171 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond171 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond171 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond9#2 &rq->__lock irq_context: 0 (wq_completion)bond9#2 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond160#2 irq_context: 0 (wq_completion)bond160#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond160#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond160#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond160#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond160#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond160#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond160#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond160#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond146#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond146#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond146#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond146#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond146#2 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond146#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond146#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond146#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond146#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond146#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond146#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond146#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond146#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond146#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond146#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond146#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond146#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond146#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond146#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond146#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond146#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond146#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond146#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond146#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond146#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond146#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond146#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond154#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock 
irq_context: 0 (wq_completion)bond154#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond144#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond144#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond144#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond144#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond144#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond144#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond144#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond144#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond144#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond144#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond144#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond144#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond144#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond144#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond144#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond144#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond144#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond144#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond144#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond144#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond144#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond144#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond144#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond144#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond144#4 (work_completion)(&(&bond->mcast_work)->work) 
rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond144#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond144#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond144#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond144#4 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond157#4 irq_context: 0 (wq_completion)bond157#4 &rq->__lock irq_context: 0 (wq_completion)bond157#4 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond157#4 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond157#4 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond157#4 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond157#4 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond157#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond157#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond86#3 &rq->__lock irq_context: 0 (wq_completion)bond86#3 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond102 &rq->__lock irq_context: 0 (wq_completion)bond102 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond157#4 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond157#4 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond157#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond157#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond160#3 irq_context: 0 (wq_completion)bond160#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond160#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond160#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond160#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond160#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond160#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond160#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond160#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond107#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond107#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond81 irq_context: 0 (wq_completion)bond81 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 
(wq_completion)bond81 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond81 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond81 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond81 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond81 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond81 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond81 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond81 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond81 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond81 &rq->__lock irq_context: 0 (wq_completion)bond81 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond154#2 &rq->__lock irq_context: 0 (wq_completion)bond154#2 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond152#2 &rq->__lock irq_context: 0 (wq_completion)bond152#2 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond154 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond110#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond110#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond110#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond110#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond110#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond110#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond110#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond110#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond110#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond110#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond110#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond110#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond110#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond110#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond110#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond110#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond110#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock 
&dir->lock#2 irq_context: 0 (wq_completion)bond110#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond110#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond110#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond110#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond110#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond110#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond110#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond110#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond110#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond110#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond110#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond110#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond110#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond110#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond110#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond110#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond110#4 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 rtnl_mutex &idev->mc_lock fill_pool_map-wait-type-override &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond161#2 irq_context: 0 (wq_completion)bond161#2 &rq->__lock irq_context: 0 (wq_completion)bond161#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond161#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond161#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond161#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock 
irq_context: 0 (wq_completion)bond161#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond161#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond161#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond161#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond161#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond33#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond33#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond33#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond33#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond33#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond166 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond166 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond158#4 irq_context: 0 (wq_completion)bond158#4 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond158#4 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond158#4 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond158#4 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond158#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond158#4 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond158#4 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond158#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond158#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond158#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond158#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 data_sockets.lock irq_context: 0 &sb->s_type->i_mutex_key#10 data_sockets.lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_ISDN irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_ISDN slock-AF_ISDN irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_ISDN clock-AF_ISDN irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_ISDN rlock-AF_ISDN irq_context: 0 &sb->s_type->i_mutex_key#10 slock-AF_ISDN irq_context: 0 (wq_completion)bond148#2 &rq->__lock irq_context: 0 (wq_completion)bond148#2 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond59#2 &rq->__lock irq_context: 0 (wq_completion)bond59#2 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond72#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond72#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond151 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond151 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond151 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond151 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond151 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond151 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond151 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond151 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond151 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond151 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond151 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond151 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond151 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond151 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond151 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond151 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock irq_context: 0 (wq_completion)bond151 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond151 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond151 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond151 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond151 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond151 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond151 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond151 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond151 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond151 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh 
rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond151 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond151 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond151 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond82 irq_context: 0 (wq_completion)bond82 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond82 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond82 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond82 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond82 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond82 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond82 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond82 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond149#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond148#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond148#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond161#3 irq_context: 0 (wq_completion)bond161#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond161#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond161#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond161#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond161#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond161#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond161#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond161#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond158#4 &rq->__lock irq_context: 0 (wq_completion)bond158#4 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond173 irq_context: 0 (wq_completion)bond173 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond173 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond173 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond173 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond173 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond173 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond173 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond173 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond173 
(work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond173 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond141#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond141#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond141#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond141#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond141#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond141#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond141#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond141#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond141#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond141#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond141#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond141#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond141#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond141#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond141#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond141#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond141#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond141#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond141#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond141#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond141#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond141#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond141#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh 
rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond141#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond141#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond141#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond72#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond72#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond72#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond72#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond72#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond72#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond72#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond72#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond72#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond72#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond72#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond72#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond72#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond72#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond72#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond72#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond72#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond72#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock irq_context: 0 (wq_completion)bond72#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond72#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond72#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond72#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond72#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond72#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond72#3 (work_completion)(&(&bond->mcast_work)->work) 
rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond72#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond72#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond72#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond72#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond72#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond72#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond72#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond156#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond156#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond156#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond156#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond156#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond156#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond156#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond156#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond156#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond156#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)bond156#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rcu_state.gp_wq irq_context: 0 (wq_completion)bond156#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 (wq_completion)bond156#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond156#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond156#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond156#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond156#2 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond156#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond156#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond156#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond156#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond156#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond156#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond156#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock irq_context: 0 (wq_completion)bond156#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond156#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock irq_context: 0 (wq_completion)bond151#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond151#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond156#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond156#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond156#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond156#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond156#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond156#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond156#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond156#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond156#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond156#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond156#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond156#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond156#2 
(work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond156#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond162 &rq->__lock irq_context: 0 (wq_completion)bond162 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond146#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond159#4 irq_context: 0 (wq_completion)bond159#4 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond159#4 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond159#4 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond159#4 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond146#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond146#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond146#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond146#4 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond159#4 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond159#4 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond159#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond159#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex &root->kernfs_rwsem pgd_lock irq_context: 0 rtnl_mutex &root->kernfs_rwsem stock_lock irq_context: 0 rtnl_mutex &root->kernfs_rwsem key irq_context: 0 rtnl_mutex &root->kernfs_rwsem pcpu_lock irq_context: 0 rtnl_mutex &root->kernfs_rwsem percpu_counters_lock irq_context: 0 rtnl_mutex &root->kernfs_rwsem pcpu_lock stock_lock irq_context: 0 (wq_completion)bond153#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond153#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond144#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond144#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond144#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond144#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond144#4 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond156#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond156#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond156#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock 
irq_context: 0 (wq_completion)bond156#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond156#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond110#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond110#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond110#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond110#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond110#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond46#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond46#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond46#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond46#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond46#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond100#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond100#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond100#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond100#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond100#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond100#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond100#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond100#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond100#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond100#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond100#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond100#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond100#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond100#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond100#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond100#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond100#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond100#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock 
&obj_hash[i].lock irq_context: 0 (wq_completion)bond100#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond100#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond100#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond100#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond100#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond100#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond100#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond100#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond100#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond100#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond100#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond100#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond100#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond100#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond100#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond100#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond100#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond100#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond100#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond163 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond163 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond157#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond157#2 (work_completion)(&(&slave->notify_work)->work) 
&rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond84#3 &rq->__lock irq_context: 0 (wq_completion)bond84#3 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond83#2 irq_context: 0 (wq_completion)bond83#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond83#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond83#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond83#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond83#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond83#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond83#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond83#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond154#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond154#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_power_efficient (gc_work).work &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond128#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond128#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond128#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond128#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock &rq->__lock irq_context: 0 (wq_completion)bond128#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond128#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond128#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond140#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond140#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond140#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond140#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond140#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond140#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond140#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond140#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond122#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond122#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond122#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond122#2 
(work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond122#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond144#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond144#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond144#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond144#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond144#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond144#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond144#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond144#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond144#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rcu_state.gp_wq irq_context: 0 (wq_completion)bond144#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 (wq_completion)bond144#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond144#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond144#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond144#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond144#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond144#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond144#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond144#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond144#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond144#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock irq_context: 0 (wq_completion)bond144#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond144#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond144#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond144#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond144#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond144#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 
0 (wq_completion)bond144#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond144#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond144#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond144#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond144#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond144#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond144#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond144#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond144#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond144#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond144#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond144#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond144#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock &base->lock irq_context: 0 (wq_completion)bond144#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond144#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond144#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond144#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond144#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond144#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond162#2 irq_context: 
0 (wq_completion)bond162#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond162#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond162#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond162#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond159#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond160#4 irq_context: 0 (wq_completion)bond160#4 &rq->__lock irq_context: 0 (wq_completion)bond160#4 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond162#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond162#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond162#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond162#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond160#4 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond160#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond161#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond161#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond160#4 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond160#4 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond160#4 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond160#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond69#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond69#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond69#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond69#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond69#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond160#4 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond160#4 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond160#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond160#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&(&kfence_timer)->work) fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 (wq_completion)bond162#3 irq_context: 0 (wq_completion)bond162#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond162#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond162#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond162#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond162#3 
(work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond162#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond162#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond162#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond138#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)bond138#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 (wq_completion)bond138#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)bond138#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond101 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond101 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond101 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond101 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond101 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond101 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond101 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond101 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond101 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond101 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond101 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond101 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond101 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond101 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond101 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond101 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond101 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond101 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond101 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond101 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond101 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond101 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond101 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond101 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond101 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond101 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond101 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond101 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond101 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond101 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond101 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond101 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond101 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond101 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond101 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond156 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond73#2 (work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)bond73#2 (work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 (wq_completion)bond73#2 (work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)bond73#2 (work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond84#4 irq_context: 0 (wq_completion)bond84#4 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond84#4 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond84#4 (work_completion)(&(&slave->notify_work)->work) &base->lock 
irq_context: 0 (wq_completion)bond84#4 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond56 (work_completion)(&(&bond->mcast_work)->work) &base->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond47 (work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond47 (work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)bond47 (work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 (wq_completion)bond47 (work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)bond84#4 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond84#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond84#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond84#4 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond84#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond84#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond150 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond158#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond168 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond168 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond153#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond163#2 irq_context: 0 (wq_completion)bond163#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond163#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond163#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond163#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond163#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond163#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond163#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond163#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond174 irq_context: 0 (wq_completion)bond174 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond174 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond174 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond174 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond174 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond174 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond174 
(work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond174 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond174 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond174 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond160#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond160#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond148#4 (work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond161#4 irq_context: 0 (wq_completion)bond161#4 &rq->__lock irq_context: 0 (wq_completion)bond161#4 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond161#4 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond161#4 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond161#4 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond161#4 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond161#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond161#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond161#4 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond161#4 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond161#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond161#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond143#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond143#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond143#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond143#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond143#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond143#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond143#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond143#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond143#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond143#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond143#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond143#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond143#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 
(wq_completion)bond143#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond143#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond143#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond143#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond143#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond143#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond143#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->list_lock irq_context: 0 (wq_completion)bond143#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond143#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond143#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond143#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond143#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond143#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond143#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond143#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond143#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond143#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond143#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond143#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond143#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond143#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond147#2 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond147#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond147#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond147#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond147#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond147#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond147#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond147#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond147#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond147#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond147#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond147#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond147#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond147#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond147#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock irq_context: 0 (wq_completion)bond147#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond147#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond147#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond147#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond147#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond147#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond147#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond147#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond147#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond147#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock 
&base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond147#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond147#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond147#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond147#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond52 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond52 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond52 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond52 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond52 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond52 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond52 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond149 (work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond52 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond52 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond52 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond52 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond52 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond52 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond52 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &rq->__lock irq_context: 0 (wq_completion)bond52 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond52 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond52 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond52 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond52 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond52 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond52 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 (wq_completion)bond52 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq 
irq_context: 0 (wq_completion)bond52 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond52 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond52 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond52 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond52 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond52 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond52 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond52 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond52 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond52 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond52 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond52 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond52 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond52 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond52 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond52 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond52 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond52 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond52 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond52 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond52 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond52 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock 
&per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond155#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond155#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond154#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond154#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond122#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond125#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond125#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond125#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond125#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond125#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond125#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond125#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond125#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond125#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond125#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond125#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond125#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond125#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond125#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond125#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock irq_context: 0 (wq_completion)bond125#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond125#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond125#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond125#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond125#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond125#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond125#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond125#3 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond125#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond125#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond125#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond125#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond125#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond125#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond125#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond125#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond125#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond125#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond68#2 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond163#3 irq_context: 0 (wq_completion)bond163#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond163#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond163#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond163#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond163#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond152#2 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)bond152#2 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 (wq_completion)bond152#2 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)bond152#2 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond163#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond163#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond163#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond163#3 
(work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond163#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond163#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond85#5 irq_context: 0 (wq_completion)bond85#5 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond85#5 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond85#5 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond85#5 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond148#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond148#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim &rq->__lock irq_context: 0 (wq_completion)bond148#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond148#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond148#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond148#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond148#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond148#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond148#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond148#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond148#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond148#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond148#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond148#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond148#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond148#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond148#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock irq_context: 0 (wq_completion)bond148#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond148#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond148#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond148#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond148#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock 
&____s->seqcount#10 irq_context: 0 (wq_completion)bond148#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond148#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond148#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond148#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond148#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond148#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond148#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond148#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond148#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond148#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond85#5 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond85#5 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond85#5 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond85#5 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond148#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond148#4 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond148#4 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond148#4 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond75#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond75#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond75#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond75#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond75#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond149#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim 
irq_context: 0 (wq_completion)bond149#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond149#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond149#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond149#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond149#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond149#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond149#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond129#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond149#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond149#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond149#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond149#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond149#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond149#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond149#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond149#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 (wq_completion)bond149#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond149#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond149#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond149#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond149#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond149#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond149#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond149#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond149#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond149#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex 
&idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond149#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond149#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond149#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond149#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond149#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond149#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond149#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond149#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond149#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond149#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond149#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond149#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond149#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond149#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond149#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond149#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond149#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond149#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond149#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond175 irq_context: 0 (wq_completion)bond175 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond175 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond175 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond175 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond175 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond155 &rq->__lock irq_context: 0 (wq_completion)bond126#2 (work_completion)(&(&slave->notify_work)->work) 
fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)bond126#2 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)bond126#2 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond175 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond175 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond175 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond175 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond164#2 irq_context: 0 (wq_completion)bond164#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond164#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond164#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond164#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond164#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond81 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond81 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond159#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond159#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond164#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond164#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond164#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond164#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond164#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond162#4 irq_context: 0 (wq_completion)bond162#4 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond162#4 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond162#4 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond162#4 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond162#4 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond162#4 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond162#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond162#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond68#2 (work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)bond68#2 (work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 (wq_completion)bond68#2 (work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)bond68#2 
(work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond164#3 irq_context: 0 (wq_completion)bond164#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond164#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond164#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond164#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond164#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond164#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond164#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond164#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond164#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond164#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond164#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond48 (work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond110 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond86#5 irq_context: 0 (wq_completion)bond86#5 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond86#5 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond86#5 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond86#5 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond80#5 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond80#5 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond86#5 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond86#5 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond86#5 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond86#5 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond151#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond151#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond161#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond161#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond175 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond176 irq_context: 0 (wq_completion)bond176 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond176 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond176 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 
(wq_completion)bond176 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond169 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond169 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond176 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond176 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond176 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond176 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond152#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond152#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond150#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond150#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond150#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond150#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond150#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond150#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond150#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond150#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond150#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond150#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond150#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond150#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond150#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond150#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond150#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock irq_context: 0 (wq_completion)bond150#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond150#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond150#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond150#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond150#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond150#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock 
rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond150#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond150#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond150#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond150#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond150#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond150#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond150#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond150#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond150#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond150#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond150#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond150#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond150#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond150#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond150#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond150#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond150#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond150#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond165#2 irq_context: 0 (wq_completion)bond165#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond165#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond165#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond165#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock 
irq_context: 0 (wq_completion)bond165#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond165#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond160 &rq->__lock irq_context: 0 (wq_completion)bond160 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond165#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond165#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond165#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond165#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond153#3 (work_completion)(&(&slave->notify_work)->work) &base->lock fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)bond153#3 (work_completion)(&(&slave->notify_work)->work) &base->lock fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 (wq_completion)bond153#3 (work_completion)(&(&slave->notify_work)->work) &base->lock fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)bond153#3 (work_completion)(&(&slave->notify_work)->work) &base->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond163#4 irq_context: 0 (wq_completion)bond163#4 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond163#4 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond163#4 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond163#4 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond163#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond163#4 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond163#4 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond163#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond163#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond165#3 irq_context: 0 (wq_completion)bond165#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond165#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond165#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond165#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond162#2 &rq->__lock irq_context: 0 (wq_completion)bond162#2 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock rcu_read_lock_bh rcu_read_lock fill_pool_map-wait-type-override &c->lock irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock rcu_read_lock_bh rcu_read_lock fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock rcu_read_lock_bh rcu_read_lock fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)bond165#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond165#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond165#3 
(work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond165#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond87#5 irq_context: 0 (wq_completion)bond87#5 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond87#5 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond87#5 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond87#5 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond87#5 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond87#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond87#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond87#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond87#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond87#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond87#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond87#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond87#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond87#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond87#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond87#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond87#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond87#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &rq->__lock irq_context: 0 (wq_completion)bond87#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond87#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond87#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond87#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond87#5 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond85#5 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond85#5 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond85#5 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond85#5 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond85#5 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond94#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 
(wq_completion)bond94#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond94#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond94#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond94#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond136 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond136 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond48 &rq->__lock irq_context: 0 (wq_completion)bond136 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond48 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond136 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond136 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond136 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond136 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond136 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond136 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond136 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond136 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond136 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond136 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond136 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond136 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond136 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond136 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond136 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond136 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond136 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond136 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond136 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond136 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond136 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock 
rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond136 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond136 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond136 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond136 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond136 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond136 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond136 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond136 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond136 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond136 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond136 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond136 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond136 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond136 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond136 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond177 irq_context: 0 (wq_completion)bond177 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond177 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond177 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond177 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond177 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond177 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond177 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond177 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond177 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 
(wq_completion)bond148#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond148#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond156 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond156 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond114#4 &rq->__lock irq_context: 0 (wq_completion)bond114#4 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond121#2 &rq->__lock irq_context: 0 (wq_completion)bond121#2 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond166#2 irq_context: 0 (wq_completion)bond166#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond166#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond166#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond166#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond86#5 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond86#5 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond166#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond166#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond166#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond166#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond164#4 irq_context: 0 (wq_completion)bond164#4 &rq->__lock irq_context: 0 (wq_completion)bond164#4 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond164#4 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond164#4 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond164#4 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond164#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond129#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond129#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond129#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond129#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond129#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond129#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond129#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond129#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond129#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond129#2 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond129#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond129#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond129#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond129#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond129#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond129#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond129#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond129#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond129#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond129#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &n->list_lock irq_context: 0 (wq_completion)bond129#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond129#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond129#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond129#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond129#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond129#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond129#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond129#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond129#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond129#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond129#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond129#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond129#2 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond129#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond129#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond129#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond129#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond129#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond164#4 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond164#4 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond164#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond164#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond129#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond129#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond129#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond129#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond155#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond155#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond155#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond155#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond155#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond119 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond119 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond119 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond119 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond119 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond152#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond152#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond152#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond152#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond152#4 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond12#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim 
irq_context: 0 (wq_completion)bond12#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond12#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond12#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &____s->seqcount#2 irq_context: 0 (wq_completion)bond12#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &____s->seqcount irq_context: 0 (wq_completion)bond12#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond12#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond12#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond12#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond12#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond12#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond12#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond12#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond12#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond12#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond12#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond12#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond12#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond12#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond12#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &____s->seqcount#2 irq_context: 0 (wq_completion)bond12#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &____s->seqcount irq_context: 0 (wq_completion)bond12#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond12#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond12#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond12#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond12#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond12#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond12#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond12#3 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond12#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond12#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond12#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond12#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond12#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond12#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond12#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond12#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond12#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond12#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond12#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond12#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond12#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond152#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond152#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond157#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond157#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond157#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond157#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond157#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond157#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond157#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 
(wq_completion)bond157#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond157#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond157#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond157#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond157#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond157#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond157#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond157#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock &rq->__lock irq_context: 0 (wq_completion)bond157#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond157#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond157#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond157#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond157#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond157#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond157#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond157#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond157#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond157#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond157#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond157#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond157#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond157#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond157#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond157#3 (work_completion)(&(&bond->mcast_work)->work) 
rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond157#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond157#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond157#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond157#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond157#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond157#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond157#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond176 &rq->__lock irq_context: 0 (wq_completion)bond176 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond154 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond154 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond83#2 &rq->__lock irq_context: 0 (wq_completion)bond83#2 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond132#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond158#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond158#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond158#3 (work_completion)(&(&slave->notify_work)->work) &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond161#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond175 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond175 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond142#3 &rq->__lock irq_context: 0 (wq_completion)bond142#3 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond118 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond118 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond118 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond118 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond118 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond118 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond118 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond118 (work_completion)(&(&bond->mcast_work)->work) 
rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond118 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond118 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond118 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond118 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond118 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond118 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond118 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond118 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond118 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond118 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock irq_context: 0 (wq_completion)bond118 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond118 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock irq_context: 0 (wq_completion)bond118 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond118 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond118 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond118 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond118 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond118 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond118 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond118 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond118 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond118 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond118 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond118 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond118 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond118 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq 
irq_context: 0 (wq_completion)bond118 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond118 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond118 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond118 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond118 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond118 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond118 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond118 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond118 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond118 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond118 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond164#4 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond166#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond166#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond129#3 &rq->__lock irq_context: 0 (wq_completion)bond129#3 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond156#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond156#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond151#3 &rq->__lock irq_context: 0 (wq_completion)bond154#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond139#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond139#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond139#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond139#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond139#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond141#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 
(wq_completion)bond141#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond141#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond141#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond141#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond141#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond141#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond141#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond141#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond141#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond141#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond141#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond141#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond141#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond141#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond141#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond141#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond141#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond141#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond141#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond141#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond141#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond141#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond141#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond141#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond141#4 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond141#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond141#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond141#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond141#4 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond162#3 &rq->__lock irq_context: 0 (wq_completion)bond161#2 (work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)bond161#2 (work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 (wq_completion)bond161#2 (work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)bond161#2 (work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond165#4 irq_context: 0 (wq_completion)bond165#4 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond165#4 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond165#4 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond165#4 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond75#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond75#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond165#4 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond165#4 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond165#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond165#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond165#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond165#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond156 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond156 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond156 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond156 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond156 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond156 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond156 (work_completion)(&(&bond->mcast_work)->work) 
rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond156 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond156 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond156 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond156 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond156 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond156 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond156 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond156 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond156 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &____s->seqcount#2 irq_context: 0 (wq_completion)bond156 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock irq_context: 0 (wq_completion)bond156 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond156 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond156 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond156 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond156 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond156 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond156 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond156 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond156 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond156 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond156 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond156 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond156 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock rcu_node_0 irq_context: 0 
(wq_completion)bond156 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond156 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond156 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond156 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond59#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 slock-AF_INET6 fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 slock-AF_INET6 fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)bond166#3 irq_context: 0 (wq_completion)bond166#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond166#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond166#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond166#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond148 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond148 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond148 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond148 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond148 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond148 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond148 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond148 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond148 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond148 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond148 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond148 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond148 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond148 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond148 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond148 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond148 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond148 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond148 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock 
rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond148 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond148 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond148 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond148 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond148 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond148 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond148 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond148 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond148 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond148 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond148 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond148 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond148 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond148 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond148 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond166#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond166#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond166#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond166#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond166#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond166#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond159#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond159#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq (&ndev->rs_timer) rcu_read_lock 
rcu_read_lock &tbl->lock &pcp->lock &zone->lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock &tbl->lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 (wq_completion)bond88#5 irq_context: 0 (wq_completion)bond88#5 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond88#5 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond88#5 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond88#5 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond88#5 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond88#5 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond88#5 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond88#5 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond88#5 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond88#5 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond178 irq_context: 0 (wq_completion)bond178 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond178 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond178 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond178 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond178 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond178 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond178 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond178 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond178 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond178 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond178 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond178 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond168#2 irq_context: 0 (wq_completion)bond168#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond168#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond168#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond168#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond168#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond168#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond168#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond168#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond142#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond142#3 
(work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond142#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond142#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond166 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond142#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond166 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond166 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &rq->__lock irq_context: 0 (wq_completion)bond166 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond166 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond166 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond166 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond84#4 &rq->__lock irq_context: 0 (wq_completion)bond105#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond105#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond105#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond105#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond105#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond105#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond105#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond105#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond105#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond105#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond105#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond105#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond105#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond105#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond105#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond105#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond105#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond105#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock 
irq_context: 0 (wq_completion)bond105#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond105#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond105#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond105#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond105#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond105#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond105#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond105#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond105#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond105#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond105#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond105#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond165#2 &rq->__lock irq_context: 0 (wq_completion)bond165#2 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond105#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond105#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond105#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond166#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond166#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond166#4 irq_context: 0 (wq_completion)bond166#4 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond166#4 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond166#4 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond166#4 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond166#4 (work_completion)(&(&slave->notify_work)->work) 
&rq->__lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)bond166#4 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond166#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond166#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond166#4 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond166#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond166#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond151#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond151#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond151#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond151#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond151#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond151#4 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond160#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond160#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond160#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond160#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond160#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond165#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond165#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond165#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond165#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &____s->seqcount#2 irq_context: 0 (wq_completion)bond165#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &____s->seqcount irq_context: 0 (wq_completion)bond165#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond165#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond165#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond165#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond165#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond165#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond165#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond165#4 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond165#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond165#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond165#4 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond146#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond146#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond146#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond146#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond146#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond146#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond146#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond146#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond146#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond146#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond146#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &rq->__lock irq_context: 0 (wq_completion)bond146#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond146#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond146#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond146#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond146#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond146#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond146#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond146#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond146#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond146#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond146#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond146#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond146#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond146#3 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond146#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond146#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond146#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond146#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond146#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond146#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond146#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond146#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond146#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond146#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond146#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond146#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond146#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond146#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond146#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond146#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond146#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond146#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond145#2 &rq->__lock irq_context: 0 (wq_completion)bond145#2 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond167#2 irq_context: 0 (wq_completion)bond167#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond167#2 
(work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond167#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond167#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond167#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond173 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond173 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond173 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond173 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond173 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond176 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond176 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond176 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond176 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond176 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond176 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond176 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond176 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond176 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond176 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond176 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond176 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond176 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond53 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond53 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond53 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond53 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond53 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond171 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond171 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond171 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond171 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond171 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond144#3 
(work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond144#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond144#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond144#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond144#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond144#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond144#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond144#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond144#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond167#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond167#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond167#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond167#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond167#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond167#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond163#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond80#5 &rq->__lock irq_context: 0 (wq_completion)bond80#5 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond73#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond138#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond138#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim &rq->__lock irq_context: 0 (wq_completion)bond138#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond138#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond138#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond138#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond138#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond138#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond138#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond138#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond138#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond138#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond138#3 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond138#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond138#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond138#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond138#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock irq_context: 0 (wq_completion)bond138#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond138#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond138#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond138#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond138#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond138#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#2 irq_context: 0 (wq_completion)bond138#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount irq_context: 0 (wq_completion)bond138#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond138#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond138#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond138#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond138#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond138#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond138#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond138#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond138#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond138#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock 
irq_context: 0 (wq_completion)bond138#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond138#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond138#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond152#3 (work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond156#3 &rq->__lock irq_context: 0 (wq_completion)bond156#3 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &pmc->lock irq_context: 0 (wq_completion)bond151#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond108#4 (work_completion)(&(&slave->notify_work)->work) &base->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 &tsk->futex_exit_mutex rcu_node_0 irq_context: 0 &tsk->futex_exit_mutex &rcu_state.expedited_wq irq_context: 0 &tsk->futex_exit_mutex &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)bond174 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond174 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond169#2 irq_context: 0 (wq_completion)bond169#2 &rq->__lock irq_context: 0 (wq_completion)bond169#2 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond169#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond169#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond169#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond169#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond169#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond169#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond177 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond177 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond151#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond169#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond169#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond169#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond169#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond163#4 &rq->__lock irq_context: 0 (wq_completion)bond163#4 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond167#3 irq_context: 0 (wq_completion)bond167#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond167#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond167#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond167#3 
(work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond167#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond167#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond167#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond167#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond167#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond167#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond168#3 irq_context: 0 (wq_completion)bond168#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond168#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond168#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond168#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond100#4 &rq->__lock irq_context: 0 (wq_completion)bond100#4 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond168#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond168#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond168#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond168#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond167 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond167 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond167 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond167 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond167 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond167 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond167 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond167 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond167 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond167 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond167 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond167 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond167 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond167 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond167 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond167 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock 
fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond167 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond167 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond167 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond167 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond167 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond167 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond167 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond167 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond167 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond167 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond167 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond167 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond167 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond167 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond167 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond159#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond159#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond159#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond159#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond86#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond159#4 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond86#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond86#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &rq->__lock irq_context: 0 (wq_completion)bond86#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &rq->__lock 
&per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond86#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond86#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond86#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond97#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond97#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond97#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 (wq_completion)bond97#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond97#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond97#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond97#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond97#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond97#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond97#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond97#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond97#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &rq->__lock irq_context: 0 (wq_completion)bond97#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond97#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond97#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond97#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond97#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond97#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond97#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond97#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond97#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond97#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond97#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond97#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond97#4 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond97#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond97#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond97#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond97#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond97#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond97#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond97#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond97#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond97#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond97#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond97#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond97#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond97#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond97#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond97#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond97#4 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond97#4 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond97#4 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond165#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond165#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond90#3 irq_context: 0 (wq_completion)bond90#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond90#3 
(work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond90#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond90#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond85#5 &rq->__lock irq_context: 0 (wq_completion)bond85#5 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock quarantine_lock irq_context: 0 (wq_completion)bond90#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond90#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond90#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond90#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond154#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond154#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond154#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond154#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond154#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond154#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond154#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond154#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond154#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond154#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond154#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond154#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond154#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond154#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond154#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond154#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond154#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond154#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond154#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond154#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond154#3 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond154#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond154#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond154#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond154#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond154#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond154#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond154#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond154#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond154#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond168#3 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond141#4 &rq->__lock irq_context: 0 (wq_completion)bond141#4 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond162#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond162#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond170#2 irq_context: 0 (wq_completion)bond170#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond170#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond170#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond170#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond170#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond170#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond170#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond170#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond135#2 (work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)bond135#2 (work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 (wq_completion)bond135#2 
(work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)bond135#2 (work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond176 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond176 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond166#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond166#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond166#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond166#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond166#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond138#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond138#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond138#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond138#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond138#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond138#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond138#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond138#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond138#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond138#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond138#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond138#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond138#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond138#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond138#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond138#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond138#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond138#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond138#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond138#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#2 
irq_context: 0 (wq_completion)bond138#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &pcp->lock &zone->lock irq_context: 0 (wq_completion)bond138#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 (wq_completion)bond138#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount irq_context: 0 (wq_completion)bond138#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond138#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond138#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond138#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond138#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond138#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond138#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond138#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond138#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond138#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond160#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond138#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond160#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond138#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond138#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond145#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond145#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond145#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond145#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 
(wq_completion)bond145#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond108#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond108#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond108#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &rq->__lock irq_context: 0 (wq_completion)bond108#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond108#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond108#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond108#4 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond108#4 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond108#4 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond168#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond168#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond168#4 irq_context: 0 (wq_completion)bond168#4 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond168#4 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond168#4 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond168#4 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond168#4 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond168#4 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond168#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond168#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond168 &rq->__lock irq_context: 0 (wq_completion)bond168 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-kex-wg0#17 (work_completion)(&peer->transmit_handshake_work) kfence_freelist_lock irq_context: 0 (wq_completion)bond58 &rq->__lock irq_context: 0 (wq_completion)bond58 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond144#3 &rq->__lock irq_context: 0 (wq_completion)bond144#3 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond169#3 irq_context: 0 (wq_completion)bond169#3 &rq->__lock irq_context: 0 (wq_completion)bond169#3 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond169#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond169#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond169#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond169#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond169#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond169#3 
(work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond169#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond169#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond169#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond169#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond169#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond174 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond174 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim &rq->__lock irq_context: 0 (wq_completion)bond174 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond174 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond174 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond174 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond174 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond174 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond174 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond174 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond174 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond174 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond174 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond174 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond174 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond174 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond174 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond174 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim &rq->__lock irq_context: 0 (wq_completion)bond174 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond174 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond174 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond174 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond174 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond174 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond174 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond174 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond174 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond174 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond174 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond174 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond174 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond174 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond174 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond174 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond174 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond174 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond174 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond174 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond169#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond163#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond163#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond163#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond163#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond163#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond163#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond163#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond163#4 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond163#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond163#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond163#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond163#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond163#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond163#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond163#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond163#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond163#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond163#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond163#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond163#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond163#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond163#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond163#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond163#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond163#4 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond109#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond109#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond109#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond109#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond109#4 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond113#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond113#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond113#4 
(work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond113#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond113#4 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond103 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond103 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond103 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond103 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond103 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond169#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond169#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond169#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond169#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond169#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond169#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond169#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond169#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond169#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond169#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond169#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond169#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond169#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond169#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &rq->__lock irq_context: 0 (wq_completion)bond169#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond169#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond169#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond169#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond77#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond169#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond77#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond77#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond77#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond77#3 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond77#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond77#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond77#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond77#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond77#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond77#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond77#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond77#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond77#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond77#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond77#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond77#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond77#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond77#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond77#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond77#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond77#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond77#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond77#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond77#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond77#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond77#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond77#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond77#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond77#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh 
rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond77#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond77#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond77#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond77#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond77#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond77#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond77#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq irq_context: 0 (wq_completion)bond77#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 (wq_completion)bond77#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond77#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond77#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond77#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond77#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond77#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond83#2 (work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond180 irq_context: 0 (wq_completion)bond180 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond180 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond180 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond180 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond180 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond180 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond180 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond180 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond180 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 rtnl_mutex proc_inum_ida.xa_lock 
&____s->seqcount#2 irq_context: 0 (wq_completion)bond141#4 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond91#5 irq_context: 0 (wq_completion)bond91#5 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond91#5 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond91#5 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond91#5 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond91#5 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond91#5 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond153#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond153#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond153#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond153#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond29#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond153#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond29#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond29#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 (wq_completion)bond29#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond29#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond29#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond29#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond29#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond29#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond29#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond29#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond29#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &rq->__lock irq_context: 0 (wq_completion)bond29#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond91#5 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond91#5 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond91#5 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond91#5 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond29#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex 
net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond29#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond29#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond29#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond29#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond29#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 (wq_completion)bond29#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond29#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond29#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond29#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond29#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond29#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond29#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond29#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond29#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond29#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond29#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond29#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond29#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond29#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond29#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond29#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond29#2 (work_completion)(&(&bond->mcast_work)->work) 
rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond29#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond29#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond29#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond29#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond164#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond29#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq irq_context: 0 (wq_completion)bond29#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 (wq_completion)bond29#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond29#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond165#2 (work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond29#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond29#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond29#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond29#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond130#2 &rq->__lock irq_context: 0 (wq_completion)bond130#2 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond158#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond158#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond100#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond100#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond100#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond100#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond100#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond100#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond100#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond100#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock 
irq_context: 0 (wq_completion)bond100#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond100#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond100#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond100#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond100#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond100#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond100#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock &rq->__lock irq_context: 0 (wq_completion)bond100#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond100#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond100#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond100#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond100#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond100#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond100#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond100#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond100#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond100#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond100#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond100#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond100#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond100#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond100#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond100#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond100#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond100#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond100#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock 
rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond100#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond100#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond100#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond100#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond100#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond100#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond100#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond100#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond100#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond100#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond100#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond161#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond161#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond171#2 irq_context: 0 (wq_completion)bond171#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond171#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond171#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond171#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond171#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond171#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond130#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond130#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond130#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond130#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond130#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock 
irq_context: 0 (wq_completion)bond130#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond130#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond130#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond130#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond130#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond130#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond130#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond130#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond130#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond130#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond130#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond130#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond130#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond130#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond130#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond130#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond130#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond130#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond130#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond130#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond130#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond130#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond130#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock 
rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond130#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond130#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond171#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond171#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond171#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond171#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond163#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond139#3 &rq->__lock irq_context: 0 (wq_completion)bond139#3 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond79#4 &rq->__lock irq_context: 0 (wq_completion)bond79#4 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond169#4 irq_context: 0 (wq_completion)bond169#4 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond169#4 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond169#4 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond169#4 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond169#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond169#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond169#4 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond169#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond169#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond169#4 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond169#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond169#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond142#2 &rq->__lock irq_context: 0 (wq_completion)bond170#3 irq_context: 0 (wq_completion)bond170#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond170#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond170#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond170#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond170#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond170#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond170#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond170#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond171#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond171#2 (work_completion)(&(&bond->mcast_work)->work) 
&rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond43#4 &rq->__lock irq_context: 0 (wq_completion)bond43#4 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond139#2 (work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond181 irq_context: 0 (wq_completion)bond181 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond181 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond181 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond181 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond181 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond181 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond181 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond181 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond181 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond181 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond181 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond92#5 irq_context: 0 (wq_completion)bond92#5 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond92#5 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond92#5 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond92#5 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond92#5 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond92#5 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond92#5 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond92#5 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond92#5 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond92#5 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond117#2 &rq->__lock irq_context: 0 (wq_completion)bond117#2 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond157#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond157#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond157#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond157#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond157#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond157#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond157#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 
(wq_completion)bond157#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond157#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond157#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond157#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond157#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond157#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond157#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond157#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond157#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond157#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond157#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond157#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond157#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond157#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond157#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond157#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond157#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond157#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond157#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock &base->lock irq_context: 0 (wq_completion)bond157#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond157#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh 
rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond157#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond157#4 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond88#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond88#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond88#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond88#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond88#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond88#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond88#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond88#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond88#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond88#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond88#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond88#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond88#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond88#5 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond152#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond152#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond152#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 (wq_completion)bond152#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond152#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond152#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond152#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond152#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond152#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond152#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond152#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond152#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond152#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 
(wq_completion)bond152#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond152#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond152#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond152#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond152#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 (wq_completion)bond152#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond152#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond152#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock irq_context: 0 (wq_completion)bond152#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond152#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock irq_context: 0 (wq_completion)bond152#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond152#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond152#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond152#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond152#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond152#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond152#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond152#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond152#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond152#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond152#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond152#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond152#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond152#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock 
&br->multicast_lock irq_context: 0 (wq_completion)bond152#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond152#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond152#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond152#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond152#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond152#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond152#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond152#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond152#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond152#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond174 &rq->__lock irq_context: 0 (wq_completion)bond174 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond172 irq_context: 0 (wq_completion)bond172 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond172 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond172 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond172 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond79#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond163#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond163#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond172 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond172 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond172 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond172 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond172 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond171 &rq->__lock irq_context: 0 (wq_completion)bond105#4 &rq->__lock irq_context: 0 (wq_completion)bond170#4 irq_context: 0 (wq_completion)bond170#4 &rq->__lock irq_context: 0 (wq_completion)bond170#4 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond170#4 
(work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond170#4 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond170#4 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond170#4 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond170#4 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond170#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond170#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond153#4 &rq->__lock irq_context: 0 (wq_completion)bond153#4 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond171#3 irq_context: 0 (wq_completion)bond171#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond171#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond171#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond171#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond161#2 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond165 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond165 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond171#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond171#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond171#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond171#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex uevent_sock_mutex pgd_lock irq_context: 0 rtnl_mutex uevent_sock_mutex stock_lock irq_context: 0 rtnl_mutex uevent_sock_mutex key irq_context: 0 rtnl_mutex uevent_sock_mutex pcpu_lock irq_context: 0 rtnl_mutex uevent_sock_mutex percpu_counters_lock irq_context: 0 rtnl_mutex uevent_sock_mutex pcpu_lock stock_lock irq_context: 0 (wq_completion)bond182 irq_context: 0 (wq_completion)bond182 &rq->__lock irq_context: 0 (wq_completion)bond182 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond182 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond182 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond182 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond182 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond182 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond182 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond182 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond182 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond182 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond182 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock 
irq_context: 0 (wq_completion)bond152#4 &rq->__lock irq_context: 0 (wq_completion)bond152#4 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond93#5 irq_context: 0 (wq_completion)bond93#5 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond93#5 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond93#5 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond93#5 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond93#5 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond93#5 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond93#5 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond93#5 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond93#5 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond93#5 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond125 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond125 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond125 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond125 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond125 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond125 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond125 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond125 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond125 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond125 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond125 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond125 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond125 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond125 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond125 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond125 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond125 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond125 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond125 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 
(wq_completion)bond125 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond125 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond125 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond125 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond125 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond125 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond125 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond125 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond125 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond125 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond125 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond125 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond125 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond123#3 &rq->__lock irq_context: 0 (wq_completion)bond123#3 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond168#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond168#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&ht->run_work) &ht->mutex &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond82 &rq->__lock irq_context: 0 (wq_completion)bond82 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond82 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond171#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond171#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond91#5 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond91#5 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond173#2 irq_context: 
0 (wq_completion)bond173#2 &rq->__lock irq_context: 0 (wq_completion)bond173#2 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond173#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond173#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond173#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond173#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond173#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond173#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond173#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond173#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond173#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond173#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 tasklist_lock stock_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 (wq_completion)bond155 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond150#4 &rq->__lock irq_context: 0 (wq_completion)bond150#4 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond171#4 irq_context: 0 (wq_completion)bond171#4 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond171#4 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond171#4 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond171#4 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond171#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond171#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond171#4 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond171#4 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond171#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond171#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond126#2 (work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond172#2 irq_context: 0 (wq_completion)bond172#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond172#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond172#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond172#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond172#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond119#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond119#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond119#2 
(work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond119#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond119#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond119#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond119#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond119#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond156#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond156#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond172#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond172#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond172#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond172#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond172#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond172#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond124#3 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond132#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond132#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond132#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond132#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond132#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond130#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond130#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &rq->__lock irq_context: 0 (wq_completion)bond130#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond130#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond130#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond130#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond130#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond130#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond130#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond182 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond182 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 
(wq_completion)bond182 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond182 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rcu_state.gp_wq irq_context: 0 (wq_completion)bond182 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 (wq_completion)bond182 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond182 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond182 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond182 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond182 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 (wq_completion)bond182 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond182 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond182 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond182 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond182 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond182 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond182 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond182 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond182 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond182 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond182 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond182 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond182 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond182 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock irq_context: 0 (wq_completion)bond182 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond182 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond182 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond182 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond182 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond182 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock 
&per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond182 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond182 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond182 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond170#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond170#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond94#4 irq_context: 0 (wq_completion)bond94#4 &rq->__lock irq_context: 0 (wq_completion)bond94#4 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond94#4 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond94#4 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond94#4 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond94#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond94#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond94#4 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond94#4 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond94#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond94#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond58 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond174#2 irq_context: 0 (wq_completion)bond174#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond174#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond174#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond174#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond174#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond174#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond168#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond168#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond174#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond174#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond174#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond174#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond149#4 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond172#3 irq_context: 0 (wq_completion)bond172#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond172#3 (work_completion)(&(&slave->notify_work)->work) 
&obj_hash[i].lock irq_context: 0 (wq_completion)bond172#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond172#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond170#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond170#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond172#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond172#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond172#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond172#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond181 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)bond181 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 (wq_completion)bond181 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)bond181 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond173#3 irq_context: 0 (wq_completion)bond173#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond173#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond173#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond173#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond173#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond173#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond173#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond173#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond173#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond173#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond173#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond173#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond88#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond88#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond88#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 (wq_completion)bond88#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond88#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond88#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock 
&bond->stats_lock/1 irq_context: 0 (wq_completion)bond88#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond88#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond88#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond88#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond88#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond88#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond88#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond88#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond88#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond88#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond88#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock irq_context: 0 (wq_completion)bond88#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond88#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond88#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond88#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond88#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond88#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond88#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond88#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond88#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond88#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond88#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond88#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond88#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond88#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond88#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond88#2 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond88#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond88#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond88#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond88#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond88#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond88#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond88#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond88#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond88#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond88#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond88#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond162#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond162#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond71#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond71#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond71#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond71#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond71#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond71#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond71#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond71#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond71#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond71#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 
(wq_completion)bond71#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond71#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond71#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond71#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond71#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond71#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond71#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond71#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond71#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond71#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond71#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond71#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond71#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond71#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond71#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond71#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond71#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond71#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond71#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond71#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond71#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond104#2 (work_completion)(&(&slave->notify_work)->work) &base->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond169#3 
(work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond95#4 irq_context: 0 (wq_completion)bond95#4 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond95#4 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond95#4 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond95#4 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond95#4 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond95#4 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond95#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond95#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond175#2 irq_context: 0 (wq_completion)bond175#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond175#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond175#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond175#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond66 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond66 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond175#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond175#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond175#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond175#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond175#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond175#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond175#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond175#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond175#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond175#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond175#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond175#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond175#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond175#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond175#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond169#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond169#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 
irq_context: 0 (wq_completion)bond169#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond169#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond169#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond170 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond170 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond170 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond170 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond170 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond170 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond170 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond170 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond170 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond170 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond170 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond170 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond170 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond170 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond170 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond170 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond170 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond170 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond170 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond170 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond170 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond170 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond170 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond170 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond170 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond170 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond170 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond170 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond170 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond170 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond170 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond170 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond170 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond170 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond170 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond92#3 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)bond92#3 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 (wq_completion)bond92#3 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)bond92#3 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond173#4 irq_context: 0 (wq_completion)bond173#4 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond173#4 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond173#4 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond173#4 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond173#4 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond173#4 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond173#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond173#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond173#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond173#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond86#5 (work_completion)(&(&slave->notify_work)->work) &base->lock fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)bond86#5 
(work_completion)(&(&slave->notify_work)->work) &base->lock fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 (wq_completion)bond86#5 (work_completion)(&(&slave->notify_work)->work) &base->lock fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)bond86#5 (work_completion)(&(&slave->notify_work)->work) &base->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond174#3 irq_context: 0 (wq_completion)bond174#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond174#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond174#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond174#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond168#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond168#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond100#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond100#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond100#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond100#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond100#4 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond84#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond84#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim &rq->__lock irq_context: 0 (wq_completion)bond84#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond174#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond174#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond174#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond174#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond174#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond174#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond174 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond180 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond84#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond84#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex batched_entropy_u8.lock irq_context: 0 (wq_completion)bond84#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex kfence_freelist_lock irq_context: 0 (wq_completion)bond84#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond84#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond84#4 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond162#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond162#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond84#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond84#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond84#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond84#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond84#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond84#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond84#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond84#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &meta->lock irq_context: 0 (wq_completion)bond84#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond84#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond84#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond84#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &rq->__lock irq_context: 0 (wq_completion)bond84#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond85#5 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond85#5 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond84#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond84#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond84#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond84#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond84#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 (wq_completion)bond84#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond84#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond84#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock irq_context: 0 (wq_completion)bond84#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond84#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock 
&ndev->lock irq_context: 0 (wq_completion)bond84#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond84#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond84#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond84#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq irq_context: 0 (wq_completion)bond84#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 (wq_completion)bond84#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond84#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond177 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond84#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond84#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond84#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond84#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond84#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond84#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond84#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond84#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond84#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond84#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond84#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond84#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond84#4 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond84#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond84#4 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond84#4 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond84#4 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond53 &rq->__lock irq_context: 0 (wq_completion)bond53 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond153#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond153#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond174#2 &rq->__lock irq_context: 0 (wq_completion)bond174#2 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond158#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond158#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond92#5 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond92#5 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond145#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond145#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond123 &rq->__lock irq_context: 0 (wq_completion)bond145#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond145#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond145#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond145#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond145#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond145#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond145#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond145#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond145#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond145#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond145#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond145#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond145#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond145#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond145#2 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond145#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond145#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond145#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond145#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond145#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond145#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond145#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond145#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond145#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond145#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond145#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond145#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond145#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond145#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond145#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond145#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond145#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock &base->lock irq_context: 0 (wq_completion)bond145#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond145#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock 
rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond145#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond145#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond145#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond145#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond145#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)bond145#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond145#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond145#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond123 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond160#4 (work_completion)(&(&bond->mcast_work)->work) &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond175 (work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)bond175 (work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 (wq_completion)bond175 (work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)bond175 (work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss mmu_notifier_invalidate_range_start &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss mmu_notifier_invalidate_range_start &obj_hash[i].lock irq_context: 0 (wq_completion)bond168#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond168#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond168#2 (work_completion)(&(&bond->mcast_work)->work) &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond170#3 &rq->__lock irq_context: 0 (wq_completion)bond170#3 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond129#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond95#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond95#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond126#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond126#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond126#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 (wq_completion)bond126#3 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond126#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond126#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond126#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond126#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond126#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond126#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond126#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond126#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &rq->__lock irq_context: 0 (wq_completion)bond126#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond126#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond126#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond126#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond126#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond126#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond126#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond126#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond126#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond126#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond126#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond126#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond126#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond126#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond126#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond126#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond126#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond126#3 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond126#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond126#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond126#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond126#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond126#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond126#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond126#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond126#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond126#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond36#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond36#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim &rq->__lock irq_context: 0 (wq_completion)bond36#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond169#4 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)bond169#4 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 (wq_completion)bond169#4 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)bond169#4 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond36#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond36#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 (wq_completion)bond36#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond95#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond95#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 
(wq_completion)bond36#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond36#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond36#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond36#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond36#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond36#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond36#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond36#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &rq->__lock irq_context: 0 (wq_completion)bond36#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond36#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond36#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond36#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond36#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond36#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond36#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim &rq->__lock irq_context: 0 (wq_completion)bond36#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond36#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond36#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 (wq_completion)bond36#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond36#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond36#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond36#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond36#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond36#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond36#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond36#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond36#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex 
&idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond36#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond36#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond36#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond36#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond36#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond36#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond36#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond36#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond36#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond36#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond36#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond36#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond36#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond36#5 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond36#5 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond36#5 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond164#3 &rq->__lock irq_context: 0 (wq_completion)bond164#3 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond171#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond171#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond166#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond170 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond170 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, 
cpu)->seq irq_context: 0 (wq_completion)bond96#5 irq_context: 0 (wq_completion)bond96#5 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond96#5 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond96#5 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond96#5 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond96#5 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond96#5 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond157#3 &rq->__lock irq_context: 0 (wq_completion)bond157#3 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond96#5 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond96#5 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond96#5 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond96#5 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond176#2 irq_context: 0 (wq_completion)bond176#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond176#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond176#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond176#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond176#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond176#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond176#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond176#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond176#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond176#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond153#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond152#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond152#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond172#2 &rq->__lock irq_context: 0 (wq_completion)bond174#4 irq_context: 0 (wq_completion)bond174#4 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond174#4 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond174#4 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond174#4 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond174#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond174#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond174#4 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond174#4 
(work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond174#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond174#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond174#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond145#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond145#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond145#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond145#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond145#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond145#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond145#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond145#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond145#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond145#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond145#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond145#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond145#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond145#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond145#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond145#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond145#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond145#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond145#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond145#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond145#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond145#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond145#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond145#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh 
rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond145#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond145#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond145#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond145#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond145#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond145#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond90#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond90#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond145#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond145#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond145#4 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond145#4 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond145#4 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond170#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond170#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond175#3 irq_context: 0 (wq_completion)bond175#3 &rq->__lock irq_context: 0 (wq_completion)bond175#3 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond175#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond175#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond175#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond175#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond175#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond175#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond175#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond175#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond187 irq_context: 0 (wq_completion)bond187 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond187 
(work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond187 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond187 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond187 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond187 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond187 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond187 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond163#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond163#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond97#5 irq_context: 0 (wq_completion)bond97#5 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond97#5 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond97#5 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond97#5 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond97#5 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond97#5 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond97#5 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond97#5 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond97#5 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond165#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond165#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond174#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond174#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond181 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond177 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond177 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond177 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond177 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond177 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond87#5 &rq->__lock irq_context: 0 (wq_completion)bond177 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond87#5 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond177 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond177 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 
(wq_completion)bond177 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond177 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond177 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond177 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond177 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond177 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond177 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond177 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond177 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond177 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond177 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond177 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond177 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond177 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond177 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond177 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond177 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond177 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond177 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond177 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond177 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond177 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond177 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond177 
(work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond177 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond177#2 irq_context: 0 (wq_completion)bond177#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond177#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond177#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond177#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond177#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond177#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond177#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond177#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond177#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond177#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond177#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond173#3 &rq->__lock irq_context: 0 (wq_completion)bond173#3 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond176#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond176#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond176#2 &rq->__lock irq_context: 0 (wq_completion)bond176#2 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond170#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond170#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond170#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond170#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond66 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond170#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond66 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &rq->__lock irq_context: 0 (wq_completion)bond66 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond66 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond66 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond66 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond136#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond66 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond136#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 
(wq_completion)bond136#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 (wq_completion)bond136#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond136#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond136#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond136#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond136#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond136#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond136#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond136#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &rq->__lock irq_context: 0 (wq_completion)bond136#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond136#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond136#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond136#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond136#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond136#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond136#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond136#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond136#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &n->list_lock irq_context: 0 (wq_completion)bond136#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond136#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond136#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond136#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond136#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond136#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond136#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond136#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond136#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock 
rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond136#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond136#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond136#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond136#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond136#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond136#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond136#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond136#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond136#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond136#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock &base->lock irq_context: 0 (wq_completion)bond136#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond136#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond136#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond136#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond136#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond136#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond136#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond136#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond175#4 irq_context: 0 (wq_completion)bond175#4 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond175#4 
(work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond175#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond175#4 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond175#4 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond175#4 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond63 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond63 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond92#4 &rq->__lock irq_context: 0 (wq_completion)bond92#4 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond174#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond174#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond175#4 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond175#4 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond175#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond175#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond176#3 irq_context: 0 (wq_completion)bond176#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond176#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond176#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond176#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond176#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond176#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond176#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond176#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg0#22 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock rcu_read_lock_bh &peer->keypairs.keypair_update_lock &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg0#22 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock rcu_read_lock_bh &peer->keypairs.keypair_update_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond188 irq_context: 0 (wq_completion)bond188 &rq->__lock irq_context: 0 (wq_completion)bond188 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond188 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 
(wq_completion)bond188 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond188 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond188 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond188 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond188 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond188 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond188 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond188 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond188 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond188 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond175 &rq->__lock irq_context: 0 (wq_completion)bond98#5 irq_context: 0 (wq_completion)bond98#5 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond98#5 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond98#5 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond98#5 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond98#5 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond98#5 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond98#5 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond98#5 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond98#5 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond98#5 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond178#2 irq_context: 0 (wq_completion)bond178#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond178#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond178#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond178#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond178#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond178#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond178#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond178#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond161#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond161#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond164#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond84#4 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override pool_lock 
irq_context: 0 (wq_completion)bond82 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond177#3 irq_context: 0 (wq_completion)bond177#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond177#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond177#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond177#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock fill_pool_map-wait-type-override &c->lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)bond177#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond177#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond177#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond177#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond177#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond177#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond177#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond176#4 irq_context: 0 (wq_completion)bond176#4 &rq->__lock irq_context: 0 (wq_completion)bond176#4 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond176#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond176#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond176#4 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond176#4 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond176#4 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond173#3 (work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond176#4 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond176#4 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond176#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond176#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond143#2 &rq->__lock irq_context: 0 (wq_completion)bond143#2 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond87#5 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond169 &rq->__lock irq_context: 0 (wq_completion)bond169 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond165#3 &rq->__lock 
irq_context: 0 (wq_completion)bond165#3 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond99#5 irq_context: 0 (wq_completion)bond99#5 &rq->__lock irq_context: 0 (wq_completion)bond99#5 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond99#5 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond99#5 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond99#5 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond160#2 &rq->__lock irq_context: 0 (wq_completion)wg-crypt-wg1#4 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)bond99#5 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond99#5 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond99#5 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond99#5 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond147#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond147#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond147#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond147#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond147#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond147#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond147#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond147#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond147#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond147#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &rq->__lock irq_context: 0 (wq_completion)bond147#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond147#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond147#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond147#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond147#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond147#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 (wq_completion)bond147#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond147#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond147#3 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond147#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond147#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond147#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond147#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond147#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond147#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond147#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond147#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond147#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond147#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond147#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond147#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond147#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond147#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond147#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond147#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond147#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond147#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond147#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond147#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 
(wq_completion)bond147#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond147#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond147#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond147#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond150#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond150#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond150#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 (wq_completion)bond150#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond150#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex remove_cache_srcu irq_context: 0 (wq_completion)bond150#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex remove_cache_srcu quarantine_lock irq_context: 0 (wq_completion)bond150#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex remove_cache_srcu &c->lock irq_context: 0 (wq_completion)bond150#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex remove_cache_srcu &n->list_lock irq_context: 0 (wq_completion)bond150#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex remove_cache_srcu &obj_hash[i].lock irq_context: 0 (wq_completion)bond150#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex remove_cache_srcu pool_lock#2 irq_context: 0 (wq_completion)bond150#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex remove_cache_srcu &rq->__lock irq_context: 0 (wq_completion)bond150#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond150#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond150#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond150#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond150#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond150#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond150#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond150#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond150#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond150#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond150#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond150#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond150#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 
(wq_completion)bond150#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 (wq_completion)bond150#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond150#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond150#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond150#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond150#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond150#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond150#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond150#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond150#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond150#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond150#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond150#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond150#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond150#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond150#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond150#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond150#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond150#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond150#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock 
rcu_node_0 irq_context: 0 (wq_completion)bond150#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond150#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond99#5 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond179 irq_context: 0 (wq_completion)bond179 &rq->__lock irq_context: 0 (wq_completion)bond179 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond179 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond179 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond179 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond179 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond180 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond180 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond160#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond160#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond179 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond179 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond179 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond179 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex cpu_hotplug_lock wq_pool_mutex rcu_node_0 irq_context: 0 (wq_completion)bond181 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond181 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond98#5 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond98#5 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond98#5 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond98#5 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond98#5 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond181 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond181 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond181 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond181 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond181 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond181 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond181 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond181 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 
(wq_completion)bond181 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond181 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond181 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond181 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond181 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond181 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond154#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond154#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond154#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond154#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond154#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond154#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond154#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond154#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond154#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond154#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond154#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond154#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond154#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond154#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond154#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond154#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock irq_context: 0 (wq_completion)bond154#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond154#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond154#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond154#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond154#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond154#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond154#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 
irq_context: 0 (wq_completion)bond154#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond154#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond154#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond154#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond154#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond154#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond154#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond154#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond154#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond154#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond154#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond181 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond181 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond181 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond91#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond91#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond91#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond91#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond91#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond91#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond91#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond91#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond91#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond91#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 
(wq_completion)bond91#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond91#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond91#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond91#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond91#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond91#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond91#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond91#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond91#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond91#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond91#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond91#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond91#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond91#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond91#5 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond172#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond172#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond172#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond172#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond172#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond170#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond170#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond170#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond170#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond170#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond152#2 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond152#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond152#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond152#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond152#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond152#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond152#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond152#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond152#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond152#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond152#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond152#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond152#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond152#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond152#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond152#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond152#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond152#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond152#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim &rq->__lock irq_context: 0 (wq_completion)bond152#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond152#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond152#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond152#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond152#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond163#2 (work_completion)(&(&bond->mcast_work)->work) &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond152#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond152#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond152#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock 
rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond152#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond152#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond152#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond152#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond152#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond152#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond152#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond152#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond152#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond152#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond152#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond152#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond152#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq irq_context: 0 (wq_completion)bond152#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 (wq_completion)bond152#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond152#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond152#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond152#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond177#4 irq_context: 0 (wq_completion)bond177#4 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond177#4 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond177#4 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond177#4 (work_completion)(&(&slave->notify_work)->work) 
&base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond102#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond102#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond102#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond102#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond102#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond102#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond102#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond102#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond102#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond102#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond102#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond102#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond102#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond102#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond102#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond102#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond102#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond102#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock irq_context: 0 (wq_completion)bond102#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond102#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond102#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond102#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond102#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond102#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond102#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond102#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond102#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond102#3 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond102#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond102#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond102#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond102#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond102#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond102#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond102#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond102#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond102#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond177#4 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond177#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond177#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond177#4 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond177#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond177#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond102#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond102#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond102#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond102#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond102#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond102#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond170#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond170#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond175 
(work_completion)(&(&slave->notify_work)->work) &base->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond190 irq_context: 0 (wq_completion)bond190 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond190 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond190 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond190 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond190 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond190 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond190 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond190 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond190 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond190 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond190 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond190 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond161#3 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond100#5 irq_context: 0 (wq_completion)bond100#5 &rq->__lock irq_context: 0 (wq_completion)bond100#5 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond100#5 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond100#5 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond100#5 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond100#5 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond100#5 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond100#5 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond100#5 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond100#5 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond100#5 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond100#5 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond64 &rq->__lock irq_context: 0 (wq_completion)bond64 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond23#5 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond180#2 irq_context: 0 (wq_completion)bond180#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond180#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond180#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond180#2 
(work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond180#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond180#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond120 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond120 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond120 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 (wq_completion)bond120 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond120 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond120 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond120 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond120 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond120 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond120 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond120 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond120 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond120 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond120 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond120 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond120 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond120 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond120 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond120 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond120 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond120 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond120 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond120 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond120 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond120 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 
(wq_completion)bond120 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond120 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond120 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond120 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond120 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond120 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond120 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond120 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond120 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond120 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond120 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond120 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond120 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond120 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond180#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond180#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond180#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond180#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond180#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond180#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond166#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond167#3 (work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)bond167#3 (work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 (wq_completion)bond167#3 (work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override &____s->seqcount 
irq_context: 0 (wq_completion)bond167#3 (work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond117#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond117#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond117#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond117#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond117#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond117#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond117#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond117#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond117#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond117#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond117#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond117#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond117#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond117#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &rq->__lock irq_context: 0 (wq_completion)bond117#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond117#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond117#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond117#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond117#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond117#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 (wq_completion)bond117#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond117#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond117#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &____s->seqcount#2 irq_context: 0 (wq_completion)bond117#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &____s->seqcount irq_context: 0 (wq_completion)bond117#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond117#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond117#3 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond117#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond117#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond117#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond117#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond117#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond117#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond117#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond117#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond117#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond117#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond117#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond117#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond117#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond117#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond117#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond117#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond117#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond117#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond117#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &br->hash_lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 (wq_completion)bond178#3 irq_context: 0 (wq_completion)bond178#3 (work_completion)(&(&slave->notify_work)->work) 
irq_context: 0 (wq_completion)bond178#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond178#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond178#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond103#4 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond178#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond178#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond178#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond178#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond105#4 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond178#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond82 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond82 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond94#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond94#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond167 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond167 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond100 (work_completion)(&(&bond->mcast_work)->work) &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond178#4 irq_context: 0 (wq_completion)bond178#4 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond178#4 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond178#4 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond178#4 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond178#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond178#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond178#4 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond178#4 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond178#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond178#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond178#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond178#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond96#5 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond101#5 irq_context: 0 (wq_completion)bond101#5 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond101#5 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond101#5 (work_completion)(&(&slave->notify_work)->work) 
&base->lock irq_context: 0 (wq_completion)bond101#5 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond101#5 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond101#5 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond101#5 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond101#5 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond101#5 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond101#5 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond178#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond178#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond178#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond178#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond178#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond178#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond178#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond178#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond178#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond178#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond178#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond178#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond178#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond178#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond178#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond178#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond80#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond80#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond80#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond80#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond80#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond80#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond80#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond80#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock 
irq_context: 0 (wq_completion)bond80#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond80#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond177#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond80#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond80#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond80#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock irq_context: 0 (wq_completion)bond80#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond80#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond80#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 (wq_completion)bond80#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond80#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond80#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond80#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond80#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond80#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond80#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond80#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond80#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond80#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond80#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond80#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond80#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond80#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond80#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond80#5 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond80#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond80#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond80#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond80#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond80#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond80#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond80#5 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond95#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond95#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond95#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond95#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond95#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond95#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond95#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond95#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond95#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond95#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond95#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond95#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond95#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond95#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond95#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond95#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock irq_context: 0 (wq_completion)bond95#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 
(wq_completion)bond95#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond95#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond95#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond95#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock irq_context: 0 (wq_completion)bond95#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond95#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond95#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond95#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond95#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond95#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond95#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond95#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond95#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond95#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond95#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond95#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond95#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond95#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond95#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond95#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond95#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond95#4 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq irq_context: 0 (wq_completion)bond95#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 (wq_completion)bond95#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond95#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond95#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond95#4 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond95#4 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond95#4 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond170#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond170#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond181#2 irq_context: 0 (wq_completion)bond181#2 &rq->__lock irq_context: 0 (wq_completion)bond181#2 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond181#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond181#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond181#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond181#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond181#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond181#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond181#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond181#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond181#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond181#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond181#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond181#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond177#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond177#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond177#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 (wq_completion)bond177#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond177#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond177#2 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond177#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond177#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond177#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond177#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond177#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond177#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond177#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond177#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond177#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond177#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond177#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond177#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond177#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond177#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond177#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond173#4 &rq->__lock irq_context: 0 rtnl_mutex &idev->mc_lock rcu_read_lock &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)bond152#3 &rq->__lock irq_context: 0 (wq_completion)bond152#3 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond178#4 &rq->__lock irq_context: 0 (wq_completion)bond179#2 irq_context: 0 (wq_completion)bond179#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond179#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond179#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond179#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond175#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond175#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond179#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond179#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond179#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond179#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond152#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond152#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &rq->__lock irq_context: 0 (wq_completion)bond152#2 
(work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond152#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond152#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond152#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond152#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond152#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond152#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond179#3 irq_context: 0 (wq_completion)bond179#3 &rq->__lock irq_context: 0 (wq_completion)bond179#3 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond179#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond179#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond179#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond179#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond172#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond172#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond179#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond179#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond179#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond179#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond175#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond175#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond101#4 (work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)bond101#4 (work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 (wq_completion)bond101#4 (work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)bond101#4 (work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 rtnl_mutex &idev->mc_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond162#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond162#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond104#2 (work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond102#5 irq_context: 0 (wq_completion)bond102#5 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond102#5 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond102#5 
(work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond102#5 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond176#4 (work_completion)(&(&slave->notify_work)->work) &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond102#5 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond102#5 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond102#5 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond102#5 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond173#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond173#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond63#2 &rq->__lock irq_context: 0 (wq_completion)bond63#2 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond97 &rq->__lock irq_context: 0 (wq_completion)bond97 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond182#2 irq_context: 0 (wq_completion)bond182#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond182#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond182#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond182#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond182#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond182#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond182#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond182#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond159 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond159 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond159 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond159 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond159 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond159 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond159 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond159 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond159 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond159 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond159 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond159 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond159 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond159 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond159 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond159 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond159 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond159 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond159 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond159 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond159 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond159 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond159 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond159 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond159 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond159 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond159 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond159 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond159 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond159 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond159 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 
(wq_completion)bond159 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond159 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond159 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond159 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond159 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond159 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond159 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond159 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond180#3 irq_context: 0 (wq_completion)bond180#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond180#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond180#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond180#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond180#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond180#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond179 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond179 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond180#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond180#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond180#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond180#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond83#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond178#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond178#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond169#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond120 (work_completion)(&(&slave->notify_work)->work) &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond120 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock pool_lock irq_context: 0 rtnl_mutex &br->hash_lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 rtnl_mutex &br->hash_lock fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond83#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond168#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 
(wq_completion)bond168#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond162#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond162#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond162#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond162#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond162#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond57 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond57 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond57 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond57 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond57 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond57 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond57 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond57 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond57 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond57 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond57 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond57 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond57 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond57 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond57 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond57 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock irq_context: 0 (wq_completion)bond57 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond57 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock irq_context: 0 (wq_completion)bond57 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond57 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock batched_entropy_u8.lock irq_context: 0 (wq_completion)bond57 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock kfence_freelist_lock irq_context: 0 (wq_completion)bond57 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond57 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond57 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &n->list_lock irq_context: 0 (wq_completion)bond57 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond57 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond57 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond57 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond57 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond57 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond57 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond57 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond57 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond57 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond57 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond57 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond57 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond57 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond57 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond57 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond57 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond57 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond57 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond57 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock 
&obj_hash[i].lock irq_context: 0 (wq_completion)bond57 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &meta->lock irq_context: 0 (wq_completion)bond57 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock kfence_freelist_lock irq_context: 0 (wq_completion)bond57 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq irq_context: 0 (wq_completion)bond57 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 (wq_completion)bond57 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond57 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond57 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond57 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond57 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond57 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond192 irq_context: 0 (wq_completion)bond192 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond192 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond192 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond192 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond181#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond181#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond181#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond181#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond181#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond102#5 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond192 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond192 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond192 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond192 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond166#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond44#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond44#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond44#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 
(wq_completion)bond44#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond44#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond44#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond44#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond44#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond44#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond44#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond44#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond44#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond44#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond44#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond44#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond44#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond44#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond44#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond44#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond44#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond44#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond44#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond44#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond44#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond44#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond44#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond44#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 
(wq_completion)bond44#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond44#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond44#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond83#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond44#4 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond83#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond83#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond83#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond86#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond83#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond86#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond86#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond86#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond86#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond86#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond86#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond86#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond86#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond86#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond86#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond86#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond86#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond86#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond86#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond86#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock irq_context: 0 (wq_completion)bond86#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond86#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond86#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond86#5 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond86#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond86#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond86#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond86#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond86#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond86#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond86#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond86#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond86#5 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond103#5 irq_context: 0 (wq_completion)bond103#5 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond103#5 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond103#5 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond103#5 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond103#5 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond103#5 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond103#5 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond103#5 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond103#5 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond103#5 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond172 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond172 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond183 irq_context: 0 (wq_completion)bond183 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond183 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond183 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond183 
(work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond183 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond183 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond183 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond183 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond183 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond183 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond183 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond183 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond163#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond163#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond163#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond163#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond163#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond164#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond164#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond164#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond164#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond164#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond164#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond164#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond164#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond164#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond164#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond164#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond164#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond164#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond164#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond164#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond164#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock irq_context: 0 (wq_completion)bond164#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock &c->lock 
irq_context: 0 (wq_completion)bond164#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond164#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond164#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond164#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond164#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond164#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond164#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond164#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond164#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond164#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond164#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond164#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond164#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond164#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond137 (work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)bond137 (work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 (wq_completion)bond137 (work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)bond137 (work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond164#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond164#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond164#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq 
irq_context: 0 (wq_completion)bond164#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond164#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond164#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond164#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond164#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond178#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond179#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond179#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond181#3 irq_context: 0 (wq_completion)bond181#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond181#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond181#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond181#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond181#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond181#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond97#5 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond97#5 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond181#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond181#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond181#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond181#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond179#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond179#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond143#3 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond175#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond175#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond175#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond175#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond175#4 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond93#5 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond93#5 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond93#5 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 
(wq_completion)bond93#5 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond93#5 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond182#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond182#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond182#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond182#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond182#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond176#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond176#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond176#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond176#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond176#4 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond43#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond43#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond43#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond43#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond43#4 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond167#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond167#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond167#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond167#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond167#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond68#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond68#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond68#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond68#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond68#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond68#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond68#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond68#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond68#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond68#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem 
&list->lock#2 irq_context: 0 (wq_completion)bond68#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond68#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond68#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond68#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond68#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 (wq_completion)bond68#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond68#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock irq_context: 0 (wq_completion)bond68#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond68#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock irq_context: 0 (wq_completion)bond68#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond68#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond68#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond68#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond68#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond68#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond68#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond68#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond68#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond68#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond68#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond68#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond68#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond68#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond68#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond68#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex 
&idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond68#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond68#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond68#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond68#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond68#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond68#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond68#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond68#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond68#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond101#5 &rq->__lock irq_context: 0 (wq_completion)bond101#5 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond180#4 irq_context: 0 (wq_completion)bond180#4 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond180#4 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond180#4 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond180#4 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond180#4 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond180#4 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond180#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond180#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond167#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond193 irq_context: 0 (wq_completion)bond193 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond193 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond193 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond193 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond193 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond193 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq 
irq_context: 0 (wq_completion)bond193 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond193 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond193 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond193 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond187 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond187 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond143#4 &rq->__lock irq_context: 0 (wq_completion)bond143#4 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond21#5 (work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond179#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond179#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond144#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond144#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond144#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond144#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond144#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond144#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond144#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond144#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond144#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond144#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond144#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond144#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond144#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond144#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond144#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond144#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond144#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond144#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond144#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond144#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex 
&idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond144#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond144#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond144#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond144#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond144#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond144#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond144#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond144#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond144#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond144#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond144#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond144#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond144#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond144#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond181#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond181#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock &im->lock init_task.mems_allowed_seq.seqcount irq_context: 0 (wq_completion)bond184 irq_context: 0 (wq_completion)bond184 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond184 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond184 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond184 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond184 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond138#2 &rq->__lock irq_context: 0 (wq_completion)bond138#2 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq 
irq_context: 0 (wq_completion)bond184 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond184 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond184 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond184 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond184 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond184 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond137 &rq->__lock irq_context: 0 (wq_completion)bond137 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond164#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond102#5 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond104#5 irq_context: 0 (wq_completion)bond104#5 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond104#5 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond104#5 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond104#5 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond104#5 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond104#5 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond104#5 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond104#5 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond104#5 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond177#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 &sig->cred_guard_mutex tomoyo_ss fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond177#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-kex-wg2#18 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh batched_entropy_u32.lock crngs.lock irq_context: 0 (wq_completion)bond118#3 &rq->__lock irq_context: 0 (wq_completion)bond118#3 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond170#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond170#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond170#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond170#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond170#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond170#4 
(work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock &rq->__lock irq_context: 0 (wq_completion)bond170#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond170#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond170#4 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond170#4 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond170#4 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond180#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond180#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond62#2 &rq->__lock irq_context: 0 (wq_completion)bond132 &rq->__lock irq_context: 0 (wq_completion)bond132 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond62#2 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond149#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond175#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond175#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond163 &rq->__lock irq_context: 0 (wq_completion)bond145 (work_completion)(&(&slave->notify_work)->work) &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond163 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond84#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond84#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond178#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond178#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond164#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond164#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond164#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond164#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond164#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond67 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond67 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond67 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond67 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond67 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond67 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex 
&obj_hash[i].lock irq_context: 0 (wq_completion)bond67 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond67 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond67 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond67 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond67 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond67 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond67 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond67 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond67 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond67 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond67 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond67 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond67 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond67 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond67 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond67 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond67 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond67 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond67 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond67 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond67 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond67 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond67 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock 
irq_context: 0 (wq_completion)bond67 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond67 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond67 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond147#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond147#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond147#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond147#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond147#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond147#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond147#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond147#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond147#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond147#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond147#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond147#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond147#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond147#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond147#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond147#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond147#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond147#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond147#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond147#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond147#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond147#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond147#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond147#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock 
rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond147#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond147#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond147#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond147#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond147#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond147#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond147#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond147#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond147#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond147#4 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond147#4 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond147#4 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond185 irq_context: 0 (wq_completion)bond185 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond185 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond185 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond185 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond172 &rq->__lock irq_context: 0 (wq_completion)bond172 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond185 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond185 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond185 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond185 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond105#5 irq_context: 0 (wq_completion)bond105#5 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond105#5 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond105#5 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond105#5 (work_completion)(&(&slave->notify_work)->work) &base->lock 
&obj_hash[i].lock irq_context: 0 (wq_completion)bond100#5 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond100#5 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond100#5 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond100#5 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond100#5 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond175#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond175#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond175#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond175#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond175#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond175#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond175#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond175#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond175#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond175#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond175#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond175#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond175#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond175#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock &rq->__lock irq_context: 0 (wq_completion)bond175#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond175#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond175#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond175#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond175#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond175#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond175#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond175#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond175#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond175#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex 
&idev->mc_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond175#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond175#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond175#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond175#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond105#5 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond175#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond105#5 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond175#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond105#5 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond175#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond105#5 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond175#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond175#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond175#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond175#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond175#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond106#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond106#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond106#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond106#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond106#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond173#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond173#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond173#2 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond173#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond173#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond173#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond173#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond173#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond173#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond173#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond173#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond173#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond173#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond173#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond173#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond173#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond173#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond173#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond173#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond173#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond173#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond173#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond173#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond173#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond173#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond173#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond173#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock 
&br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond173#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond173#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond173#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond173#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond173#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond173#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond173#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond146 &rq->__lock irq_context: 0 (wq_completion)bond146 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond141#2 &rq->__lock irq_context: 0 (wq_completion)bond141#2 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond160#3 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond182#3 irq_context: 0 (wq_completion)bond182#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond182#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond182#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond182#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond182#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond182#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond182#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond182#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock fill_pool_map-wait-type-override &c->lock irq_context: softirq (&ndev->rs_timer) &ndev->lock fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: softirq (&ndev->rs_timer) &ndev->lock fill_pool_map-wait-type-override &____s->seqcount irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)bond181#4 irq_context: 0 (wq_completion)bond181#4 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond181#4 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond181#4 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond181#4 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond58 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim 
irq_context: 0 (wq_completion)bond58 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond58 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond58 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond58 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond58 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond58 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond58 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond58 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond58 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond58 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond58 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond58 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond58 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond58 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond58 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond58 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond58 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond58 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond58 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond58 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond58 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond58 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond58 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond58 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond58 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond58 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond58 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond58 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond58 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond58 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond177#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond177#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond177#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond177#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond177#4 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond56 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond56 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond56 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond56 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond56 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond152 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond152 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond152 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond152 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond152 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond152 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond152 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond152 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond152 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond152 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond152 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond152 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond152 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond152 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim 
mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond152 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond152 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond152 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond152 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond152 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond152 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond152 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond152 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond152 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond152 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond152 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond152 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond152 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond152 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond152 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond152 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond152 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond152 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond181#4 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond181#4 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond181#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond181#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 
(wq_completion)bond152 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond152 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond152 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond152 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond152 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond152 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond152 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond152 &rq->__lock irq_context: 0 (wq_completion)bond152 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond75#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond75#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond75#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond75#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond75#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond75#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond75#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond75#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond75#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond75#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond75#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond75#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond75#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond75#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond75#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond75#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond75#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond75#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond75#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond75#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &n->list_lock irq_context: 0 (wq_completion)bond75#2 (work_completion)(&(&bond->mcast_work)->work) 
rtnl_mutex &idev->mc_lock rcu_read_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond75#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond75#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond75#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond75#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond75#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond75#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond75#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond75#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond75#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond75#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond75#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond75#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond75#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond75#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond135#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond135#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond135#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond135#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond135#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond135#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond135#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond135#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond135#2 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond135#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond135#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond135#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond135#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond135#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond135#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond135#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond135#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond135#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond135#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond135#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond135#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond135#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond135#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond135#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond135#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond135#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond135#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond135#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond135#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond135#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 
0 (wq_completion)bond135#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond135#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond135#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond135#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq irq_context: 0 (wq_completion)bond135#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 (wq_completion)bond135#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond135#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond135#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond135#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond135#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond135#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond135#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond101#5 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond101#5 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond194 irq_context: 0 (wq_completion)bond194 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond194 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond194 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond194 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond26#2 &rq->__lock irq_context: 0 (wq_completion)bond26#2 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond94#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond94#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond94#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond94#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond94#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond94#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond94#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond94#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond94#4 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond94#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond194 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond194 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond194 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond194 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond156#4 (work_completion)(&(&slave->notify_work)->work) &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond94#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond94#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &rq->__lock irq_context: 0 (wq_completion)bond94#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond94#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond94#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond94#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond94#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond94#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond94#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond94#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 (wq_completion)bond94#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond94#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond94#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock irq_context: 0 (wq_completion)bond94#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond94#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond94#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond94#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond94#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond94#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond94#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock 
rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond94#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond94#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond94#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond94#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)bond94#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond94#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond94#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond94#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond94#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond94#4 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond94#4 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond94#4 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond176#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond176#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond186 irq_context: 0 (wq_completion)bond186 &rq->__lock irq_context: 0 (wq_completion)bond186 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond186 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond186 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond186 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond186 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond186 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond186 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond186 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond186 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond186 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond187 &rq->__lock irq_context: 0 (wq_completion)bond187 &rq->__lock 
&per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond186 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond162 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond186 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond172 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond162 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond162 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond162 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &____s->seqcount#2 irq_context: 0 (wq_completion)bond162 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &____s->seqcount irq_context: 0 (wq_completion)bond162 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond162 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond162 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond162 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond162 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond162 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond162 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond162 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond162 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond162 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond162 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond162 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond162 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond162 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond162 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond162 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond162 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond162 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond162 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond162 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex 
&idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond162 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond162 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond162 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond162 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond162 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond162 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond162 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond162 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond162 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond162 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq irq_context: 0 (wq_completion)bond162 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 (wq_completion)bond162 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond162 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond162 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond162 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond162 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond162 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond162 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond163#3 &rq->__lock irq_context: 0 (wq_completion)bond163#3 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond130#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond183#2 irq_context: 0 (wq_completion)bond183#2 (work_completion)(&(&slave->notify_work)->work) 
irq_context: 0 (wq_completion)bond183#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond183#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond183#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond183#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond183#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond183#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond183#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond183#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond183#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond183#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond183#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond182#4 irq_context: 0 (wq_completion)bond182#4 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond182#4 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond182#4 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond182#4 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond166#3 &rq->__lock irq_context: 0 (wq_completion)bond166#3 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond182#4 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond182#4 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond182#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond182#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond182#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond182#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond195 irq_context: 0 (wq_completion)bond195 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond195 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond195 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond195 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond195 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond195 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond28#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond28#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond28#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond28#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock 
irq_context: 0 (wq_completion)bond28#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond195 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond195 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond195 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond195 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond137#2 &rq->__lock irq_context: 0 (wq_completion)bond137#2 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond100#5 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond100#5 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond187#2 irq_context: 0 (wq_completion)bond187#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond187#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond187#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond187#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond97#5 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond160 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond160 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond160 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond160 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond160 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond160 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond187#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond187#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond187#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond187#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond160 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex rcu_read_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond160 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond160 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond160 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond160 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond160 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond106#4 irq_context: 0 (wq_completion)bond106#4 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond106#4 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock 
irq_context: 0 (wq_completion)bond106#4 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond106#4 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond185 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond185 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim &rq->__lock irq_context: 0 (wq_completion)bond185 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond185 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond185 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond185 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond185 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond185 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond185 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond185 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond185 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond185 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond185 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock &p->pi_lock irq_context: 0 (wq_completion)bond185 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock &p->pi_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond185 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond185 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond106#4 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond106#4 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond106#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond106#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond106#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond106#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond69#2 &rq->__lock irq_context: 0 (wq_completion)bond69#2 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond185 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond185 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond185 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond185 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond185 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock irq_context: 0 (wq_completion)bond185 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond185 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond185 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond185 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond185 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond185 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond185 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond185 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond185 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond176#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond176#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond102#5 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond184#2 irq_context: 0 (wq_completion)bond184#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond184#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond184#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond184#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond105#5 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond105#5 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond184#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond184#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond184#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond184#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond104#5 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond104#5 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq (&ndev->rs_timer) &pcp->lock &zone->lock &____s->seqcount irq_context: 0 (wq_completion)bond51 &rq->__lock irq_context: 0 (wq_completion)bond51 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond168#3 &rq->__lock irq_context: 0 (wq_completion)bond168#3 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond162#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond162#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond171#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond171#2 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond171#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond171#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond171#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond171#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond181#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond181#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond171#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond171#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond171#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond171#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond171#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond171#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond171#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond171#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond171#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond171#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock &rq->__lock irq_context: 0 (wq_completion)bond171#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond171#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond171#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond171#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond171#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond171#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock irq_context: 0 (wq_completion)bond171#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond171#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock irq_context: 0 (wq_completion)bond171#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond171#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond171#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond171#2 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond171#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond171#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond171#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond171#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond171#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond171#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond171#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond171#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond171#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond171#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond171#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex rcu_read_lock pgd_lock irq_context: 0 rtnl_mutex rcu_read_lock stock_lock irq_context: 0 rtnl_mutex rcu_read_lock key irq_context: 0 rtnl_mutex rcu_read_lock pcpu_lock irq_context: 0 rtnl_mutex rcu_read_lock percpu_counters_lock irq_context: 0 rtnl_mutex rcu_read_lock pcpu_lock stock_lock irq_context: 0 (wq_completion)bond183#3 irq_context: 0 (wq_completion)bond183#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond183#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond183#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond183#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond183#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond183#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond183#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond183#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond183#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond183#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond183#3 
(work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond183#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond196 irq_context: 0 (wq_completion)bond196 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond196 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond196 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond196 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond196 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond196 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond168#4 &rq->__lock irq_context: 0 (wq_completion)bond168#4 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond196 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond196 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond196 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond196 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond196 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond196 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond182#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond182#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond103#5 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond103#5 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond188#2 irq_context: 0 (wq_completion)bond188#2 &rq->__lock irq_context: 0 (wq_completion)bond188#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond188#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond188#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond188#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond188#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond188#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond188#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond188#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond188#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond188#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond104#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond148#2 (work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond195 
(work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond195 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond107#5 irq_context: 0 (wq_completion)bond107#5 &rq->__lock irq_context: 0 (wq_completion)bond107#5 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond107#5 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond107#5 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond107#5 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond107#5 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond107#5 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond107#5 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond107#5 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond107#5 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond107#5 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond107#5 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_PHONET rcu_state.exp_mutex &obj_hash[i].lock pool_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_PHONET rcu_state.exp_mutex rcu_read_lock &pool->lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_PHONET rcu_state.exp_mutex rcu_read_lock &pool->lock fill_pool_map-wait-type-override &c->lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_PHONET rcu_state.exp_mutex rcu_read_lock &pool->lock fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_PHONET rcu_state.exp_mutex rcu_read_lock &pool->lock fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_PHONET rcu_state.exp_mutex rcu_read_lock &pool->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond102#5 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond102#5 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond102#5 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &rq->__lock irq_context: 0 (wq_completion)bond102#5 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond102#5 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond102#5 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond102#5 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond102#5 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond126#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond126#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &rq->__lock irq_context: 0 (wq_completion)bond126#2 
(work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond126#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond126#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond126#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond126#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond180#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond180#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond180#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &rq->__lock irq_context: 0 (wq_completion)bond180#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond180#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond180#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond180#4 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond180#4 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond180#4 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond99#5 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond99#5 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond92#5 &rq->__lock irq_context: 0 (wq_completion)bond92#5 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond187#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond187#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond174#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond184#2 &rq->__lock irq_context: 0 (wq_completion)bond184#2 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond181#4 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond19#5 &rq->__lock irq_context: 0 (wq_completion)bond19#5 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond185#2 irq_context: 0 (wq_completion)bond185#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond185#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond185#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond185#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond188#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond188#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond188#2 (work_completion)(&(&bond->mcast_work)->work) 
&rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond180#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond180#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond185#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond185#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond185#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond185#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond185#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond185#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond184#3 irq_context: 0 (wq_completion)bond184#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond184#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond184#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond184#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond184#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond184#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond184#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond184#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond184#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond91#5 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond163#2 &rq->__lock irq_context: 0 (wq_completion)bond163#2 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond183 &rq->__lock irq_context: 0 (wq_completion)bond183 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond197 irq_context: 0 (wq_completion)bond197 &rq->__lock irq_context: 0 (wq_completion)bond197 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond197 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond197 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond197 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond197 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond162#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond162#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond178#2 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)bond178#2 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 (wq_completion)bond178#2 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)bond178#2 
(work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&tbl->gc_work)->work) &tbl->lock fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&tbl->gc_work)->work) &tbl->lock fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)bond197 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond197 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond197 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond197 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond185#2 &rq->__lock irq_context: 0 (wq_completion)bond185#2 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond189 irq_context: 0 (wq_completion)bond189 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond189 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond189 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond189 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond189 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond189 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond189 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond189 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond189 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond189 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond159#2 &rq->__lock irq_context: 0 (wq_completion)bond159#2 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond90#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond90#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond156#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond156#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond156#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond156#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond156#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond171#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond171#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond171#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond171#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond171#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 
(wq_completion)bond171#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond171#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond171#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond171#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond171#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond171#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond171#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond171#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond171#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond171#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond171#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond171#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond171#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond171#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond171#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond171#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond171#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond171#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond171#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond171#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond171#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond171#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond171#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 
irq_context: 0 (wq_completion)bond171#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond171#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond171#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond171#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond171#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond171#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond171#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond181#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond181#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond108#5 irq_context: 0 (wq_completion)bond108#5 &rq->__lock irq_context: 0 (wq_completion)bond108#5 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond108#5 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond108#5 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond108#5 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond108#5 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond108#5 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond119#3 &rq->__lock irq_context: 0 (wq_completion)bond119#3 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond184#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond184#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond182#3 &rq->__lock irq_context: 0 (wq_completion)bond182#3 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond108#5 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond108#5 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond108#5 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond108#5 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond179 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond179 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond160#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond160#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond133#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond133#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 
(wq_completion)bond133#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond133#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond133#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond17#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond17#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond17#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond17#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &____s->seqcount#2 irq_context: 0 (wq_completion)bond17#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond17#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond17#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &pcp->lock &zone->lock irq_context: 0 (wq_completion)bond17#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &pcp->lock &zone->lock &____s->seqcount irq_context: 0 (wq_completion)bond17#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &____s->seqcount irq_context: 0 (wq_completion)bond17#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond17#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond17#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond17#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond17#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex quarantine_lock irq_context: 0 (wq_completion)bond17#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond17#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond17#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond17#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond17#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond17#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond17#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond17#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 (wq_completion)bond17#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond17#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock remove_cache_srcu irq_context: 0 (wq_completion)bond17#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock remove_cache_srcu quarantine_lock irq_context: 0 (wq_completion)bond17#4 (work_completion)(&(&bond->mcast_work)->work) 
rtnl_mutex &idev->mc_lock remove_cache_srcu &c->lock irq_context: 0 (wq_completion)bond17#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock remove_cache_srcu &n->list_lock irq_context: 0 (wq_completion)bond17#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock remove_cache_srcu &obj_hash[i].lock irq_context: 0 (wq_completion)bond17#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock remove_cache_srcu &rq->__lock irq_context: 0 (wq_completion)bond17#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond17#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond17#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond17#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond17#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond17#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond17#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond17#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond17#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond17#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond17#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond17#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond17#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond17#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond17#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond17#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond17#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond17#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh 
rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond17#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond17#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond17#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond17#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond17#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond17#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond17#4 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond17#4 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond17#4 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond82 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond82 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond82 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond82 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond82 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond99#5 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond99#5 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond99#5 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond99#5 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond99#5 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond177#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond177#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond177#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond177#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond177#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond177#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond177#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond177#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond177#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond177#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 
(wq_completion)bond177#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond177#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond177#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond177#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond177#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond177#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond177#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond177#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond177#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond177#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond177#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond177#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond177#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond177#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond177#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond177#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond177#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond163#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond163#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond163#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond163#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond163#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond163#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond163#3 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond163#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond163#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond163#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond163#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond163#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock &rq->__lock irq_context: 0 (wq_completion)bond163#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond163#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond163#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond163#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond163#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond163#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond163#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond163#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond163#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond163#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond163#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond163#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond163#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond163#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond163#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond163#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond163#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond163#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond163#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock 
irq_context: 0 (wq_completion)bond163#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond163#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond163#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond163#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond163#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond163#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond163#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond163#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond163#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond163#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond106#3 (work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)bond106#3 (work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 (wq_completion)bond106#3 (work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)bond106#3 (work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond185#3 irq_context: 0 (wq_completion)bond185#3 &rq->__lock irq_context: 0 (wq_completion)bond185#3 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond185#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond185#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond185#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond185#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond184#3 &rq->__lock irq_context: 0 (wq_completion)bond184#3 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond185#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond185#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond185#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond185#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond186#2 irq_context: 0 (wq_completion)bond186#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 
(wq_completion)bond186#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond186#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond186#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond186#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond186#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond186#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond186#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond186#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond171#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond186#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond165#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond165#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 fill_pool_map-wait-type-override init_task.mems_allowed_seq.seqcount irq_context: 0 (wq_completion)bond104#5 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond180#2 &rq->__lock irq_context: 0 (wq_completion)bond180#2 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond198 irq_context: 0 (wq_completion)bond198 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond198 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond198 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond198 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond198 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond198 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond181#3 &rq->__lock irq_context: 0 (wq_completion)bond198 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond198 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond198 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond198 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond180#3 (work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond165#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond165#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond80#5 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond80#5 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond80#5 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond80#5 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex 
net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond80#5 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond80#5 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond80#5 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond80#5 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond80#5 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond80#5 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond80#5 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond24#5 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond44#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond44#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond44#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond44#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond44#4 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond161#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond161#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim &rq->__lock irq_context: 0 (wq_completion)bond161#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond161#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond161#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond161#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond161#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond161#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond161#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond161#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond161#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond161#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond161#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond161#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond161#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock irq_context: 0 (wq_completion)bond161#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond161#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim 
mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond161#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond161#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond161#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond161#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond161#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond161#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond161#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond161#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond161#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond161#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond161#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond161#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond161#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond161#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond161#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond161#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond161#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond161#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond161#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond161#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond161#3 (work_completion)(&(&bond->mcast_work)->work) 
rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock &base->lock irq_context: 0 (wq_completion)bond161#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond161#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond161#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond161#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond161#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond161#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond161 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond161 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond161 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond161 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond161 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond161 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond161 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond161 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond161 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond161 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond161 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond161 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond161 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond161 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond161 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock irq_context: 0 (wq_completion)bond161 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond161 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond161 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond161 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond161 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond161 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond161 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond161 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond161 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond161 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond161 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond161 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond161 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond161 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond161 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond161 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond161 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond161 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond161 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond161 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond161 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond161 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond161 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond42#5 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond42#5 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond42#5 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond42#5 
(work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond42#5 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond164#2 &rq->__lock irq_context: 0 (wq_completion)bond184#2 (work_completion)(&(&slave->notify_work)->work) &base->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond179#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond179#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond179#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond179#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond179#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond179#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond179#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond179#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond179#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond179#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond179#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond179#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond179#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond179#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond179#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond179#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond179#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond179#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond179#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond179#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond179#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond179#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond179#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex 
&idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond179#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond179#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond179#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond179#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond168#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond168#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond168#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond168#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond168#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond168#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond168#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond168#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond168#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond168#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond168#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond168#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond168#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond168#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond168#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond168#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond168#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &n->list_lock irq_context: 0 (wq_completion)bond168#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond168#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond168#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond168#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond168#4 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond168#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond168#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond168#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond168#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond168#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond168#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond168#4 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond132#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond132#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond132#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond132#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond132#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond132#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond132#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond186#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond160#2 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond184#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond184#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond132#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond132#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond132#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond132#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond132#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rcu_state.gp_wq irq_context: 0 (wq_completion)bond132#3 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 (wq_completion)bond132#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond132#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond132#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond132#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond132#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond132#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond132#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond132#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond132#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond132#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock &rq->__lock irq_context: 0 (wq_completion)bond132#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond132#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond132#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond132#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond132#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond132#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond132#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond132#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond132#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond132#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond132#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond132#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond132#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond132#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond132#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond132#3 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond132#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond132#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond132#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond132#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond132#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond132#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond132#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond132#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond132#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond132#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond132#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond132#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond132#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond157 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond157 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond157 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond157 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond157 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond190#2 irq_context: 0 (wq_completion)bond190#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond190#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond190#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond190#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond189 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond182#2 
(work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond190#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond190#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond190#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond190#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond197 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond197 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond188#2 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond171#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond171#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond171#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond171#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond190#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond171#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond190#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim &rq->__lock irq_context: 0 (wq_completion)bond190#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond190#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond190#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond190#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond190#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond190#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond190#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond190#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond190#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond190#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond190#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond190#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond190#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond190#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond190#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock irq_context: 0 (wq_completion)bond190#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq 
irq_context: 0 (wq_completion)bond190#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond190#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond190#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond190#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond190#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond34#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond34#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond34#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond34#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &____s->seqcount#2 irq_context: 0 (wq_completion)bond34#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond34#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond34#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &____s->seqcount irq_context: 0 (wq_completion)bond34#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond34#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond34#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond34#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond34#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond34#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond34#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond34#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond34#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock &rq->__lock irq_context: 0 (wq_completion)bond34#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond34#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond34#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond34#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond34#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond34#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond34#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond34#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock 
&tbl->lock irq_context: 0 (wq_completion)bond34#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock &n->lock irq_context: 0 (wq_completion)bond34#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock pool_lock#2 irq_context: 0 (wq_completion)bond34#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock &c->lock irq_context: 0 (wq_completion)bond34#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock &n->lock irq_context: 0 (wq_completion)bond34#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock &n->lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond34#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock nl_table_lock irq_context: 0 (wq_completion)bond34#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond34#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock nl_table_wait.lock irq_context: 0 (wq_completion)bond34#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock rcu_read_lock lock#8 irq_context: 0 (wq_completion)bond34#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock rcu_read_lock id_table_lock irq_context: 0 (wq_completion)bond34#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock &dir->lock#2 irq_context: 0 (wq_completion)bond34#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock krc.lock irq_context: 0 (wq_completion)bond34#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock &n->list_lock irq_context: 0 (wq_completion)bond34#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond34#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)bond34#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)bond34#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond34#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond34#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond34#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond34#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond34#4 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond34#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond34#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond34#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond34#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond34#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond34#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond34#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond34#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond34#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond34#4 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond34#4 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond34#4 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond109#5 irq_context: 0 (wq_completion)bond109#5 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond109#5 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond109#5 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond109#5 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond109#5 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond183#2 &rq->__lock irq_context: 0 (wq_completion)bond109#5 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond109#5 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond109#5 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond109#5 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond122 &rq->__lock irq_context: 0 hrtimer_bases.lock fill_pool_map-wait-type-override batched_entropy_u8.lock irq_context: 0 hrtimer_bases.lock fill_pool_map-wait-type-override kfence_freelist_lock irq_context: 0 (wq_completion)wg-kex-wg2#17 
(work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#2 irq_context: 0 (wq_completion)wg-kex-wg2#17 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount irq_context: 0 (wq_completion)bond187 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond187 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond187 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond187 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond187 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond187 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond187 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond187 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond187 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond187 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond187 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond187 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond187 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond187 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond187 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond187 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond187 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond187 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond187 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond187 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond187 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond187 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond187 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond187 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex 
&idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond187 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond187 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond187 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond187 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond187 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond187 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond187 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond187 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond187 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq irq_context: 0 (wq_completion)bond187 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 (wq_completion)bond187 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond187 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond187 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond187 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond66 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond192 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond192 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond187#3 irq_context: 0 (wq_completion)bond187#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond187#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond187#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond187#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond184#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond184#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond155#2 &rq->__lock irq_context: 0 (wq_completion)bond187#3 
(work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond187#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond187#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond187#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond149#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond149#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond149#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond149#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond149#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond149#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond149#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond149#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond149#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond149#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond149#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond149#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond149#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond149#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond149#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond149#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond149#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond149#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock irq_context: 0 (wq_completion)bond149#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond149#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond149#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond149#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond149#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond149#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond149#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &c->lock irq_context: 0 
(wq_completion)bond149#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond149#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond149#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond149#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond149#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond149#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond149#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond149#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond149#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond149#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond149#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond149#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond149#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond149#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond149#4 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond174#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond174#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond174#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond174#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond174#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond174#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond174#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond174#4 (work_completion)(&(&bond->mcast_work)->work) 
rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond174#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond174#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond174#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond174#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond174#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond174#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond174#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond174#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock irq_context: 0 (wq_completion)bond174#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond174#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond174#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond174#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond174#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond174#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond174#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond174#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond174#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond174#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond174#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond174#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond174#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond174#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond174#4 (work_completion)(&(&bond->mcast_work)->work) 
&p->pi_lock irq_context: 0 (wq_completion)bond169 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond191 irq_context: 0 (wq_completion)bond191 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond191 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond191 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond191 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock &pcp->lock &zone->lock irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 (wq_completion)bond108#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond108#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond108#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond108#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond108#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond139 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond139 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond139 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond139 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond139 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond100#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond100#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond100#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond100#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond100#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond100#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond100#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond100#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond100#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond100#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond100#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond100#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond100#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond100#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond100#5 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond100#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond100#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond100#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond100#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond100#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond100#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond100#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond100#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond100#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond100#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond100#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond100#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond100#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond191 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond191 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond191 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond191 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond151#3 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond100#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond100#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond100#5 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond100#5 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond100#5 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock 
&per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond164#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond164#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond200 irq_context: 0 (wq_completion)bond200 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond200 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond200 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond200 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond200 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond200 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond200 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond200 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond200 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond200 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond166#4 &rq->__lock irq_context: 0 (wq_completion)bond179#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond179#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond109#5 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond110#5 irq_context: 0 (wq_completion)bond110#5 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond110#5 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond110#5 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond110#5 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond41#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond41#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond41#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond41#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond41#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond41#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond41#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond41#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond41#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond41#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond41#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond41#5 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond41#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond41#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond41#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond41#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond41#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond41#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond41#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond41#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond41#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond41#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond41#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond41#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond41#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond41#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond41#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond41#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond41#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond41#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond41#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond41#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond41#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 
(wq_completion)bond41#5 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond162#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond162#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond162#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond162#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond162#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond162#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond162#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond162#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock &p->pi_lock irq_context: 0 (wq_completion)bond162#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond162#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond162#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond110#5 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond110#5 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond110#5 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond110#5 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond162#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond162#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex pool_lock#2 irq_context: 0 (wq_completion)bond162#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond162#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond162#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond162#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond162#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond162#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond162#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim &rq->__lock irq_context: 0 (wq_completion)bond162#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond162#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond162#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond162#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock 
irq_context: 0 (wq_completion)bond162#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond162#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond162#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond162#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond167#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond167#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond162#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond162#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond162#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond162#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond162#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond162#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond162#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond162#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond162#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond162#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond162#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond162#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond162#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond162#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond162#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock 
rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond162#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq irq_context: 0 (wq_completion)bond162#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 (wq_completion)bond162#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond162#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond162#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond162#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond162#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond162#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond87#5 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond182#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond182#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond200 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond200 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond186#3 irq_context: 0 (wq_completion)bond186#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond186#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond186#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond186#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond186#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond186#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond186#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond186#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond96#5 &rq->__lock irq_context: 0 (wq_completion)bond96#5 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond172#3 &rq->__lock irq_context: 0 (wq_completion)bond172#3 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond152#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond152#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond152#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond152#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond152#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 
(wq_completion)bond152#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond152#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond152#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 &sb->s_type->i_mutex_key#10 nl_table_wait.lock &p->pi_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 nl_table_wait.lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond152#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock &p->pi_lock irq_context: 0 (wq_completion)bond152#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond152#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond152#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond152#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond152#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond152#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond152#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond152#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond152#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond152#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond152#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond152#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond152#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond152#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond152#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond152#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond152#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond152#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond152#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 
(wq_completion)bond152#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond152#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond152#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond152#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond124#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond152#4 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond124#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond124#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &rq->__lock irq_context: 0 (wq_completion)bond124#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond124#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond124#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond113#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond124#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond113#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond113#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond113#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond113#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond113#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond113#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond113#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond113#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond113#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond113#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond113#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond113#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond113#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond113#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 
(wq_completion)bond113#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond113#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond113#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond113#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond173#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond173#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond113#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond113#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond113#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond113#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond113#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond113#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond113#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond113#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond113#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond113#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond113#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond113#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond113#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond113#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond113#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond113#3 
(work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond113#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond105#5 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond105#5 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond124#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond124#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond124#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond124#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond124#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond124#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond124#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond124#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond124#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond124#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond124#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond124#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond124#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond124#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond124#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock &rq->__lock irq_context: 0 (wq_completion)bond124#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond124#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond124#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond124#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond124#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond124#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock irq_context: 0 (wq_completion)bond124#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond124#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock irq_context: 0 (wq_completion)bond124#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond124#3 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond124#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond124#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &____s->seqcount#2 irq_context: 0 (wq_completion)bond124#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &____s->seqcount irq_context: 0 (wq_completion)bond124#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond124#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond124#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond124#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond124#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond124#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond124#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond124#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond124#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond124#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond124#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond124#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond124#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond124#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond124#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond124#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond124#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock 
rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond124#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond138 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond124#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond138 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond138 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond138 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond138 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond138 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond138 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond138 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond138 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond138 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond138 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond138 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond138 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond138 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond138 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond138 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond138 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond138 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond138 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 (wq_completion)bond138 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond138 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond138 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond138 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond138 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond138 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond138 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock 
rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond138 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond138 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond138 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond138 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond138 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond138 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond138 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond138 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond138 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond138 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond138 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond138 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond138 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond138 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq irq_context: 0 (wq_completion)bond138 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 (wq_completion)bond177#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond138 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond138 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond138 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond138 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond194 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond194 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock 
&per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond191 &rq->__lock irq_context: 0 (wq_completion)bond191 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond168 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond168 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond192#2 irq_context: 0 (wq_completion)bond192#2 &rq->__lock irq_context: 0 (wq_completion)bond192#2 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond192#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond192#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond192#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond192#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond192#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond192#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond189 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond192#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond151#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond151#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond151#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond151#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond181#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond151#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond181#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond181#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond192#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond181#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond192#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond181#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond192#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond181#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond192#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond181#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond181#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond192#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 rtnl_mutex &rq->__lock 
&obj_hash[i].lock irq_context: 0 (wq_completion)bond181#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 rtnl_mutex &rq->__lock &base->lock irq_context: 0 (wq_completion)bond181#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &rq->__lock irq_context: 0 rtnl_mutex &rq->__lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond181#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond181#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond181#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond181#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond181#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock irq_context: 0 (wq_completion)bond181#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond181#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim &rq->__lock irq_context: 0 (wq_completion)bond181#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond181#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond181#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 (wq_completion)bond181#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond181#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond181#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond181#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond181#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond181#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond181#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond181#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond181#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond181#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond181#3 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond181#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond181#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond181#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond181#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond181#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond181#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond181#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq irq_context: 0 (wq_completion)bond181#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 (wq_completion)bond181#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond181#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond181#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond181#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond181#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond181#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond181#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond181#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond181#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond184 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond182#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond182#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond193 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond193 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond151#2 &rq->__lock irq_context: 0 (wq_completion)bond151#2 &rq->__lock 
&per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond182#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond182#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond182#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond182#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond182#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond182#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond182#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond182#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond182#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond182#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond182#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond182#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond182#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond182#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond182#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond182#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond182#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond182#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond182#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond182#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond182#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond182#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond182#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond182#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock 
&data->lock irq_context: 0 (wq_completion)bond182#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond182#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond182#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond182#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond110#5 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond110#5 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond182#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond182#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq irq_context: 0 (wq_completion)bond182#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 (wq_completion)bond182#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond182#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond179#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond179#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond179#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond179#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond179#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond183#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond183#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond183#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond183#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond183#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond174#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond174#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond174#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond174#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond174#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond104#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond104#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond104#2 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond104#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond104#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond104#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond104#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond104#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond104#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond104#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond104#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond104#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond104#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond104#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond104#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond104#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond104#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond104#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond104#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond104#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond104#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond104#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond104#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond104#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond104#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond104#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond104#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond104#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 
(wq_completion)bond104#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond104#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond104#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond104#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond104#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond104#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond176#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond176#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond176#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond176#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond176#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond176#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond176#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond176#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond176#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond176#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond176#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond176#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond176#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond176#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim &rq->__lock irq_context: 0 (wq_completion)bond176#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond176#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond176#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond176#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock irq_context: 0 (wq_completion)bond176#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond176#2 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond176#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond176#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond176#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond176#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond176#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond176#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond176#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond176#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond176#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond176#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond176#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond176#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond187#4 irq_context: 0 (wq_completion)bond187#4 &rq->__lock irq_context: 0 (wq_completion)bond187#4 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond187#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond187#4 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond187#4 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond187#4 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond187#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond187#4 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond187#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond187#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond187#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond187#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &____s->seqcount#2 irq_context: 0 (wq_completion)bond187#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 
(wq_completion)bond187#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond187#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond187#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond187#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond187#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond187#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond187#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond187#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond187#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond187#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond187#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond187#4 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond153 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond153 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond153 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond153 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond153 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond174#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond174#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond174#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond174#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond174#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond167 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond167 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond167 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond167 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond167 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond15#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond15#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond15#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond15#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond15#3 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond15#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond15#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &____s->seqcount#2 irq_context: 0 (wq_completion)bond15#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond15#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond15#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &____s->seqcount irq_context: 0 (wq_completion)bond15#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond15#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond15#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond15#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond15#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond15#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond15#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond15#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond15#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond15#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock irq_context: 0 (wq_completion)bond15#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond15#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond15#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond15#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond15#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond15#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond15#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond15#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond15#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond15#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond15#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond15#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock 
rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond15#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond15#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond93#5 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond93#5 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond15#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond15#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond15#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond15#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond15#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond15#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond15#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond15#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond15#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond15#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq irq_context: 0 (wq_completion)bond15#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 (wq_completion)bond15#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond15#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond15#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond15#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond176#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond176#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond176#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 
(wq_completion)bond176#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond176#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond176#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond176#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond187#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond187#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond187#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond187#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond187#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond161#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond161#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond161#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond161#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond161#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond161#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond161#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond161#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond161#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond161#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond161#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond161#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond161#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond161#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond161#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond161#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond161#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond161#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond161#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond161#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 
(wq_completion)bond161#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond161#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond161#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond161#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond161#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond161#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond161#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond161#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond161#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond161#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond161#4 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond109#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond109#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond109#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond109#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond109#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond109#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond109#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond109#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond109#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond109#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond109#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond109#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond109#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock rcu_node_0 
irq_context: 0 (wq_completion)bond109#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond109#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond172#3 (work_completion)(&(&bond->mcast_work)->work) &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond109#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rcu_state.gp_wq irq_context: 0 (wq_completion)bond109#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 (wq_completion)bond109#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond109#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond109#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond109#5 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond109#5 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond109#5 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond109#5 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond109#5 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond109#5 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond188#3 irq_context: 0 (wq_completion)bond188#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond188#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond188#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond188#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond188#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond188#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond188#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond188#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex (work_completion)(&ht->run_work) &rq->__lock irq_context: 0 (wq_completion)bond192 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond129#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond129#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond178 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond178 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond178 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond178 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 
(wq_completion)bond178 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond191 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond191 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond191 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond191 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond191 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond183 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond183 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond183 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond183 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond183 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond183 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond183 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex batched_entropy_u8.lock irq_context: 0 (wq_completion)bond183 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex kfence_freelist_lock irq_context: 0 (wq_completion)bond183 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond183 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond183 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond183 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond183 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond183 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond183 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond183 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &meta->lock irq_context: 0 (wq_completion)bond183 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond183 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond183 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond183 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond183 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond183 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond183 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond183 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond183 (work_completion)(&(&bond->mcast_work)->work) 
rtnl_mutex &idev->mc_lock &n->list_lock irq_context: 0 (wq_completion)bond183 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond183 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond183 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond183 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond183 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond183 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond183 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond183 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond183 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond183 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &data->lock irq_context: 0 (wq_completion)bond183 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond183 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond183 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond183 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond183 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond129#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond129#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond129#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond129#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond129#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond129#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond129#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond129#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 
(wq_completion)bond129#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond129#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond129#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond129#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond129#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond129#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond129#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 (wq_completion)bond129#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond129#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond129#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond129#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond129#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond129#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond129#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond129#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond129#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond129#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond129#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond129#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond129#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond129#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond11#2 &rq->__lock irq_context: 0 (wq_completion)bond11#2 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond129#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond129#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock 
&br->multicast_lock irq_context: 0 (wq_completion)bond129#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond129#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond129#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond129#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_loc